diff --git a/hackfest_firewall_pnf/charms/vyos-config/README.md b/hackfest_firewall_pnf/charms/vyos-config/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2bdcab20c1a6d39aaffe9a450ba3bdbc44044b42
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/README.md
@@ -0,0 +1,3 @@
+# Vyos-config
+
+This is a proxy charm used by Open Source Mano (OSM) to configure the VyOS Router PNF, written in the [Python Operator Framework](https://github.com/canonical/operator).
diff --git a/hackfest_firewall_pnf/charms/vyos-config/actions.yaml b/hackfest_firewall_pnf/charms/vyos-config/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ea5220497b12df3780728d7cf4d42df06bba2370
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/actions.yaml
@@ -0,0 +1,43 @@
+# VyOS Action
+configure-remote:
+  description: "Add a firewall rule to the VyOS PNF."
+  params:
+    magmaIP:
+      description: "Magma AGW allowed IP"
+      type: "string"
+      default: "0.0.0.0"
+  required:
+    - magmaIP
+
+backup:
+  description: "Back up the current firewall configuration"
+  params:
+    backupFile:
+      description: "Filename to store the backup into"
+      type: "string"
+      default: "backup.cfg"
+
+restore:
+  description: "Restore a previously saved firewall configuration"
+  params:
+    backupFile:
+      description: "Filename to restore the backup from"
+      type: "string"
+      default: "backup.cfg"
+
+# Required by charms.osm.sshproxy
+run:
+  description: "Run an arbitrary command"
+  params:
+    command:
+      description: "The command to execute."
+      type: string
+      default: ""
+  required:
+    - command
+generate-ssh-key:
+  description: "Generate a new SSH keypair for this unit. This will replace any existing previously generated keypair."
+verify-ssh-credentials:
+  description: "Verify that this unit can authenticate with the server specified by ssh-hostname and ssh-username."
+get-ssh-public-key:
+  description: "Get the public SSH key for this unit."
diff --git a/hackfest_firewall_pnf/charms/vyos-config/config.yaml b/hackfest_firewall_pnf/charms/vyos-config/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5b908ae0a19da5d540c59d887ab4ca7939ebdd08
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/config.yaml
@@ -0,0 +1,29 @@
+options:
+  ssh-hostname:
+    type: string
+    default: ""
+    description: "The hostname or IP address of the machine to connect to."
+  ssh-username:
+    type: string
+    default: ""
+    description: "The username to login as."
+  ssh-password:
+    type: string
+    default: ""
+    description: "The password used to authenticate."
+  # ssh-private-key:
+  #   type: string
+  #   default: ""
+  #   description: "DEPRECATED. The private ssh key to be used to authenticate."
+  ssh-public-key:
+    type: string
+    default: ""
+    description: "The public key of this unit."
+  ssh-key-type:
+    type: string
+    default: "rsa"
+    description: "The type of encryption to use for the SSH key."
+  ssh-key-bits:
+    type: int
+    default: 4096
+    description: "The number of bits to use for the SSH key."
diff --git a/hackfest_firewall_pnf/charms/vyos-config/hooks/install b/hackfest_firewall_pnf/charms/vyos-config/hooks/install
new file mode 100755
index 0000000000000000000000000000000000000000..b110e1c95fac46445b6f50c5334716fab6705f98
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/hooks/install
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+sys.path.append("lib")
+
+from ops.charm import CharmBase, CharmEvents
+from ops.framework import StoredState, EventBase, EventSource
+from ops.main import main
+from ops.model import (
+    ActiveStatus,
+    BlockedStatus,
+    MaintenanceStatus,
+    WaitingStatus,
+    ModelError,
+)
+import os
+import subprocess
+import traceback
+
+from charms.osm.sshproxy import SSHProxyCharm
+from charms.osm import libansible
+
+
+class VyosCharm(SSHProxyCharm):
+    def __init__(self, framework, key):
+        super().__init__(framework, key)
+
+        # Register all of the events we want to observe
+        self.framework.observe(self.on.config_changed, self.on_config_changed)
+        self.framework.observe(self.on.install, self.on_install)
+        self.framework.observe(self.on.start, self.on_start)
+        self.framework.observe(self.on.upgrade_charm, self.on_upgrade_charm)
+        # Charm actions (primitives)
+        self.framework.observe(
+            self.on.configure_remote_action, self.on_configure_remote_action
+        )
+
+    def on_config_changed(self, event):
+        """Handle changes in configuration."""
+        super().on_config_changed(event)
+
+    def on_install(self, event):
+        """Called when the charm is being installed."""
+        super().on_install(event)
+        self.unit.status = MaintenanceStatus("Installing Ansible")
+        libansible.install_ansible_support()
+        self.unit.status = ActiveStatus()
+
+    def on_start(self, event):
+        """Called when the charm is being started."""
+        super().on_start(event)
+
+    def on_configure_remote_action(self, event):
+        """Configure the remote VyOS PNF via the Ansible playbook."""
+
+        if self.unit.is_leader():
+            try:
+                config = self.model.config
+                magmaIP = event.params["magmaIP"]
+                dict_vars = {"MAGMA_AGW_IP": magmaIP}
+                result = libansible.execute_playbook(
+                    "configure-remote.yaml",
+                    config["ssh-hostname"],
+                    config["ssh-username"],
+                    config["ssh-password"],
+                    dict_vars,
+                )
+                event.set_results({"output": result})
+            except Exception:
+                exc_type, exc_value, exc_traceback = sys.exc_info()
+                err = "".join(
+                    traceback.format_exception(exc_type, exc_value, exc_traceback)
+                )
+                event.fail(message="configure-remote failed: " + err)
+        else:
+            event.fail("Unit is not leader")
+            return
+
+    def on_upgrade_charm(self, event):
+        """Upgrade the charm."""
+
+
+if __name__ == "__main__":
+    main(VyosCharm)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..61ef90719b5d5d759de1a6b80a1ea748d8bb0911
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/__init__.py
@@ -0,0 +1,97 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
+import subprocess
+import sys
+
+try:
+    import six  # NOQA:F401
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # NOQA:F401
+
+try:
+    import yaml  # NOQA:F401
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # NOQA:F401
+
+
+# Holds a mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below. This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+
+    The date, a string in semi-ISO8601 (YYYY-MM) format, indicates the year
+    and month in which the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String indicating where the function has moved to.
+    :param date: optional string, in YYYY-MM format, indicating when the
+        function will definitely (probably) be removed.
+    :param log: The log function to call in order to log. If None, logs to
+        stdout.
+    """
+    def wrap(f):
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            try:
+                module = inspect.getmodule(f)
+                file = inspect.getsourcefile(f)
+                lines = inspect.getsourcelines(f)
+                f_name = "{}-{}-{}..{}-{}".format(
+                    module.__name__, file, lines[0], lines[-1], f.__name__)
+            except (IOError, TypeError):
+                # assume it was local, so just use the name of the function
+                f_name = f.__name__
+            if f_name not in __deprecated_functions:
+                __deprecated_functions[f_name] = True
+                s = "DEPRECATION WARNING: Function {} is being removed".format(
+                    f.__name__)
+                if date:
+                    s = "{} on/around {}".format(s, date)
+                if warning:
+                    s = "{} : {}".format(s, warning)
+                if log:
+                    log(s)
+                else:
+                    print(s)
+            return f(*args, **kwargs)
+        return wrapped_f
+    return wrap
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/README.rst b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f7901c09c79c268d353825776a74072a7ee4dee7
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/README.rst
@@ -0,0 +1,57 @@
+==========
+Commandant
+==========
+
+-----------------------------------------------------
+Automatic command-line interfaces to Python functions
+-----------------------------------------------------
+
+One of the benefits of ``libvirt`` is the uniformity of the interface: the C API (as well as the bindings in other languages) is a set of functions that accept parameters that are nearly identical to the command-line arguments.
If you run ``virsh``, you get an interactive command prompt that supports all of the same commands that your shell scripts use as ``virsh`` subcommands.
+
+Command execution and stdio manipulation is the greatest common factor across all development systems in the POSIX environment. By exposing your functions as commands that manipulate streams of text, you can make life easier for all the Ruby and Erlang and Go programmers in your life.
+
+Goals
+=====
+
+* Single decorator to expose a function as a command.
+  * now two decorators - one "automatic" and one that allows authors to manipulate the arguments for fine-grained control. (MW)
+* Automatic analysis of function signature through ``inspect.getargspec()``
+* Command argument parser built automatically with ``argparse``
+* Interactive interpreter loop object made with ``Cmd``
+* Options to output structured return value data via ``pprint``, ``yaml`` or ``json`` dumps.
+
+Other Important Features that need writing
+------------------------------------------
+
+* Help and Usage documentation can be automatically generated, but it will be important to let users override this behaviour
+* The decorator should allow specifying further parameters to the parser's add_argument() calls, to specify types or to make arguments behave as boolean flags, etc.
+  - Filename arguments are important, as good practice is for functions to accept file objects as parameters.
+  - choices arguments help to limit bad input before the function is called
+* Some automatic behaviour could make for better defaults, provided the user can override them.
+  - We could automatically detect arguments that default to False or True, and automatically support --no-foo for foo=True.
+  - We could automatically support hyphens as alternates for underscores
+  - Arguments defaulting to sequence types could support the ``append`` action.
+
+
+-----------------------------------------------------
+Implementing subcommands
+-----------------------------------------------------
+
+(WIP)
+
+So as to avoid dependencies on the cli module, subcommands should be defined separately from their implementations. The recommendation would be to place definitions into separate modules near the implementations which they expose.
+
+Some examples::
+
+    from charmhelpers.cli import CommandLine
+    from charmhelpers.payload import execd
+    from charmhelpers.foo import bar
+
+    cli = CommandLine()
+
+    cli.subcommand(execd.execd_run)
+
+    @cli.subcommand_builder("bar", description="Bar baz qux")
+    def barcmd_builder(subparser):
+        subparser.add_argument('argument1', help="yackety")
+        return bar
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..389b490f4eec27f18118ab6a5f3f529dbf2e9ecc
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/__init__.py
@@ -0,0 +1,189 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import argparse +import sys + +from six.moves import zip + +import charmhelpers.core.unitdata + + +class OutputFormatter(object): + def __init__(self, outfile=sys.stdout): + self.formats = ( + "raw", + "json", + "py", + "yaml", + "csv", + "tab", + ) + self.outfile = outfile + + def add_arguments(self, argument_parser): + formatgroup = argument_parser.add_mutually_exclusive_group() + choices = self.supported_formats + formatgroup.add_argument("--format", metavar='FMT', + help="Select output format for returned data, " + "where FMT is one of: {}".format(choices), + choices=choices, default='raw') + for fmt in self.formats: + fmtfunc = getattr(self, fmt) + formatgroup.add_argument("-{}".format(fmt[0]), + "--{}".format(fmt), action='store_const', + const=fmt, dest='format', + help=fmtfunc.__doc__) + + @property + def supported_formats(self): + return self.formats + + def raw(self, output): + """Output data as raw string (default)""" + if isinstance(output, (list, tuple)): + output = '\n'.join(map(str, output)) + self.outfile.write(str(output)) + + def py(self, output): + """Output data as a nicely-formatted python data structure""" + import pprint + pprint.pprint(output, stream=self.outfile) + + def json(self, output): + """Output data in JSON format""" + import json + json.dump(output, self.outfile) + + def yaml(self, output): + """Output data in YAML format""" + import yaml + yaml.safe_dump(output, self.outfile) + + def csv(self, output): + """Output data as excel-compatible CSV""" + import csv + csvwriter = csv.writer(self.outfile) + csvwriter.writerows(output) + + def tab(self, output): + """Output data in excel-compatible tab-delimited format""" + import csv + csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) + csvwriter.writerows(output) + + def format_output(self, output, fmt='raw'): + fmtfunc = getattr(self, fmt) + fmtfunc(output) + + +class CommandLine(object): + argument_parser = None + subparsers = None + formatter = None + exit_code = 0 + + def __init__(self): + if not self.argument_parser: + self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') + if not self.formatter: + self.formatter = OutputFormatter() + self.formatter.add_arguments(self.argument_parser) + if not self.subparsers: + self.subparsers = self.argument_parser.add_subparsers(help='Commands') + + def subcommand(self, command_name=None): + """ + Decorate a function as a subcommand. Use its arguments as the + command-line arguments""" + def wrapper(decorated): + cmd_name = command_name or decorated.__name__ + subparser = self.subparsers.add_parser(cmd_name, + description=decorated.__doc__) + for args, kwargs in describe_arguments(decorated): + subparser.add_argument(*args, **kwargs) + subparser.set_defaults(func=decorated) + return decorated + return wrapper + + def test_command(self, decorated): + """ + Subcommand is a boolean test function, so bool return values should be + converted to a 0/1 exit code. + """ + decorated._cli_test_command = True + return decorated + + def no_output(self, decorated): + """ + Subcommand is not expected to return a value, so don't print a spurious None. + """ + decorated._cli_no_output = True + return decorated + + def subcommand_builder(self, command_name, description=None): + """ + Decorate a function that builds a subcommand. 
Builders should accept a + single argument (the subparser instance) and return the function to be + run as the command.""" + def wrapper(decorated): + subparser = self.subparsers.add_parser(command_name) + func = decorated(subparser) + subparser.set_defaults(func=func) + subparser.description = description or func.__doc__ + return wrapper + + def run(self): + "Run cli, processing arguments and executing subcommands." + arguments = self.argument_parser.parse_args() + argspec = inspect.getargspec(arguments.func) + vargs = [] + for arg in argspec.args: + vargs.append(getattr(arguments, arg)) + if argspec.varargs: + vargs.extend(getattr(arguments, argspec.varargs)) + output = arguments.func(*vargs) + if getattr(arguments.func, '_cli_test_command', False): + self.exit_code = 0 if output else 1 + output = '' + if getattr(arguments.func, '_cli_no_output', False): + output = '' + self.formatter.format_output(output, arguments.format) + if charmhelpers.core.unitdata._KV: + charmhelpers.core.unitdata._KV.flush() + + +cmdline = CommandLine() + + +def describe_arguments(func): + """ + Analyze a function's signature and return a data structure suitable for + passing in as arguments to an argparse parser's add_argument() method.""" + + argspec = inspect.getargspec(func) + # we should probably raise an exception somewhere if func includes **kwargs + if argspec.defaults: + positional_args = argspec.args[:-len(argspec.defaults)] + keyword_names = argspec.args[-len(argspec.defaults):] + for arg, default in zip(keyword_names, argspec.defaults): + yield ('--{}'.format(arg),), {'default': default} + else: + positional_args = argspec.args + + for arg in positional_args: + yield (arg,), {} + if argspec.varargs: + yield (argspec.varargs,), {'nargs': '*'} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/benchmark.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..303af14b607d31e338aefff0df593609b7b45feb --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/benchmark.py @@ -0,0 +1,34 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . 
import cmdline +from charmhelpers.contrib.benchmark import Benchmark + + +@cmdline.subcommand(command_name='benchmark-start') +def start(): + Benchmark.start() + + +@cmdline.subcommand(command_name='benchmark-finish') +def finish(): + Benchmark.finish() + + +@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") +def service(subparser): + subparser.add_argument("value", help="The composite score.") + subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") + subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") + return Benchmark.set_composite_score diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/commands.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/commands.py new file mode 100644 index 0000000000000000000000000000000000000000..b93105650be8226aa390fda46aa08afb23ebc7bc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/commands.py @@ -0,0 +1,30 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module loads sub-modules into the python runtime so they can be +discovered via the inspect module. In order to prevent flake8 from (rightfully) +telling us these are unused modules, throw a ' # noqa' at the end of each import +so that the warning is suppressed. +""" + +from . import CommandLine # noqa + +""" +Import the sub-modules which have decorated subcommands to register with chlp. +""" +from . import host # noqa +from . import benchmark # noqa +from . import unitdata # noqa +from . import hookenv # noqa diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/hookenv.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/hookenv.py new file mode 100644 index 0000000000000000000000000000000000000000..bd72f448bf0092251a454ff6dd3145f09048ae72 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/hookenv.py @@ -0,0 +1,21 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . 
import cmdline +from charmhelpers.core import hookenv + + +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) +cmdline.subcommand('service-name')(hookenv.service_name) +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/host.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/host.py new file mode 100644 index 0000000000000000000000000000000000000000..40396849907976fd077cc9c53a0852c4380c6266 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/host.py @@ -0,0 +1,29 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import cmdline +from charmhelpers.core import host + + +@cmdline.subcommand() +def mounts(): + "List mounts" + return host.mounts() + + +@cmdline.subcommand_builder('service', description="Control system services") +def service(subparser): + subparser.add_argument("action", help="The action to perform (start, stop, etc...)") + subparser.add_argument("service_name", help="Name of the service to control") + return host.service diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/unitdata.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/unitdata.py new file mode 100644 index 0000000000000000000000000000000000000000..acce846f84ef32ed0b5829cf08e67ad33f0eb5d1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/cli/unitdata.py @@ -0,0 +1,46 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . 
import cmdline
+from charmhelpers.core import unitdata
+
+
+@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
+def unitdata_cmd(subparser):
+    nested = subparser.add_subparsers()
+
+    get_cmd = nested.add_parser('get', help='Retrieve data')
+    get_cmd.add_argument('key', help='Key to retrieve the value of')
+    get_cmd.set_defaults(action='get', value=None)
+
+    getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
+    getrange_cmd.add_argument('key', metavar='prefix',
+                              help='Prefix of the keys to retrieve')
+    getrange_cmd.set_defaults(action='getrange', value=None)
+
+    set_cmd = nested.add_parser('set', help='Store data')
+    set_cmd.add_argument('key', help='Key to set')
+    set_cmd.add_argument('value', help='Value to store')
+    set_cmd.set_defaults(action='set')
+
+    def _unitdata_cmd(action, key, value):
+        if action == 'get':
+            return unitdata.kv().get(key)
+        elif action == 'getrange':
+            return unitdata.kv().getrange(key)
+        elif action == 'set':
+            unitdata.kv().set(key, value)
+            unitdata.kv().flush()
+        return ''
+    return _unitdata_cmd
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/context.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..01864740e89633f6c65f43c32ed89ef9d5e857c9
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/context.py
@@ -0,0 +1,205 @@
+# Copyright 2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+A Pythonic API to interact with the charm hook environment.
+
+:author: Stuart Bishop
+'''
+
+import six
+
+from charmhelpers.core import hookenv
+
+from collections import OrderedDict
+if six.PY3:
+    from collections import UserDict  # pragma: nocover
+else:
+    from UserDict import IterableUserDict as UserDict  # pragma: nocover
+
+
+class Relations(OrderedDict):
+    '''Mapping relation name -> relation id -> Relation.
+
+    >>> rels = Relations()
+    >>> rels['sprog']['sprog:12']['client/6']['widget']
+    'remote widget'
+    >>> rels['sprog']['sprog:12'].local['widget'] = 'local widget'
+    >>> rels['sprog']['sprog:12'].local['widget']
+    'local widget'
+    >>> rels.peer.local['widget']
+    'local widget on the peer relation'
+    '''
+    def __init__(self):
+        super(Relations, self).__init__()
+        for relname in sorted(hookenv.relation_types()):
+            self[relname] = OrderedDict()
+            relids = hookenv.relation_ids(relname)
+            relids.sort(key=lambda x: int(x.split(':', 1)[-1]))
+            for relid in relids:
+                self[relname][relid] = Relation(relid)
+
+    @property
+    def peer(self):
+        peer_relid = hookenv.peer_relation_id()
+        for rels in self.values():
+            if peer_relid in rels:
+                return rels[peer_relid]
+
+
+class Relation(OrderedDict):
+    '''Mapping of unit -> remote RelationInfo for a relation.
+
+    This is an OrderedDict mapping, ordered numerically by unit number.
+
+    Also provides access to the local RelationInfo, and peer RelationInfo
+    instances by the 'local' and 'peers' attributes.
+
+    >>> r = Relation('sprog:12')
+    >>> r.keys()
+    ['client/9', 'client/10']     # Ordered numerically
+    >>> r['client/10']['widget']  # A remote RelationInfo setting
+    'remote widget'
+    >>> r.local['widget']  # The local RelationInfo setting
+    'local widget'
+    '''
+    relid = None  # The relation id.
+    relname = None  # The relation name (also known as relation type).
+    service = None  # The remote service name, if known.
+    local = None  # The local end's RelationInfo.
+    peers = None  # Map of peer -> RelationInfo. None if no peer relation.
+
+    def __init__(self, relid):
+        remote_units = hookenv.related_units(relid)
+        remote_units.sort(key=lambda u: int(u.split('/', 1)[-1]))
+        super(Relation, self).__init__((unit, RelationInfo(relid, unit))
+                                       for unit in remote_units)
+
+        self.relname = relid.split(':', 1)[0]
+        self.relid = relid
+        self.local = RelationInfo(relid, hookenv.local_unit())
+
+        for relinfo in self.values():
+            self.service = relinfo.service
+            break
+
+        # If we have peers, and they have joined both the provided peer
+        # relation and this relation, we can peek at their data too.
+        # This is useful for creating consensus without leadership.
+        peer_relid = hookenv.peer_relation_id()
+        if peer_relid and peer_relid != relid:
+            peers = hookenv.related_units(peer_relid)
+            if peers:
+                peers.sort(key=lambda u: int(u.split('/', 1)[-1]))
+                self.peers = OrderedDict((peer, RelationInfo(relid, peer))
+                                         for peer in peers)
+            else:
+                self.peers = OrderedDict()
+        else:
+            self.peers = None
+
+    def __str__(self):
+        return '{} ({})'.format(self.relid, self.service)
+
+
+class RelationInfo(UserDict):
+    '''The bag of data at an end of a relation.
+
+    Every unit participating in a relation has a single bag of
+    data associated with that relation. This is that bag.
+
+    The bag of data for the local unit may be updated. Remote data
+    is immutable and will remain static for the duration of the hook.
+
+    Changes made to the local unit's relation data only become visible
+    to other units after the hook completes successfully. If the hook
+    does not complete successfully, the changes are rolled back.
+
+    Unlike standard Python mappings, setting an item to None is the
+    same as deleting it.
+
+    >>> relinfo = RelationInfo('db:12')  # Default is the local unit.
+    >>> relinfo['user'] = 'fred'
+    >>> relinfo['user']
+    'fred'
+    >>> relinfo['user'] = None
+    >>> 'fred' in relinfo
+    False
+
+    This class wraps hookenv.relation_get and hookenv.relation_set.
+    All caching is left up to these two methods to avoid synchronization
+    issues. Data is only loaded on demand.
+    '''
+    relid = None  # The relation id.
+    relname = None  # The relation name (also known as the relation type).
+    unit = None  # The unit id.
+    number = None  # The unit number (integer).
+    service = None  # The service name.
+
+    def __init__(self, relid, unit):
+        self.relname = relid.split(':', 1)[0]
+        self.relid = relid
+        self.unit = unit
+        self.service, num = self.unit.split('/', 1)
+        self.number = int(num)
+
+    def __str__(self):
+        return '{} ({})'.format(self.relid, self.unit)
+
+    @property
+    def data(self):
+        return hookenv.relation_get(rid=self.relid, unit=self.unit)
+
+    def __setitem__(self, key, value):
+        if self.unit != hookenv.local_unit():
+            raise TypeError('Attempting to set {} on remote unit {}'
+                            ''.format(key, self.unit))
+        if value is not None and not isinstance(value, six.string_types):
+            # We don't do implicit casting.
This would cause simple + # types like integers to be read back as strings in subsequent + # hooks, and mutable types would require a lot of wrapping + # to ensure relation-set gets called when they are mutated. + raise ValueError('Only string values allowed') + hookenv.relation_set(self.relid, {key: value}) + + def __delitem__(self, key): + # Deleting a key and setting it to null is the same thing in + # Juju relations. + self[key] = None + + +class Leader(UserDict): + def __init__(self): + pass # Don't call superclass initializer, as it will nuke self.data + + @property + def data(self): + return hookenv.leader_get() + + def __setitem__(self, key, value): + if not hookenv.is_leader(): + raise TypeError('Not the leader. Cannot change leader settings.') + if value is not None and not isinstance(value, six.string_types): + # We don't do implicit casting. This would cause simple + # types like integers to be read back as strings in subsequent + # hooks, and mutable types would require a lot of wrapping + # to ensure leader-set gets called when they are mutated. + raise ValueError('Only string values allowed') + hookenv.leader_set({key: value}) + + def __delitem__(self, key): + # Deleting a key and setting it to null is the same thing in + # Juju leadership settings. + self[key] = None diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/amulet/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
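As a brief illustration of the ``charmhelpers.context`` API added above (not part of the changeset itself): a minimal hook-body sketch, assuming a hypothetical relation named ``db`` and hypothetical setting names ``user``, ``password`` and ``admin-password``::

    from charmhelpers import context
    from charmhelpers.core import hookenv

    def db_relation_changed():
        # Relations() maps relation name -> relation id -> Relation.
        rels = context.Relations()
        for relid, relation in rels['db'].items():
            # Each Relation maps remote unit -> RelationInfo, a dict-like
            # bag backed by relation-get.
            for unit, relinfo in relation.items():
                hookenv.log('{} sent user={}'.format(unit, relinfo.get('user')))
            # Only the local bag is writable, and only string values are
            # allowed; setting a key to None deletes it.
            relation.local['password'] = 'sekrit'

        # Leader() follows the same mapping protocol for leadership
        # settings; writes raise TypeError unless this unit is the leader.
        if hookenv.is_leader():
            context.Leader()['admin-password'] = 'sekrit'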
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/amulet/deployment.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/amulet/deployment.py
new file mode 100644
index 0000000000000000000000000000000000000000..d21d01d8ffe242d686283b0ed977b88be6bfc74e
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/amulet/deployment.py
@@ -0,0 +1,99 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import amulet
+import os
+import six
+
+
+class AmuletDeployment(object):
+    """Amulet deployment.
+
+    This class provides generic Amulet deployment and test runner
+    methods.
+    """
+
+    def __init__(self, series=None):
+        """Initialize the deployment environment."""
+        self.series = None
+
+        if series:
+            self.series = series
+            self.d = amulet.Deployment(series=self.series)
+        else:
+            self.d = amulet.Deployment()
+
+    def _add_services(self, this_service, other_services):
+        """Add services.
+
+        Add services to the deployment where this_service is the local charm
+        that we're testing and other_services are the other services that
+        are being used in the local amulet tests.
+        """
+        if this_service['name'] != os.path.basename(os.getcwd()):
+            s = this_service['name']
+            msg = "The charm's root directory name needs to be {}".format(s)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        if 'units' not in this_service:
+            this_service['units'] = 1
+
+        self.d.add(this_service['name'], units=this_service['units'],
+                   constraints=this_service.get('constraints'),
+                   storage=this_service.get('storage'))
+
+        for svc in other_services:
+            if 'location' in svc:
+                branch_location = svc['location']
+            elif self.series:
+                branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
+            else:
+                branch_location = None
+
+            if 'units' not in svc:
+                svc['units'] = 1
+
+            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+                       constraints=svc.get('constraints'),
+                       storage=svc.get('storage'))
+
+    def _add_relations(self, relations):
+        """Add all of the relations for the services."""
+        for k, v in six.iteritems(relations):
+            self.d.relate(k, v)
+
+    def _configure_services(self, configs):
+        """Configure all of the services."""
+        for service, config in six.iteritems(configs):
+            self.d.configure(service, config)
+
+    def _deploy(self):
+        """Deploy environment and wait for all hooks to finish executing."""
+        timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
+        try:
+            self.d.setup(timeout=timeout)
+            self.d.sentry.wait(timeout=timeout)
+        except amulet.helpers.TimeoutError:
+            amulet.raise_status(
+                amulet.FAIL,
+                msg="Deployment timed out ({}s)".format(timeout)
+            )
+        except Exception:
+            raise
+
+    def run_tests(self):
+        """Run all of the methods that are prefixed with 'test_'."""
+        for test in dir(self):
+            if test.startswith('test_'):
+                getattr(self, test)()
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/amulet/utils.py
b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..54283088e1ddd53bc85008bc7446620e87b42278 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,820 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import json +import logging +import os +import re +import socket +import subprocess +import sys +import time +import uuid + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. + """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. + + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.ssh(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg + + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding + service units. 
+ + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. + self.log.warn('DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + + for k, v in six.iteritems(commands): + for cmd in v: + output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. + + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name in ['rabbitmq-server', 'apache2', + 'memcached']): + # init is systemd (or regular sysv) + cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output + + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. + # https://bugs.python.org/issue7005 + config = configparser.ConfigParser(allow_no_value=True) + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. 
+ """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. + """ + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + + for k, v in six.iteritems(expected): + if k in actual: + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. 
:sentry_unit: The sentry unit to check for the service on
+        :service: service name to look for in process table
+        :pgrep_full: [Deprecated] Use full command line search mode with pgrep
+        :returns: epoch time of service process start
+        """
+        pid_list = self.get_process_id_list(
+            sentry_unit, service, pgrep_full=pgrep_full)
+        pid = pid_list[0]
+        proc_dir = '/proc/{}'.format(pid)
+        self.log.debug('Pid for {} on {}: {}'.format(
+            service, sentry_unit.info['unit_name'], pid))
+
+        return self._get_dir_mtime(sentry_unit, proc_dir)
+
+    def service_restarted(self, sentry_unit, service, filename,
+                          pgrep_full=None, sleep_time=20):
+        """Check if service was restarted.
+
+        Compare a service's start time vs a file's last modification time
+        (such as a config file for that service) to determine if the service
+        has been restarted.
+        """
+        # /!\ DEPRECATION WARNING (beisner):
+        # This method is prone to races in that no before-time is known.
+        # Use validate_service_config_changed instead.
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+        self.log.warn('DEPRECATION WARNING: use '
+                      'validate_service_config_changed instead of '
+                      'service_restarted due to known races.')
+
+        time.sleep(sleep_time)
+        if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
+                self._get_file_mtime(sentry_unit, filename)):
+            return True
+        else:
+            return False
+
+    def service_restarted_since(self, sentry_unit, mtime, service,
+                                pgrep_full=None, sleep_time=20,
+                                retry_count=30, retry_sleep_time=10):
+        """Check if a service was started after a given time.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check for the service on
+          mtime (float): The epoch time to check against
+          service (string): service name to look for in process table
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
+
+        Returns:
+          bool: True if the service was found and its start time is newer
+                than mtime; False if the service is older than mtime or was
+                not found.
+        """
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s service restarted since %s on '
+                       '%s' % (service, mtime, unit_name))
+        time.sleep(sleep_time)
+        proc_start_time = None
+        tries = 0
+        while tries <= retry_count and not proc_start_time:
+            try:
+                proc_start_time = self._get_proc_start_time(sentry_unit,
+                                                            service,
+                                                            pgrep_full)
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'OK'.format(tries, service, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, proc may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'failed\n{}'.format(tries, service,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not proc_start_time:
+            self.log.warn('No proc start time found, assuming service did '
+                          'not start')
+            return False
+        if proc_start_time >= mtime:
+            self.log.debug('Proc start time is newer than provided mtime '
+                           '(%s >= %s) on %s (OK)' % (proc_start_time,
+                                                      mtime, unit_name))
+            return True
+        else:
+            self.log.warn('Proc start time (%s) is older than provided mtime '
+                          '(%s) on %s, service did not '
+                          'restart' % (proc_start_time, mtime, unit_name))
+            return False
+
+    def config_updated_since(self, sentry_unit, filename, mtime,
+                             sleep_time=20, retry_count=30,
+                             retry_sleep_time=10):
+        """Check if file was modified after a given time.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check the file mtime on
+          filename (string): The file to check mtime of
+          mtime (float): The epoch time to check against
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
+
+        Returns:
+          bool: True if file was modified more recently than mtime, False if
+                file was modified before mtime, or if file not found.
+        """
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s updated since %s on '
+                       '%s' % (filename, mtime, unit_name))
+        time.sleep(sleep_time)
+        file_mtime = None
+        tries = 0
+        while tries <= retry_count and not file_mtime:
+            try:
+                file_mtime = self._get_file_mtime(sentry_unit, filename)
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'OK'.format(tries, filename, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, file may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'failed\n{}'.format(tries, filename,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not file_mtime:
+            self.log.warn('Could not determine file mtime, assuming '
+                          'file does not exist')
+            return False
+
+        if file_mtime >= mtime:
+            self.log.debug('File mtime is newer than provided mtime '
+                           '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                      mtime, unit_name))
+            return True
+        else:
+            self.log.warn('File mtime is older than provided mtime '
+                          '(%s < %s) on %s' % (file_mtime,
+                                               mtime, unit_name))
+            return False
+
+    def validate_service_config_changed(self, sentry_unit, mtime, service,
+                                        filename, pgrep_full=None,
+                                        sleep_time=20, retry_count=30,
+                                        retry_sleep_time=10):
+        """Check that both the service and a file were updated after mtime.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check for the service on
+          mtime (float): The epoch time to check against
+          service (string): service name to look for in process table
+          filename (string): The file to check mtime of
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep in seconds to pass to test helpers
+          retry_count (int): If service is not found, how many times to retry
+          retry_sleep_time (int): Time in seconds to wait between retries
+
+        Typical Usage:
+            u = OpenStackAmuletUtils(ERROR)
+            ...
mtime = u.get_sentry_time(self.cinder_sentry)
+            self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
+            if not u.validate_service_config_changed(self.cinder_sentry,
+                                                     mtime,
+                                                     'cinder-api',
+                                                     '/etc/cinder/cinder.conf'):
+                amulet.raise_status(amulet.FAIL, msg='update failed')
+
+        Returns:
+          bool: True if both the service and the file were updated/restarted
+                after mtime; False if the service is older than mtime, if the
+                service was not found, or if the file was modified before
+                mtime.
+        """
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+
+        service_restart = self.service_restarted_since(
+            sentry_unit, mtime,
+            service,
+            pgrep_full=pgrep_full,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        config_update = self.config_updated_since(
+            sentry_unit,
+            filename,
+            mtime,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        return service_restart and config_update
+
+    def get_sentry_time(self, sentry_unit):
+        """Return current epoch time on a sentry"""
+        cmd = "date +'%s'"
+        return float(sentry_unit.run(cmd)[0])
+
+    def relation_error(self, name, data):
+        return 'unexpected relation data in {} - {}'.format(name, data)
+
+    def endpoint_error(self, name, data):
+        return 'unexpected endpoint data in {} - {}'.format(name, data)
+
+    def get_ubuntu_releases(self):
+        """Return a list of all Ubuntu releases in order of release."""
+        _d = distro_info.UbuntuDistroInfo()
+        _release_list = _d.all
+        return _release_list
+
+    def file_to_url(self, file_rel_path):
+        """Convert a relative file path to a file URL."""
+        _abs_path = os.path.abspath(file_rel_path)
+        return urlparse.urlparse(_abs_path, scheme='file').geturl()
+
+    def check_commands_on_units(self, commands, sentry_units):
+        """Check that all commands in a list exit zero on all
+        sentry units in a list.
+
+        :param commands: list of bash commands
+        :param sentry_units: list of sentry unit pointers
+        :returns: None if successful; Failure message otherwise
+        """
+        self.log.debug('Checking exit codes for {} commands on {} '
+                       'sentry units...'.format(len(commands),
+                                                len(sentry_units)))
+        for sentry_unit in sentry_units:
+            for cmd in commands:
+                output, code = sentry_unit.run(cmd)
+                if code == 0:
+                    self.log.debug('{} `{}` returned {} '
+                                   '(OK)'.format(sentry_unit.info['unit_name'],
+                                                 cmd, code))
+                else:
+                    return ('{} `{}` returned {} '
+                            '{}'.format(sentry_unit.info['unit_name'],
+                                        cmd, code, output))
+        return None
+
+    def get_process_id_list(self, sentry_unit, process_name,
+                            expect_success=True, pgrep_full=False):
+        """Get a list of process ID(s) from a single sentry juju unit
+        for a single process name.
+
+        :param sentry_unit: Amulet sentry instance (juju unit)
+        :param process_name: Process name
+        :param expect_success: If False, expect the PID to be missing,
+            raise if it is present.
+ :returns: List of process IDs + """ + if pgrep_full: + cmd = 'pgrep -f "{}"'.format(process_name) + else: + cmd = 'pidof -x "{}"'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output).split() + + def get_unit_process_ids( + self, unit_processes, expect_success=True, pgrep_full=False): + """Construct a dict containing unit sentries, process names, and + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. + """ + pid_dict = {} + for sentry_unit, process_list in six.iteritems(unit_processes): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success, + pgrep_full=pgrep_full) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + + for (e_sentry, e_proc_names) in six.iteritems(expected): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + return ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + return ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + + a_pids_length = len(a_pids) + fail_msg = ('PID count mismatch. 
{} ({}) expected, actual: ' + '{}, {} ({})'.format(e_sentry_name, e_proc_name, + e_pids, a_pids_length, + a_pids)) + + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: + return fail_msg + else: + self.log.debug('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, e_proc_name, + e_pids, a_pids)) + return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + self.log.debug('Dicts within list are identical') + else: + return 'Dicts within list are not identical' + + return None + + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. 
Optionally raise if fatal is True."""
+        unit_name = sentry_unit.info['unit_name']
+        file_contents = False
+        tries = 0
+        while not file_contents and tries < (max_wait / 4):
+            try:
+                file_contents = sentry_unit.file_contents(file_name)
+            except IOError:
+                self.log.debug('Attempt {} to open file {} from {} '
+                               'failed'.format(tries, file_name,
+                                               unit_name))
+                time.sleep(4)
+                tries += 1
+
+        if file_contents:
+            return file_contents
+        elif not fatal:
+            return None
+        elif fatal:
+            msg = 'Failed to get file contents from unit.'
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
+        """Open a TCP socket to check for a listening service on a host.
+
+        :param host: host name or IP address, default to localhost
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :returns: True if successful, False if connect failed
+        """
+
+        # Resolve host name if possible
+        try:
+            connect_host = socket.gethostbyname(host)
+            host_human = "{} ({})".format(connect_host, host)
+        except socket.error as e:
+            self.log.warn('Unable to resolve address: '
+                          '{} ({}) Trying anyway!'.format(host, e))
+            connect_host = host
+            host_human = connect_host
+
+        # Attempt socket connection
+        try:
+            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            knock.settimeout(timeout)
+            knock.connect((connect_host, port))
+            knock.close()
+            self.log.debug('Socket connect OK for host '
+                           '{} on port {}.'.format(host_human, port))
+            return True
+        except socket.error as e:
+            self.log.debug('Socket connect FAIL for'
+                           ' {} port {} ({})'.format(host_human, port, e))
+            return False
+
+    def port_knock_units(self, sentry_units, port=22,
+                         timeout=15, expect_success=True):
+        """Open a TCP socket to check for a listening service on each
+        listed juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :expect_success: True by default, set False to invert logic
+        :returns: None if successful, Failure message otherwise
+        """
+        for unit in sentry_units:
+            host = unit.info['public-address']
+            connected = self.port_knock_tcp(host, port, timeout)
+            if not connected and expect_success:
+                return 'Socket connect failed.'
+            elif connected and not expect_success:
+                return 'Socket connected unexpectedly.'
+
+    def get_uuid_epoch_stamp(self):
+        """Returns a stamp string based on uuid4 and epoch time.  Useful in
+        generating test messages which need to be unique-ish."""
+        return '[{}-{}]'.format(uuid.uuid4(), time.time())
+
+    # amulet juju action helpers:
+    def run_action(self, unit_sentry, action,
+                   _check_output=subprocess.check_output,
+                   params=None):
+        """Translate to amulet's built in run_action(). Deprecated.
+
+        Run the named action on a given unit sentry.
+
+        params a dict of parameters to use
+        _check_output parameter is no longer used
+
+        @return action_id.
+        """
+        self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been '
+                      'deprecated for amulet.run_action')
+        return unit_sentry.run_action(action, action_args=params)
+
+    def wait_on_action(self, action_id, _check_output=subprocess.check_output):
+        """Wait for a given action, returning if it completed or not.
+ + action_id a string action uuid + _check_output parameter is no longer used + """ + data = amulet.actions.get_action_output(action_id, full_output=True) + return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/ansible/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/ansible/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..824780aca93c406f7331ec3f0da8f22bee1272ec --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/ansible/__init__.py @@ -0,0 +1,303 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers +""" +The ansible package enables you to easily use the configuration management +tool `Ansible`_ to setup and configure your charm. All of your charm +configuration options and relation-data are available as regular Ansible +variables which can be used in your playbooks and templates. + +.. _Ansible: https://www.ansible.com/ + +Usage +===== + +Here is an example directory structure for a charm to get you started:: + + charm-ansible-example/ + |-- ansible + | |-- playbook.yaml + | `-- templates + | `-- example.j2 + |-- config.yaml + |-- copyright + |-- icon.svg + |-- layer.yaml + |-- metadata.yaml + |-- reactive + | `-- example.py + |-- README.md + +Running a playbook called ``playbook.yaml`` when the ``install`` hook is run +can be as simple as:: + + from charmhelpers.contrib import ansible + from charms.reactive import hook + + @hook('install') + def install(): + ansible.install_ansible_support() + ansible.apply_playbook('ansible/playbook.yaml') + +Here is an example playbook that uses the ``template`` module to template the +file ``example.j2`` to the charm host and then uses the ``debug`` module to +print out all the host and Juju variables that you can use in your playbooks. +Note that you must target ``localhost`` as the playbook is run locally on the +charm host:: + + --- + - hosts: localhost + tasks: + - name: Template a file + template: + src: templates/example.j2 + dest: /tmp/example.j2 + + - name: Print all variables available to Ansible + debug: + var: vars + +Read more online about `playbooks`_ and standard Ansible `modules`_. + +.. _playbooks: https://docs.ansible.com/ansible/latest/user_guide/playbooks.html +.. _modules: https://docs.ansible.com/ansible/latest/user_guide/modules.html + +A further feature of the Ansible hooks is to provide a light weight "action" +scripting tool. 
This is a decorator that you apply to a function, and that +function can now receive cli args, and can pass extra args to the playbook:: + + @hooks.action() + def some_action(amount, force="False"): + "Usage: some-action AMOUNT [force=True]" # <-- shown on error + # process the arguments + # do some calls + # return extra-vars to be passed to ansible-playbook + return { + 'amount': int(amount), + 'type': force, + } + +You can now create a symlink to hooks.py that can be invoked like a hook, but +with cli params:: + + # link actions/some-action to hooks/hooks.py + + actions/some-action amount=10 force=true + +Install Ansible via pip +======================= + +If you want to install a specific version of Ansible via pip instead of +``install_ansible_support`` which uses APT, consider using the layer options +of `layer-basic`_ to install Ansible in a virtualenv:: + + options: + basic: + python_packages: ['ansible==2.9.0'] + include_system_packages: true + use_venv: true + +.. _layer-basic: https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#layer-configuration + +""" +import os +import json +import stat +import subprocess +import functools + +import charmhelpers.contrib.templating.contexts +import charmhelpers.core.host +import charmhelpers.core.hookenv +import charmhelpers.fetch + + +charm_dir = os.environ.get('CHARM_DIR', '') +ansible_hosts_path = '/etc/ansible/hosts' +# Ansible will automatically include any vars in the following +# file in its inventory when run locally. +ansible_vars_path = '/etc/ansible/host_vars/localhost' + + +def install_ansible_support(from_ppa=True, ppa_location='ppa:ansible/ansible'): + """Installs Ansible via APT. + + By default this installs Ansible from the `PPA`_ linked from + the Ansible `website`_ or from a PPA set in ``ppa_location``. + + .. _PPA: https://launchpad.net/~ansible/+archive/ubuntu/ansible + .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu + + If ``from_ppa`` is ``False``, then Ansible will be installed from + Ubuntu's Universe repositories. + """ + if from_ppa: + charmhelpers.fetch.add_source(ppa_location) + charmhelpers.fetch.apt_update(fatal=True) + charmhelpers.fetch.apt_install('ansible') + with open(ansible_hosts_path, 'w+') as hosts_file: + hosts_file.write('localhost ansible_connection=local ansible_remote_tmp=/root/.ansible/tmp') + + +def apply_playbook(playbook, tags=None, extra_vars=None): + """Run a playbook. + + This helper runs a playbook with juju state variables as context, + therefore variables set in application config can be used directly. + List of tags (--tags) and dictionary with extra_vars (--extra-vars) + can be passed as additional parameters. + + Read more about playbook `_variables`_ online. + + .. 
_variables: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html + + Example:: + + # Run ansible/playbook.yaml with tag install and pass extra + # variables var_a and var_b + apply_playbook( + playbook='ansible/playbook.yaml', + tags=['install'], + extra_vars={'var_a': 'val_a', 'var_b': 'val_b'} + ) + + # Run ansible/playbook.yaml with tag config and extra variable nested, + # which is passed as json and can be used as dictionary in playbook + apply_playbook( + playbook='ansible/playbook.yaml', + tags=['config'], + extra_vars={'nested': {'a': 'value1', 'b': 'value2'}} + ) + + # Custom config file can be passed within extra_vars + apply_playbook( + playbook='ansible/playbook.yaml', + extra_vars="@some_file.json" + ) + + """ + tags = tags or [] + tags = ",".join(tags) + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + ansible_vars_path, namespace_separator='__', + allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR)) + + # we want ansible's log output to be unbuffered + env = os.environ.copy() + env['PYTHONUNBUFFERED'] = "1" + call = [ + 'ansible-playbook', + '-c', + 'local', + playbook, + ] + if tags: + call.extend(['--tags', '{}'.format(tags)]) + if extra_vars: + call.extend(['--extra-vars', json.dumps(extra_vars)]) + subprocess.check_call(call, env=env) + + +class AnsibleHooks(charmhelpers.core.hookenv.Hooks): + """Run a playbook with the hook-name as the tag. + + This helper builds on the standard hookenv.Hooks helper, + but additionally runs the playbook with the hook-name specified + using --tags (ie. running all the tasks tagged with the hook-name). + + Example:: + + hooks = AnsibleHooks(playbook_path='ansible/my_machine_state.yaml') + + # All the tasks within my_machine_state.yaml tagged with 'install' + # will be run automatically after do_custom_work() + @hooks.hook() + def install(): + do_custom_work() + + # For most of your hooks, you won't need to do anything other + # than run the tagged tasks for the hook: + @hooks.hook('config-changed', 'start', 'stop') + def just_use_playbook(): + pass + + # As a convenience, you can avoid the above noop function by specifying + # the hooks which are handled by ansible-only and they'll be registered + # for you: + # hooks = AnsibleHooks( + # 'ansible/my_machine_state.yaml', + # default_hooks=['config-changed', 'start', 'stop']) + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, playbook_path, default_hooks=None): + """Register any hooks handled by ansible.""" + super(AnsibleHooks, self).__init__() + + self._actions = {} + self.playbook_path = playbook_path + + default_hooks = default_hooks or [] + + def noop(*args, **kwargs): + pass + + for hook in default_hooks: + self.register(hook, noop) + + def register_action(self, name, function): + """Register a hook""" + self._actions[name] = function + + def execute(self, args): + """Execute the hook followed by the playbook using the hook as tag.""" + hook_name = os.path.basename(args[0]) + extra_vars = None + if hook_name in self._actions: + extra_vars = self._actions[hook_name](args[1:]) + else: + super(AnsibleHooks, self).execute(args) + + charmhelpers.contrib.ansible.apply_playbook( + self.playbook_path, tags=[hook_name], extra_vars=extra_vars) + + def action(self, *action_names): + """Decorator, registering them as actions""" + def action_wrapper(decorated): + + @functools.wraps(decorated) + def wrapper(argv): + kwargs = dict(arg.split('=') for arg in 
argv) + try: + return decorated(**kwargs) + except TypeError as e: + if decorated.__doc__: + e.args += (decorated.__doc__,) + raise + + self.register_action(decorated.__name__, wrapper) + if '_' in decorated.__name__: + self.register_action( + decorated.__name__.replace('_', '-'), wrapper) + + return wrapper + + return action_wrapper diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/benchmark/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/benchmark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c35f7fe785a29519a257f1ed5aa33fdfa19129c7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/benchmark/__init__.py @@ -0,0 +1,124 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +import time +import os +from distutils.spawn import find_executable + +from charmhelpers.core.hookenv import ( + in_relation_hook, + relation_ids, + relation_set, + relation_get, +) + + +def action_set(key, val): + if find_executable('action-set'): + action_cmd = ['action-set'] + + if isinstance(val, dict): + for k, v in iter(val.items()): + action_set('%s.%s' % (key, k), v) + return True + + action_cmd.append('%s=%s' % (key, val)) + subprocess.check_call(action_cmd) + return True + return False + + +class Benchmark(): + """ + Helper class for the `benchmark` interface. + + :param list actions: Define the actions that are also benchmarks + + From inside the benchmark-relation-changed hook, you would + Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom']) + + Examples: + + siege = Benchmark(['siege']) + siege.start() + [... run siege ...] + # The higher the score, the better the benchmark + siege.set_composite_score(16.70, 'trans/sec', 'desc') + siege.finish() + + + """ + + BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing + + required_keys = [ + 'hostname', + 'port', + 'graphite_port', + 'graphite_endpoint', + 'api_port' + ] + + def __init__(self, benchmarks=None): + if in_relation_hook(): + if benchmarks is not None: + for rid in sorted(relation_ids('benchmark')): + relation_set(relation_id=rid, relation_settings={ + 'benchmarks': ",".join(benchmarks) + }) + + # Check the relation data + config = {} + for key in self.required_keys: + val = relation_get(key) + if val is not None: + config[key] = val + else: + # We don't have all of the required keys + config = {} + break + + if len(config): + with open(self.BENCHMARK_CONF, 'w') as f: + for key, val in iter(config.items()): + f.write("%s=%s\n" % (key, val)) + + @staticmethod + def start(): + action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + """ + If the collectd charm is also installed, tell it to send a snapshot + of the current profile data. 
+ """ + COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' + if os.path.exists(COLLECT_PROFILE_DATA): + subprocess.check_output([COLLECT_PROFILE_DATA]) + + @staticmethod + def finish(): + action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + @staticmethod + def set_composite_score(value, units, direction='asc'): + """ + Set the composite score for a benchmark run. This is a single number + representative of the benchmark results. This could be the most + important metric, or an amalgamation of metric scores. + """ + return action_set( + "meta.composite", + {'value': value, 'units': units, 'direction': direction} + ) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmhelpers/IMPORT b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmhelpers/IMPORT new file mode 100644 index 0000000000000000000000000000000000000000..d41cb041f45cb19b87aee7d3f5fd4d47a3d3fb2d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmhelpers/IMPORT @@ -0,0 +1,4 @@ +Source lp:charm-tools/trunk + +charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py +charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmhelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmhelpers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ed63e8121e23a8e01ccb8ddcbf9aea34543d3666 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmhelpers/__init__.py @@ -0,0 +1,203 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
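+# A hedged usage sketch of the deprecated client-side helpers defined
+# below; the service name and URL are hypothetical:
+#
+#     wait_for_unit('wordpress', timeout=600)
+#     page = wait_for_page_contents('http://10.0.0.1/', 'WordPress')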
+
+import warnings
+warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning)  # noqa
+
+import operator
+import tempfile
+import time
+import yaml
+import subprocess
+
+import six
+if six.PY3:
+    from urllib.request import urlopen
+    from urllib.error import (HTTPError, URLError)
+else:
+    from urllib2 import (urlopen, HTTPError, URLError)
+
+"""Helper functions for writing Juju charms in Python."""
+
+__metaclass__ = type
+__all__ = [
+    # 'get_config',             # core.hookenv.config()
+    # 'log',                    # core.hookenv.log()
+    # 'log_entry',              # core.hookenv.log()
+    # 'log_exit',               # core.hookenv.log()
+    # 'relation_get',           # core.hookenv.relation_get()
+    # 'relation_set',           # core.hookenv.relation_set()
+    # 'relation_ids',           # core.hookenv.relation_ids()
+    # 'relation_list',          # core.hookenv.relation_units()
+    # 'config_get',             # core.hookenv.config()
+    # 'unit_get',               # core.hookenv.unit_get()
+    # 'open_port',              # core.hookenv.open_port()
+    # 'close_port',             # core.hookenv.close_port()
+    # 'service_control',        # core.host.service()
+    'unit_info',                # client-side, NOT IMPLEMENTED
+    'wait_for_machine',         # client-side, NOT IMPLEMENTED
+    'wait_for_page_contents',   # client-side, NOT IMPLEMENTED
+    'wait_for_relation',        # client-side, NOT IMPLEMENTED
+    'wait_for_unit',            # client-side, NOT IMPLEMENTED
+]
+
+
+SLEEP_AMOUNT = 0.1
+
+
+# We create a juju_status wrapper here because it makes testing much,
+# much easier.  It must return the raw output so that callers such as
+# unit_info() and get_machine_data() can parse it with yaml.safe_load().
+def juju_status():
+    return subprocess.check_output(['juju', 'status']).decode('UTF-8')
+
+# re-implemented as charmhelpers.fetch.configure_sources()
+# def configure_source(update=False):
+#    source = config_get('source')
+#    if ((source.startswith('ppa:') or
+#         source.startswith('cloud:') or
+#         source.startswith('http:'))):
+#        run('add-apt-repository', source)
+#    if source.startswith("http:"):
+#        run('apt-key', 'import', config_get('key'))
+#    if update:
+#        run('apt-get', 'update')
+
+
+# DEPRECATED: client-side only
+def make_charm_config_file(charm_config):
+    charm_config_file = tempfile.NamedTemporaryFile(mode='w+')
+    charm_config_file.write(yaml.dump(charm_config))
+    charm_config_file.flush()
+    # The NamedTemporaryFile instance is returned instead of just the name
+    # because we want to take advantage of garbage collection-triggered
+    # deletion of the temp file when it goes out of scope in the caller.
+    return charm_config_file
+
+
+# DEPRECATED: client-side only
+def unit_info(service_name, item_name, data=None, unit=None):
+    if data is None:
+        data = yaml.safe_load(juju_status())
+    service = data['services'].get(service_name)
+    if service is None:
+        # XXX 2012-02-08 gmb:
+        #     This allows us to cope with the race condition that we
+        #     have between deploying a service and having it come up in
+        #     `juju status`. We could probably do with cleaning it up so
+        #     that it fails a bit more noisily after a while.
+        return ''
+    units = service['units']
+    if unit is not None:
+        item = units[unit][item_name]
+    else:
+        # It might seem odd to sort the units here, but we do it to
+        # ensure that when no unit is specified, the first unit for the
+        # service (or at least the one with the lowest number) is the
+        # one whose data gets returned.
+        sorted_unit_names = sorted(units.keys())
+        item = units[sorted_unit_names[0]][item_name]
+    return item
+
+
+# DEPRECATED: client-side only
+def get_machine_data():
+    return yaml.safe_load(juju_status())['machines']
+
+
+# DEPRECATED: client-side only
+def wait_for_machine(num_machines=1, timeout=300):
+    """Wait `timeout` seconds for `num_machines` machines to come up.
+
+    This wait_for...
function can be called by other wait_for functions + whose timeouts might be too short in situations where only a bare + Juju setup has been bootstrapped. + + :return: A tuple of (num_machines, time_taken). This is used for + testing. + """ + # You may think this is a hack, and you'd be right. The easiest way + # to tell what environment we're working in (LXC vs EC2) is to check + # the dns-name of the first machine. If it's localhost we're in LXC + # and we can just return here. + if get_machine_data()[0]['dns-name'] == 'localhost': + return 1, 0 + start_time = time.time() + while True: + # Drop the first machine, since it's the Zookeeper and that's + # not a machine that we need to wait for. This will only work + # for EC2 environments, which is why we return early above if + # we're in LXC. + machine_data = get_machine_data() + non_zookeeper_machines = [ + machine_data[key] for key in list(machine_data.keys())[1:]] + if len(non_zookeeper_machines) >= num_machines: + all_machines_running = True + for machine in non_zookeeper_machines: + if machine.get('instance-state') != 'running': + all_machines_running = False + break + if all_machines_running: + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for service to start') + time.sleep(SLEEP_AMOUNT) + return num_machines, time.time() - start_time + + +# DEPRECATED: client-side only +def wait_for_unit(service_name, timeout=480): + """Wait `timeout` seconds for a given service name to come up.""" + wait_for_machine(num_machines=1) + start_time = time.time() + while True: + state = unit_info(service_name, 'agent-state') + if 'error' in state or state == 'started': + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for service to start') + time.sleep(SLEEP_AMOUNT) + if state != 'started': + raise RuntimeError('unit did not start, agent-state: ' + state) + + +# DEPRECATED: client-side only +def wait_for_relation(service_name, relation_name, timeout=120): + """Wait `timeout` seconds for a given relation to come up.""" + start_time = time.time() + while True: + relation = unit_info(service_name, 'relations').get(relation_name) + if relation is not None and relation['state'] == 'up': + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for relation to be up') + time.sleep(SLEEP_AMOUNT) + + +# DEPRECATED: client-side only +def wait_for_page_contents(url, contents, timeout=120, validate=None): + if validate is None: + validate = operator.contains + start_time = time.time() + while True: + try: + stream = urlopen(url) + except (HTTPError, URLError): + pass + else: + page = stream.read() + if validate(page, contents): + return page + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for contents of ' + url) + time.sleep(SLEEP_AMOUNT) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/IMPORT b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/IMPORT new file mode 100644 index 0000000000000000000000000000000000000000..554fddda9f48d6d17abaff942834493b2cca2dbe --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/IMPORT @@ -0,0 +1,14 @@ +Source: lp:charmsupport/trunk + +charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py +charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py +charmsupport/charmsupport/host.py -> 
charm-helpers/charmhelpers/contrib/charmsupport/host.py +charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py +charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py + +charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py +charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py +charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py +charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py + +charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/nrpe.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/nrpe.py new file mode 100644 index 0000000000000000000000000000000000000000..d775861b0868a174a3f0f4a8d4a42320272a4cb0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/nrpe.py @@ -0,0 +1,500 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Compatibility with the nrpe-external-master charm""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Matthew Wedgwood + +import subprocess +import pwd +import grp +import os +import glob +import shutil +import re +import shlex +import yaml + +from charmhelpers.core.hookenv import ( + config, + hook_name, + local_unit, + log, + relation_get, + relation_ids, + relation_set, + relations_of_type, +) + +from charmhelpers.core.host import service +from charmhelpers.core import host + +# This module adds compatibility with the nrpe-external-master and plain nrpe +# subordinate charms. To use it in your charm: +# +# 1. Update metadata.yaml +# +# provides: +# (...) +# nrpe-external-master: +# interface: nrpe-external-master +# scope: container +# +# and/or +# +# provides: +# (...) 
+# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. +# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. +# If left empty, the nagios_context will be used as the servicegroup +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 4.a If your charm is a subordinate charm set primary=False +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE(primary=False) +# +# 5. ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_.@]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _get_check_filename(self): + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) + + def _get_service_filename(self, hostname): + return os.path.join(NRPE.nagios_exportdir, + 'service__{}_{}.cfg'.format(hostname, self.command)) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return command + log('Check command not found: {}'.format(parts[0])) + return '' + + def _remove_service_files(self): + if not 
os.path.exists(NRPE.nagios_exportdir): + return + for f in os.listdir(NRPE.nagios_exportdir): + if f.endswith('_{}.cfg'.format(self.command)): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + def remove(self, hostname): + nrpe_check_file = self._get_check_filename() + if os.path.exists(nrpe_check_file): + os.remove(nrpe_check_file) + self._remove_service_files() + + def write(self, nagios_context, hostname, nagios_servicegroups): + nrpe_check_file = self._get_check_filename() + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + if nagios_servicegroups: + nrpe_check_config.write( + "# The following header was added automatically by juju\n") + nrpe_check_config.write( + "# Modifying it will affect nagios monitoring and alerting\n") + nrpe_check_config.write( + "# servicegroups: {}\n".format(nagios_servicegroups)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname, + nagios_servicegroups) + + def write_service_config(self, nagios_context, hostname, + nagios_servicegroups): + self._remove_service_files() + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_servicegroups, + 'description': self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = self._get_service_filename(hostname) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server + + def __init__(self, hostname=None, primary=True): + super(NRPE, self).__init__() + self.config = config() + self.primary = primary + self.nagios_context = self.config['nagios_context'] + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: + self.nagios_servicegroups = self.config['nagios_servicegroups'] + else: + self.nagios_servicegroups = self.nagios_context + self.unit_name = local_unit().replace('/', '-') + if hostname: + self.hostname = hostname + else: + nagios_hostname = get_nagios_hostname() + if nagios_hostname: + self.hostname = nagios_hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + # Iff in an nrpe-external-master relation hook, set primary status + relation = relation_ids('nrpe-external-master') + if relation: + log("Setting charm primary status {}".format(primary)) + for rid in relation: + relation_set(relation_id=rid, relation_settings={'primary': self.primary}) + self.remove_check_queue = set() + + def add_check(self, *args, **kwargs): + shortname = None + if kwargs.get('shortname') is None: + if len(args) > 0: + shortname = args[0] + else: + shortname = kwargs['shortname'] + + self.checks.append(Check(*args, **kwargs)) + try: + self.remove_check_queue.remove(shortname) + except KeyError: + pass + + def remove_check(self, *args, **kwargs): + if kwargs.get('shortname') is None: + raise ValueError('shortname of check must be specified') + + # Use sensible defaults if they're not specified - 
these are not
+        # actually used during removal, but they're required for constructing
+        # the Check object; check_disk is chosen because it's part of the
+        # nagios-plugins-basic package.
+        if kwargs.get('check_cmd') is None:
+            kwargs['check_cmd'] = 'check_disk'
+        if kwargs.get('description') is None:
+            kwargs['description'] = ''
+
+        check = Check(*args, **kwargs)
+        check.remove(self.hostname)
+        self.remove_check_queue.add(kwargs['shortname'])
+
+    def write(self):
+        try:
+            nagios_uid = pwd.getpwnam('nagios').pw_uid
+            nagios_gid = grp.getgrnam('nagios').gr_gid
+        except Exception:
+            log("Nagios user not set up, nrpe checks not updated")
+            return
+
+        if not os.path.exists(NRPE.nagios_logdir):
+            os.mkdir(NRPE.nagios_logdir)
+            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
+
+        nrpe_monitors = {}
+        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
+        for nrpecheck in self.checks:
+            nrpecheck.write(self.nagios_context, self.hostname,
+                            self.nagios_servicegroups)
+            nrpe_monitors[nrpecheck.shortname] = {
+                "command": nrpecheck.command,
+            }
+
+        # update-status hooks are configured to fire every 5 minutes by
+        # default. When nagios-nrpe-server is restarted, the nagios server
+        # reports checks failing, causing unnecessary alerts. Let's not
+        # restart on update-status hooks.
+        if not hook_name() == 'update-status':
+            service('restart', 'nagios-nrpe-server')
+
+        monitor_ids = relation_ids("local-monitors") + \
+            relation_ids("nrpe-external-master")
+        for rid in monitor_ids:
+            reldata = relation_get(unit=local_unit(), rid=rid)
+            if 'monitors' in reldata:
+                # update the existing set of monitors with the new data
+                old_monitors = yaml.safe_load(reldata['monitors'])
+                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
+                # remove keys that are in the remove_check_queue
+                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
+                                     if k not in self.remove_check_queue}
+                # update/add nrpe_monitors
+                old_nrpe_monitors.update(nrpe_monitors)
+                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
+                # write back to the relation
+                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
+            else:
+                # write a brand new set of monitors, as none exist yet
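+                # (illustrative shape of the YAML written, assuming a check
+                #  was added with shortname 'myservice':
+                #    monitors:
+                #      remote:
+                #        nrpe:
+                #          myservice: {command: check_myservice})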
+                relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+        self.remove_check_queue.clear()
+
+
+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_host_context
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_host_context' in rel:
+            return rel['nagios_host_context']
+
+
+def get_nagios_hostname(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_hostname
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_hostname' in rel:
+            return rel['nagios_hostname']
+
+
+def get_nagios_unit_name(relation_name='nrpe-external-master'):
+    """
+    Return the nagios unit name prepended with host_context if needed
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    host_context = get_nagios_hostcontext(relation_name)
+    if host_context:
+        unit = "%s:%s" % (host_context, local_unit())
+    else:
+        unit = local_unit()
+    return unit
+
+
+def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
+    """
+    Add checks for each service in list
+
+    :param NRPE nrpe: NRPE object to add check to
+    :param list services: List of services to check
+    :param str unit_name: Unit name to use in check description
+    :param bool immediate_check: For sysv init, run the service check immediately
+    """
+    for svc in services:
+        # Don't add a check for these services from neutron-gateway
+        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
+            continue
+
+        upstart_init = '/etc/init/%s.conf' % svc
+        sysv_init = '/etc/init.d/%s' % svc
+
+        if host.init_is_systemd():
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_systemd.py %s' % svc
+            )
+        elif os.path.exists(upstart_init):
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_upstart_job %s' % svc
+            )
+        elif os.path.exists(sysv_init):
+            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
+            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
+            croncmd = (
+                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
+                '-e -s /etc/init.d/%s status' % svc
+            )
+            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
+            f = open(cronpath, 'w')
+            f.write(cron_file)
+            f.close()
+            nrpe.add_check(
+                shortname=svc,
+                description='service check {%s}' % unit_name,
+                check_cmd='check_status_file.py -f %s' % checkpath,
+            )
+            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
+            # (LP: #1670223).
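+            # The immediate check below seeds the status file by running
+            # the same command the cron entry above runs every 5 minutes,
+            # so check_status_file.py has data on its first poll.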
+            if immediate_check and os.path.isdir(nrpe.homedir):
+                f = open(checkpath, 'w')
+                subprocess.call(
+                    croncmd.split(),
+                    stdout=f,
+                    stderr=subprocess.STDOUT
+                )
+                f.close()
+                os.chmod(checkpath, 0o644)
+
+
+def copy_nrpe_checks(nrpe_files_dir=None):
+    """
+    Copy the nrpe checks into place
+
+    """
+    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
+    if not os.path.exists(NAGIOS_PLUGINS):
+        os.makedirs(NAGIOS_PLUGINS)
+    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
+        if os.path.isfile(fname):
+            shutil.copy2(fname,
+                         os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
+
+
+def add_haproxy_checks(nrpe, unit_name):
+    """
+    Add checks for each service in list
+
+    :param NRPE nrpe: NRPE object to add check to
+    :param str unit_name: Unit name to use in check description
+    """
+    nrpe.add_check(
+        shortname='haproxy_servers',
+        description='Check HAProxy {%s}' % unit_name,
+        check_cmd='check_haproxy.sh')
+    nrpe.add_check(
+        shortname='haproxy_queue',
+        description='Check HAProxy queue depth {%s}' % unit_name,
+        check_cmd='check_haproxy_queue_depth.sh')
+
+
+def remove_deprecated_check(nrpe, deprecated_services):
+    """
+    Remove checks for deprecated services in list
+
+    :param nrpe: NRPE object to remove check from
+    :type nrpe: NRPE
+    :param deprecated_services: List of deprecated services that are removed
+    :type deprecated_services: list
+    """
+    for dep_svc in deprecated_services:
+        log('Deprecated service: {}'.format(dep_svc))
+        nrpe.remove_check(shortname=dep_svc)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/volumes.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/volumes.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ea43f0888cd92ac4344f908aa7c9d0afe7568ed
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/charmsupport/volumes.py
@@ -0,0 +1,173 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Functions for managing volumes in juju units. One volume is supported per unit.
+Subordinates may have their own storage, provided it is on its own partition.
+
+Configuration stanzas::
+
+  volume-ephemeral:
+    type: boolean
+    default: true
+    description: >
+      If false, a volume is mounted as specified in "volume-map"
+      If true, ephemeral storage will be used, meaning that log data
+      will only exist as long as the machine. YOU HAVE BEEN WARNED.
+  volume-map:
+    type: string
+    default: {}
+    description: >
+      YAML map of units to device names, e.g:
+        "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
+      Service units will raise a configure-error if volume-ephemeral
+      is 'true' and no volume-map value is set. Use 'juju set' to set a
+      value and 'juju resolved' to complete configuration.
+
+Usage::
+
+    from charmsupport.volumes import configure_volume, VolumeConfigurationError
+    from charmsupport.hookenv import log, ERROR
+    def pre_mount_hook():
+        stop_service('myservice')
+    def post_mount_hook():
+        start_service('myservice')
+
+    if __name__ == '__main__':
+        try:
+            configure_volume(before_change=pre_mount_hook,
+                             after_change=post_mount_hook)
+        except VolumeConfigurationError:
+            log('Storage could not be configured', ERROR)
+
+'''
+
+# XXX: Known limitations
+# - fstab is neither consulted nor updated
+
+import os
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+import yaml
+
+
+MOUNT_BASE = '/srv/juju/volumes'
+
+
+class VolumeConfigurationError(Exception):
+    '''Volume configuration data is missing or invalid'''
+    pass
+
+
+def get_config():
+    '''Gather and sanity-check volume configuration data'''
+    volume_config = {}
+    config = hookenv.config()
+
+    errors = False
+
+    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
+        volume_config['ephemeral'] = True
+    else:
+        volume_config['ephemeral'] = False
+
+    try:
+        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
+    except yaml.YAMLError as e:
+        hookenv.log("Error parsing YAML volume-map: {}".format(e),
+                    hookenv.ERROR)
+        errors = True
+    if volume_map is None:
+        # probably an empty string
+        volume_map = {}
+    elif not isinstance(volume_map, dict):
+        hookenv.log("Volume-map should be a dictionary, not {}".format(
+            type(volume_map)))
+        errors = True
+
+    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
+    if volume_config['device'] and volume_config['ephemeral']:
+        # asked for ephemeral storage but also defined a volume ID
+        hookenv.log('A volume is defined for this unit, but ephemeral '
+                    'storage was requested', hookenv.ERROR)
+        errors = True
+    elif not volume_config['device'] and not volume_config['ephemeral']:
+        # asked for permanent storage but did not define volume ID
+        hookenv.log('Ephemeral storage was requested, but there is no volume '
+                    'defined for this unit.', hookenv.ERROR)
+        errors = True
+
+    unit_mount_name = hookenv.local_unit().replace('/', '-')
+    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
+
+    if errors:
+        return None
+    return volume_config
+
+
+def mount_volume(config):
+    if os.path.exists(config['mountpoint']):
+        if not os.path.isdir(config['mountpoint']):
+            hookenv.log('Not a directory: {}'.format(config['mountpoint']))
+            raise VolumeConfigurationError()
+    else:
+        host.mkdir(config['mountpoint'])
+    if os.path.ismount(config['mountpoint']):
+        unmount_volume(config)
+    if not host.mount(config['device'], config['mountpoint'], persist=True):
+        raise VolumeConfigurationError()
+
+
+def unmount_volume(config):
+    if os.path.ismount(config['mountpoint']):
+        if not host.umount(config['mountpoint'], persist=True):
+            raise VolumeConfigurationError()
+
+
+def managed_mounts():
+    '''List of all mounted managed volumes'''
+    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
+
+
+def configure_volume(before_change=lambda: None, after_change=lambda: None):
+    '''Set up storage (or don't) according to the charm's volume configuration.
+ Returns the mount point or "ephemeral". before_change and after_change + are optional functions to be called if the volume configuration changes. + ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/database/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/database/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..64fac9def6101c1de83cd5dafe7fdb5213d6a955 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/database/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/database/mysql.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/database/mysql.py new file mode 100644 index 0000000000000000000000000000000000000000..c9ecce5f793eedeb963557d109deaf5e1271e65f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/database/mysql.py @@ -0,0 +1,821 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Helper for working with a MySQL database""" +import collections +import copy +import json +import re +import sys +import platform +import os +import glob +import six + +# from string import upper + +from charmhelpers.core.host import ( + CompareHostReleases, + lsb_release, + mkdir, + pwgen, + write_file +) +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + related_units, + unit_get, + log, + DEBUG, + INFO, + WARNING, + leader_get, + leader_set, + is_leader, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, + filter_installed_packages, +) +from charmhelpers.contrib.network.ip import get_host_ip + +try: + import MySQLdb +except ImportError: + apt_update(fatal=True) + if six.PY2: + apt_install(filter_installed_packages(['python-mysqldb']), fatal=True) + else: + apt_install(filter_installed_packages(['python3-mysqldb']), fatal=True) + import MySQLdb + + +class MySQLSetPasswordError(Exception): + pass + + +class MySQLHelper(object): + + def __init__(self, rpasswdf_template, upasswdf_template, host='localhost', + migrate_passwd_to_leader_storage=True, + delete_ondisk_passwd_file=True, user="root", password=None, port=None): + self.user = user + self.host = host + self.password = password + self.port = port + + # Password file path templates + self.root_passwd_file_template = rpasswdf_template + self.user_passwd_file_template = upasswdf_template + + self.migrate_passwd_to_leader_storage = migrate_passwd_to_leader_storage + # If we migrate we have the option to delete local copy of root passwd + self.delete_ondisk_passwd_file = delete_ondisk_passwd_file + self.connection = None + + def connect(self, user='root', password=None, host=None, port=None): + _connection_info = { + "user": user or self.user, + "passwd": password or self.password, + "host": host or self.host + } + # port cannot be None but we also do not want to specify it unless it + # has been explicit set. 
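+        # e.g. connect(user='dbadmin', password='secret', host='10.0.0.5')
+        # (hypothetical values) leaves port unset, so MySQLdb falls back
+        # to its default port, 3306.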
+        port = port or self.port
+        if port is not None:
+            _connection_info["port"] = port
+
+        log("Opening db connection for %s@%s" % (user, host), level=DEBUG)
+        self.connection = MySQLdb.connect(**_connection_info)
+
+    def database_exists(self, db_name):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("SHOW DATABASES")
+            databases = [i[0] for i in cursor.fetchall()]
+        finally:
+            cursor.close()
+
+        return db_name in databases
+
+    def create_database(self, db_name):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("CREATE DATABASE `{}` CHARACTER SET UTF8"
+                           .format(db_name))
+        finally:
+            cursor.close()
+
+    def grant_exists(self, db_name, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
+                      "TO '{}'@'{}'".format(db_name, db_user, remote_ip)
+        try:
+            cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
+                                                              remote_ip))
+            grants = [i[0] for i in cursor.fetchall()]
+        except MySQLdb.OperationalError:
+            return False
+        finally:
+            cursor.close()
+
+        # TODO: review for different grants
+        return priv_string in grants
+
+    def create_grant(self, db_name, db_user, remote_ip, password):
+        cursor = self.connection.cursor()
+        try:
+            # TODO: review for different grants
+            cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}' "
+                           "IDENTIFIED BY '{}'".format(db_name,
+                                                       db_user,
+                                                       remote_ip,
+                                                       password))
+        finally:
+            cursor.close()
+
+    def create_admin_grant(self, db_user, remote_ip, password):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
+                           "IDENTIFIED BY '{}'".format(db_user,
+                                                       remote_ip,
+                                                       password))
+        finally:
+            cursor.close()
+
+    def cleanup_grant(self, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        try:
+            # Remove the user's entries from the ACL table.
+            cursor.execute("DELETE FROM mysql.user WHERE user='{}' "
+                           "AND HOST='{}'".format(db_user,
+                                                  remote_ip))
+        finally:
+            cursor.close()
+
+    def flush_priviledges(self):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("FLUSH PRIVILEGES")
+        finally:
+            cursor.close()
+
+    def execute(self, sql):
+        """Execute arbitrary SQL against the database."""
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute(sql)
+        finally:
+            cursor.close()
+
+    def select(self, sql):
+        """
+        Execute arbitrary SQL select query against the database
+        and return the results.
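+
+        For example (illustrative),
+        ``select("SELECT user, host FROM mysql.user")`` might return
+        ``[['root', 'localhost'], ['root', '::1']]``.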
+ + :param sql: SQL select query to execute + :type sql: string + :returns: SQL select query result + :rtype: list of lists + :raises: MySQLdb.Error + """ + cursor = self.connection.cursor() + try: + cursor.execute(sql) + results = [list(i) for i in cursor.fetchall()] + finally: + cursor.close() + return results + + def migrate_passwords_to_leader_storage(self, excludes=None): + """Migrate any passwords storage on disk to leader storage.""" + if not is_leader(): + log("Skipping password migration as not the lead unit", + level=DEBUG) + return + dirname = os.path.dirname(self.root_passwd_file_template) + path = os.path.join(dirname, '*.passwd') + for f in glob.glob(path): + if excludes and f in excludes: + log("Excluding %s from leader storage migration" % (f), + level=DEBUG) + continue + + key = os.path.basename(f) + with open(f, 'r') as passwd: + _value = passwd.read().strip() + + try: + leader_set(settings={key: _value}) + + if self.delete_ondisk_passwd_file: + os.unlink(f) + except ValueError: + # NOTE cluster relation not yet ready - skip for now + pass + + def get_mysql_password_on_disk(self, username=None, password=None): + """Retrieve, generate or store a mysql password for the provided + username on disk.""" + if username: + template = self.user_passwd_file_template + passwd_file = template.format(username) + else: + passwd_file = self.root_passwd_file_template + + _password = None + if os.path.exists(passwd_file): + log("Using existing password file '%s'" % passwd_file, level=DEBUG) + with open(passwd_file, 'r') as passwd: + _password = passwd.read().strip() + else: + log("Generating new password file '%s'" % passwd_file, level=DEBUG) + if not os.path.isdir(os.path.dirname(passwd_file)): + # NOTE: need to ensure this is not mysql root dir (which needs + # to be mysql readable) + mkdir(os.path.dirname(passwd_file), owner='root', group='root', + perms=0o770) + # Force permissions - for some reason the chmod in makedirs + # fails + os.chmod(os.path.dirname(passwd_file), 0o770) + + _password = password or pwgen(length=32) + write_file(passwd_file, _password, owner='root', group='root', + perms=0o660) + + return _password + + def passwd_keys(self, username): + """Generator to return keys used to store passwords in peer store. + + NOTE: we support both legacy and new format to support mysql + charm prior to refactor. This is necessary to avoid LP 1451890. + """ + keys = [] + if username == 'mysql': + log("Bad username '%s'" % (username), level=WARNING) + + if username: + # IMPORTANT: *newer* format must be returned first + keys.append('mysql-%s.passwd' % (username)) + keys.append('%s.passwd' % (username)) + else: + keys.append('mysql.passwd') + + for key in keys: + yield key + + def get_mysql_password(self, username=None, password=None): + """Retrieve, generate or store a mysql password for the provided + username using peer relation cluster.""" + excludes = [] + + # First check peer relation. 
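+        # passwd_keys() yields the newer 'mysql-<username>.passwd' key before
+        # the legacy '<username>.passwd' key, so newer-format values win.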
+ try: + for key in self.passwd_keys(username): + _password = leader_get(key) + if _password: + break + + # If root password available don't update peer relation from local + if _password and not username: + excludes.append(self.root_passwd_file_template) + + except ValueError: + # cluster relation is not yet started; use on-disk + _password = None + + # If none available, generate new one + if not _password: + _password = self.get_mysql_password_on_disk(username, password) + + # Put on wire if required + if self.migrate_passwd_to_leader_storage: + self.migrate_passwords_to_leader_storage(excludes=excludes) + + return _password + + def get_mysql_root_password(self, password=None): + """Retrieve or generate mysql root password for service units.""" + return self.get_mysql_password(username=None, password=password) + + def set_mysql_password(self, username, password, current_password=None): + """Update a mysql password for the provided username changing the + leader settings + + To update root's password pass `None` in the username + + :param username: Username to change password of + :type username: str + :param password: New password for user. + :type password: str + :param current_password: Existing password for user. + :type current_password: str + """ + + if username is None: + username = 'root' + + # get root password via leader-get, it may be that in the past (when + # changes to root-password were not supported) the user changed the + # password, so leader-get is more reliable source than + # config.previous('root-password'). + rel_username = None if username == 'root' else username + if not current_password: + current_password = self.get_mysql_password(rel_username) + + # password that needs to be set + new_passwd = password + + # update password for all users (e.g. root@localhost, root@::1, etc) + try: + self.connect(user=username, password=current_password) + cursor = self.connection.cursor() + except MySQLdb.OperationalError as ex: + raise MySQLSetPasswordError(('Cannot connect using password in ' + 'leader settings (%s)') % ex, ex) + + try: + # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account + # fails when using SET PASSWORD so using UPDATE against the + # mysql.user table is needed, but changes to this table are not + # replicated across the cluster, so this update needs to run in + # all the nodes. 
More info at + # http://galeracluster.com/documentation-webpages/userchanges.html + release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) + if release < 'bionic': + SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = " + "PASSWORD( %s ) WHERE user = %s;") + else: + # PXC 5.7 (introduced in Bionic) uses authentication_string + SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET " + "authentication_string = " + "PASSWORD( %s ) WHERE user = %s;") + cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username)) + cursor.execute('FLUSH PRIVILEGES;') + self.connection.commit() + except MySQLdb.OperationalError as ex: + raise MySQLSetPasswordError('Cannot update password: %s' % str(ex), + ex) + finally: + cursor.close() + + # check the password was changed + try: + self.connect(user=username, password=new_passwd) + self.execute('select 1;') + except MySQLdb.OperationalError as ex: + raise MySQLSetPasswordError(('Cannot connect using new password: ' + '%s') % str(ex), ex) + + if not is_leader(): + log('Only the leader can set a new password in the relation', + level=DEBUG) + return + + for key in self.passwd_keys(rel_username): + _password = leader_get(key) + if _password: + log('Updating password for %s (%s)' % (key, rel_username), + level=DEBUG) + leader_set(settings={key: new_passwd}) + + def set_mysql_root_password(self, password, current_password=None): + """Update mysql root password changing the leader settings + + :param password: New password for user. + :type password: str + :param current_password: Existing password for user. + :type current_password: str + """ + self.set_mysql_password( + 'root', + password, + current_password=current_password) + + def normalize_address(self, hostname): + """Ensure that address returned is an IP address (i.e. not fqdn)""" + if config_get('prefer-ipv6'): + # TODO: add support for ipv6 dns + return hostname + + if hostname != unit_get('private-address'): + return get_host_ip(hostname, fallback=hostname) + + # Otherwise assume localhost + return '127.0.0.1' + + def get_allowed_units(self, database, username, relation_id=None): + """Get list of units with access grants for database with username. + + This is typically used to provide shared-db relations with a list of + which units have been granted access to the given database. 
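+
+    For example (illustrative values), a shared-db relation with grants in
+    place for two units might return set(['mediawiki/0', 'mediawiki/1']).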
+ """ + self.connect(password=self.get_mysql_root_password()) + allowed_units = set() + for unit in related_units(relation_id): + settings = relation_get(rid=relation_id, unit=unit) + # First check for setting with prefix, then without + for attr in ["%s_hostname" % (database), 'hostname']: + hosts = settings.get(attr, None) + if hosts: + break + + if hosts: + # hostname can be json-encoded list of hostnames + try: + hosts = json.loads(hosts) + except ValueError: + hosts = [hosts] + else: + hosts = [settings['private-address']] + + if hosts: + for host in hosts: + host = self.normalize_address(host) + if self.grant_exists(database, username, host): + log("Grant exists for host '%s' on db '%s'" % + (host, database), level=DEBUG) + if unit not in allowed_units: + allowed_units.add(unit) + else: + log("Grant does NOT exist for host '%s' on db '%s'" % + (host, database), level=DEBUG) + else: + log("No hosts found for grant check", level=INFO) + + return allowed_units + + def configure_db(self, hostname, database, username, admin=False): + """Configure access to database for username from hostname.""" + self.connect(password=self.get_mysql_root_password()) + if not self.database_exists(database): + self.create_database(database) + + remote_ip = self.normalize_address(hostname) + password = self.get_mysql_password(username) + if not self.grant_exists(database, username, remote_ip): + if not admin: + self.create_grant(database, username, remote_ip, password) + else: + self.create_admin_grant(username, remote_ip, password) + self.flush_priviledges() + + return password + + +# `_singleton_config_helper` stores the instance of the helper class that is +# being used during a hook invocation. +_singleton_config_helper = None + + +def get_mysql_config_helper(): + global _singleton_config_helper + if _singleton_config_helper is None: + _singleton_config_helper = MySQLConfigHelper() + return _singleton_config_helper + + +class MySQLConfigHelper(object): + """Base configuration helper for MySQL.""" + + # Going for the biggest page size to avoid wasted bytes. 
+ # InnoDB page size is 16MB + + DEFAULT_PAGE_SIZE = 16 * 1024 * 1024 + DEFAULT_INNODB_BUFFER_FACTOR = 0.50 + DEFAULT_INNODB_BUFFER_SIZE_MAX = 512 * 1024 * 1024 + + # Validation and lookups for InnoDB configuration + INNODB_VALID_BUFFERING_VALUES = [ + 'none', + 'inserts', + 'deletes', + 'changes', + 'purges', + 'all' + ] + INNODB_FLUSH_CONFIG_VALUES = { + 'fast': 2, + 'safest': 1, + 'unsafe': 0, + } + + def human_to_bytes(self, human): + """Convert human readable configuration options to bytes.""" + num_re = re.compile('^[0-9]+$') + if num_re.match(human): + return human + + factors = { + 'K': 1024, + 'M': 1048576, + 'G': 1073741824, + 'T': 1099511627776 + } + modifier = human[-1] + if modifier in factors: + return int(human[:-1]) * factors[modifier] + + if modifier == '%': + total_ram = self.human_to_bytes(self.get_mem_total()) + if self.is_32bit_system() and total_ram > self.sys_mem_limit(): + total_ram = self.sys_mem_limit() + factor = int(human[:-1]) * 0.01 + pctram = total_ram * factor + return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE)) + + raise ValueError("Can only convert K,M,G, or T") + + def is_32bit_system(self): + """Determine whether system is 32 or 64 bit.""" + try: + return sys.maxsize < 2 ** 32 + except OverflowError: + return False + + def sys_mem_limit(self): + """Determine the default memory limit for the current service unit.""" + if platform.machine() in ['armv7l']: + _mem_limit = self.human_to_bytes('2700M') # experimentally determined + else: + # Limit for x86 based 32bit systems + _mem_limit = self.human_to_bytes('4G') + + return _mem_limit + + def get_mem_total(self): + """Calculate the total memory in the current service unit.""" + with open('/proc/meminfo') as meminfo_file: + for line in meminfo_file: + key, mem = line.split(':', 2) + if key == 'MemTotal': + mtot, modifier = mem.strip().split(' ') + return '%s%s' % (mtot, modifier[0].upper()) + + def get_innodb_flush_log_at_trx_commit(self): + """Get value for innodb_flush_log_at_trx_commit. + + Use the innodb-flush-log-at-trx-commit or the tunning-level setting + translated by INNODB_FLUSH_CONFIG_VALUES to get the + innodb_flush_log_at_trx_commit value. + + :returns: Numeric value for innodb_flush_log_at_trx_commit + :rtype: Union[None, int] + """ + _iflatc = config_get('innodb-flush-log-at-trx-commit') + _tuning_level = config_get('tuning-level') + if _iflatc: + return _iflatc + elif _tuning_level: + return self.INNODB_FLUSH_CONFIG_VALUES.get(_tuning_level, 1) + + def get_innodb_change_buffering(self): + """Get value for innodb_change_buffering. + + Use the innodb-change-buffering validated against + INNODB_VALID_BUFFERING_VALUES to get the innodb_change_buffering value. + + :returns: String value for innodb_change_buffering. + :rtype: Union[None, str] + """ + _icb = config_get('innodb-change-buffering') + if _icb and _icb in self.INNODB_VALID_BUFFERING_VALUES: + return _icb + + def get_innodb_buffer_pool_size(self): + """Get value for innodb_buffer_pool_size. + + Return the number value of innodb-buffer-pool-size or dataset-size. If + neither is set, calculate a sane default based on total memory. + + :returns: Numeric value for innodb_buffer_pool_size. 
+        :rtype: int
+        """
+        total_memory = self.human_to_bytes(self.get_mem_total())
+
+        dataset_bytes = config_get('dataset-size')
+        innodb_buffer_pool_size = config_get('innodb-buffer-pool-size')
+
+        if innodb_buffer_pool_size:
+            innodb_buffer_pool_size = self.human_to_bytes(
+                innodb_buffer_pool_size)
+        elif dataset_bytes:
+            log("Option 'dataset-size' has been deprecated, please use "
+                "innodb_buffer_pool_size option instead", level="WARN")
+            innodb_buffer_pool_size = self.human_to_bytes(
+                dataset_bytes)
+        else:
+            # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB
+            #                  to ensure that deployments in containers
+            #                  without constraints don't try to consume
+            #                  silly amounts of memory.
+            innodb_buffer_pool_size = min(
+                int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR),
+                self.DEFAULT_INNODB_BUFFER_SIZE_MAX
+            )
+
+        if innodb_buffer_pool_size > total_memory:
+            log("innodb_buffer_pool_size {} is greater than system available memory: {}".format(
+                innodb_buffer_pool_size,
+                total_memory), level='WARN')
+
+        return innodb_buffer_pool_size
+
+
+class PerconaClusterHelper(MySQLConfigHelper):
+    """Percona-cluster specific configuration helper."""
+
+    def parse_config(self):
+        """Parse charm configuration and calculate values for config files."""
+        config = config_get()
+        mysql_config = {}
+        if 'max-connections' in config:
+            mysql_config['max_connections'] = config['max-connections']
+
+        if 'wait-timeout' in config:
+            mysql_config['wait_timeout'] = config['wait-timeout']
+
+        if self.get_innodb_flush_log_at_trx_commit() is not None:
+            mysql_config['innodb_flush_log_at_trx_commit'] = \
+                self.get_innodb_flush_log_at_trx_commit()
+
+        if self.get_innodb_change_buffering() is not None:
+            mysql_config['innodb_change_buffering'] = config['innodb-change-buffering']
+
+        if 'innodb-io-capacity' in config:
+            mysql_config['innodb_io_capacity'] = config['innodb-io-capacity']
+
+        # Set a sane default key_buffer size
+        mysql_config['key_buffer'] = self.human_to_bytes('32M')
+        mysql_config['innodb_buffer_pool_size'] = self.get_innodb_buffer_pool_size()
+        return mysql_config
+
+
+class MySQL8Helper(MySQLHelper):
+
+    def grant_exists(self, db_name, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        priv_string = ("GRANT ALL PRIVILEGES ON {}.* "
+                       "TO {}@{}".format(db_name, db_user, remote_ip))
+        try:
+            cursor.execute("SHOW GRANTS FOR '{}'@'{}'".format(db_user,
+                                                              remote_ip))
+            grants = [i[0] for i in cursor.fetchall()]
+        except MySQLdb.OperationalError:
+            return False
+        finally:
+            cursor.close()
+
+        # Different versions of MySQL use ' or `. Ignore these in the check.
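+        # e.g. "GRANT ALL PRIVILEGES ON `db`.* TO 'u'@'10.0.0.5'" normalises
+        # to "GRANT ALL PRIVILEGES ON db.* TO u@10.0.0.5" (values
+        # illustrative).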
+ return priv_string in [ + i.replace("'", "").replace("`", "") for i in grants] + + def create_grant(self, db_name, db_user, remote_ip, password): + if self.grant_exists(db_name, db_user, remote_ip): + return + + # Make sure the user exists + # MySQL8 must create the user before the grant + self.create_user(db_user, remote_ip, password) + + cursor = self.connection.cursor() + try: + cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}'" + .format(db_name, db_user, remote_ip)) + finally: + cursor.close() + + def create_user(self, db_user, remote_ip, password): + + SQL_USER_CREATE = ( + "CREATE USER '{db_user}'@'{remote_ip}' " + "IDENTIFIED BY '{password}'") + + cursor = self.connection.cursor() + try: + cursor.execute(SQL_USER_CREATE.format( + db_user=db_user, + remote_ip=remote_ip, + password=password) + ) + except MySQLdb._exceptions.OperationalError: + log("DB user {} already exists.".format(db_user), + "WARNING") + finally: + cursor.close() + + def create_router_grant(self, db_user, remote_ip, password): + + # Make sure the user exists + # MySQL8 must create the user before the grant + self.create_user(db_user, remote_ip, password) + + # Mysql-Router specific grants + cursor = self.connection.cursor() + try: + cursor.execute("GRANT CREATE USER ON *.* TO '{}'@'{}' WITH GRANT " + "OPTION".format(db_user, remote_ip)) + cursor.execute("GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON " + "mysql_innodb_cluster_metadata.* TO '{}'@'{}'" + .format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON mysql.user TO '{}'@'{}'" + .format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON " + "performance_schema.replication_group_members " + "TO '{}'@'{}'".format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON " + "performance_schema.replication_group_member_stats " + "TO '{}'@'{}'".format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON " + "performance_schema.global_variables " + "TO '{}'@'{}'".format(db_user, remote_ip)) + finally: + cursor.close() + + def configure_router(self, hostname, username): + + if self.connection is None: + self.connect(password=self.get_mysql_root_password()) + + remote_ip = self.normalize_address(hostname) + password = self.get_mysql_password(username) + self.create_user(username, remote_ip, password) + self.create_router_grant(username, remote_ip, password) + + return password + + +def get_prefix(requested, keys=None): + """Return existing prefix or None. + + :param requested: Request string. i.e. novacell0_username + :type requested: str + :param keys: Keys to determine prefix. Defaults set in function. + :type keys: List of str keys + :returns: String prefix i.e. novacell0 + :rtype: Union[None, str] + """ + if keys is None: + # Shared-DB default keys + keys = ["_database", "_username", "_hostname"] + for key in keys: + if requested.endswith(key): + return requested[:-len(key)] + + +def get_db_data(relation_data, unprefixed): + """Organize database requests into a collections.OrderedDict + + :param relation_data: shared-db relation data + :type relation_data: dict + :param unprefixed: Prefix to use for requests without a prefix. This should + be unique for each side of the relation to avoid + conflicts. 
+ :type unprefixed: str + :returns: Order dict of databases and users + :rtype: collections.OrderedDict + """ + # Deep copy to avoid unintentionally changing relation data + settings = copy.deepcopy(relation_data) + databases = collections.OrderedDict() + + # Clear non-db related elements + if "egress-subnets" in settings.keys(): + settings.pop("egress-subnets") + if "ingress-address" in settings.keys(): + settings.pop("ingress-address") + if "private-address" in settings.keys(): + settings.pop("private-address") + + singleset = {"database", "username", "hostname"} + if singleset.issubset(settings): + settings["{}_{}".format(unprefixed, "hostname")] = ( + settings["hostname"]) + settings.pop("hostname") + settings["{}_{}".format(unprefixed, "database")] = ( + settings["database"]) + settings.pop("database") + settings["{}_{}".format(unprefixed, "username")] = ( + settings["username"]) + settings.pop("username") + + for k, v in settings.items(): + db = k.split("_")[0] + x = "_".join(k.split("_")[1:]) + if db not in databases: + databases[db] = collections.OrderedDict() + databases[db][x] = v + + return databases diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hahelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hahelpers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hahelpers/apache.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1e371e179bb6926f227331f86cb14a38c229a2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,86 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os + +from charmhelpers.core import host +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get(ssl_cert_attr, + rid=r_id, unit=unit) + if not key: + key = relation_get(ssl_key_attr, + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = config_get('ssl_ca') + if ca_cert is None: + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in (relation_ids('identity-service') + + relation_ids('identity-credentials')): + for unit in relation_list(r_id): + if ca_cert is None: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def retrieve_ca_cert(cert_file): + cert = None + if os.path.isfile(cert_file): + with open(cert_file, 'rb') as crt: + cert = crt.read() + return cert + + +def install_ca_cert(ca_cert): + host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hahelpers/cluster.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..ba34fba0cafa21d15a6a27946544b2c99fbd3663 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,451 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# Adam Gandelman +# + +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. 
+""" + +import functools +import subprocess +import os +import time + +from socket import gethostname as get_unit_hostname + +import six + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + DEBUG, + WARNING, + unit_get, + is_leader as juju_is_leader, + status_set, +) +from charmhelpers.core.host import ( + modulo_distribution, +) +from charmhelpers.core.decorators import ( + retry_on_exception, +) +from charmhelpers.core.strutils import ( + bool_from_string, +) + +DC_RESOURCE_NAME = 'DC' + + +class HAIncompleteConfig(Exception): + pass + + +class HAIncorrectConfig(Exception): + pass + + +class CRMResourceNotFound(Exception): + pass + + +class CRMDCNotFound(Exception): + pass + + +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. If juju is sufficiently new and leadership election is supported, + the is_leader command will be used. + 2. If the charm is part of a corosync cluster, call corosync to + determine leadership. + 3. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit numer". In + other words, the oldest surviving unit. + """ + try: + return juju_is_leader() + except NotImplementedError: + log('Juju leadership election feature not enabled' + ', using fallback support', + level=WARNING) + + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_crm_dc(): + """ + Determine leadership by querying the pacemaker Designated Controller + """ + cmd = ['crm', 'status'] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError as ex: + raise CRMDCNotFound(str(ex)) + + current_dc = '' + for line in status.split('\n'): + if line.startswith('Current DC'): + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + current_dc = line.split(':')[1].split()[0] + if current_dc == get_unit_hostname(): + return True + elif current_dc == 'NONE': + raise CRMDCNotFound('Current DC: NONE') + + return False + + +@retry_on_exception(5, base_delay=2, + exc_type=(CRMResourceNotFound, CRMDCNotFound)) +def is_crm_leader(resource, retry=False): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + + We allow this operation to be retried to avoid the possibility of getting a + false negative. See LP #1396246 for more info. 
+ """ + if resource == DC_RESOURCE_NAME: + return is_crm_dc() + cmd = ['crm', 'resource', 'show', resource] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError: + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False + + +def is_leader(resource): + log("is_leader is deprecated. Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): + peers = [] + for r_id in (relation_ids(peer_relation) or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + +def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . + returns: boolean + ''' + use_https = config_get('use-https') + if use_https and bool_from_string(use_https): + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('certificates'): + for unit in relation_list(r_id): + ca = relation_get('ca', rid=r_id, unit=unit) + if ca: + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN + rel_state = [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): + return True + return False + + +def determine_api_port(public_port, singlenode_mode=False): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the API service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_apache_port(public_port, singlenode_mode=False): + ''' + Description: Determine correct apache listening port based on public IP + + state of the cluster. 
+
+    public_port: int: standard public port for given service
+
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
+    returns: int: the correct listening port for the HAProxy service
+    '''
+    i = 0
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
+        i += 1
+    return public_port - (i * 10)
+
+
+determine_apache_port_single = functools.partial(
+    determine_apache_port, singlenode_mode=True)
+
+
+def get_hacluster_config(exclude_keys=None):
+    '''
+    Obtains all relevant configuration from charm configuration required
+    for initiating a relation to hacluster:
+
+        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
+        os-admin-hostname, os-public-hostname, os-access-hostname
+
+    param: exclude_keys: list of setting key(s) to be excluded.
+    returns: dict: A dict containing settings keyed by setting name.
+    raises: HAIncompleteConfig if settings are missing; HAIncorrectConfig if
+            they are incorrect.
+    '''
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
+                'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
+    conf = {}
+    for setting in settings:
+        if exclude_keys and setting in exclude_keys:
+            continue
+
+        conf[setting] = config_get(setting)
+
+    if not valid_hacluster_config():
+        raise HAIncorrectConfig('Insufficient or incorrect config data to '
+                                'configure hacluster.')
+    return conf
+
+
+def valid_hacluster_config():
+    '''
+    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
+    must be set.
+
+    Note: ha-bindiface and ha-mcastport both have defaults and will always
+    be set. We only care that either vip or dns-ha is set.
+
+    :returns: boolean: valid config returns true.
+    raises: HAIncorrectConfig if settings conflict.
+    raises: HAIncompleteConfig if settings are missing.
+    '''
+    vip = config_get('vip')
+    dns = config_get('dns-ha')
+    if not(bool(vip) ^ bool(dns)):
+        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
+               'use high availability')
+        status_set('blocked', msg)
+        raise HAIncorrectConfig(msg)
+
+    # If dns-ha then one of os-*-hostname must be set
+    if dns:
+        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
+                        'os-public-hostname', 'os-access-hostname']
+        # At this point it is unknown if one or all of the possible
+        # network spaces are in HA. Validate at least one is set which is
+        # the minimum required.
+        for setting in dns_settings:
+            if config_get(setting):
+                log('DNS HA: At least one hostname is set {}: {}'
+                    ''.format(setting, config_get(setting)),
+                    level=DEBUG)
+                return True
+
+        msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
+               'DNS HA')
+        status_set('blocked', msg)
+        raise HAIncompleteConfig(msg)
+
+    log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
+    return True
+
+
+def canonical_url(configs, vip_setting='vip'):
+    '''
+    Returns the correct HTTP URL to this host given the state of HTTPS
+    configuration and hacluster.
+
+    :configs    : OSTemplateRenderer: A config templating object to inspect
+                  for a complete https context.
+
+    :vip_setting: str: Setting in charm config that specifies
+                  VIP address.
+    '''
+    scheme = 'http'
+    if 'https' in configs.complete_contexts():
+        scheme = 'https'
+    if is_clustered():
+        addr = config_get(vip_setting)
+    else:
+        addr = unit_get('private-address')
+    return '%s://%s' % (scheme, addr)
+
+
+def distributed_wait(modulo=None, wait=None, operation_name='operation'):
+    ''' Distribute operations by waiting based on modulo_distribution
+
+    If modulo and/or wait are not set, check config_get for those values.
+ If config values are not set, default to modulo=3 and wait=30. + + :param modulo: int The modulo number creates the group distribution + :param wait: int The constant time wait value + :param operation_name: string Operation name for status message + i.e. 'restart' + :side effect: Calls config_get() + :side effect: Calls log() + :side effect: Calls status_set() + :side effect: Calls time.sleep() + ''' + if modulo is None: + modulo = config_get('modulo-nodes') or 3 + if wait is None: + wait = config_get('known-wait') or 30 + if juju_is_leader(): + # The leader should never wait + calculated_wait = 0 + else: + # non_zero_wait=True guarantees the non-leader who gets modulo 0 + # will still wait + calculated_wait = modulo_distribution(modulo=modulo, wait=wait, + non_zero_wait=True) + msg = "Waiting {} seconds for {} ...".format(calculated_wait, + operation_name) + log(msg, DEBUG) + status_set('maintenance', msg) + time.sleep(calculated_wait) + + +def get_managed_services_and_ports(services, external_ports, + external_services=None, + port_conv_f=determine_apache_port_single): + """Get the services and ports managed by this charm. + + Return only the services and corresponding ports that are managed by this + charm. This excludes haproxy when there is a relation with hacluster. This + is because this charm passes responsability for stopping and starting + haproxy to hacluster. + + Similarly, if a relation with hacluster exists then the ports returned by + this method correspond to those managed by the apache server rather than + haproxy. + + :param services: List of services. + :type services: List[str] + :param external_ports: List of ports managed by external services. + :type external_ports: List[int] + :param external_services: List of services to be removed if ha relation is + present. + :type external_services: List[str] + :param port_conv_f: Function to apply to ports to calculate the ports + managed by services controlled by this charm. + :type port_convert_func: f() + :returns: A tuple containing a list of services first followed by a list of + ports. + :rtype: Tuple[List[str], List[int]] + """ + if external_services is None: + external_services = ['haproxy'] + if relation_ids('ha'): + for svc in external_services: + try: + services.remove(svc) + except ValueError: + pass + external_ports = [port_conv_f(p) for p in external_ports] + return services, external_ports diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/README.hardening.md b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/README.hardening.md new file mode 100644 index 0000000000000000000000000000000000000000..91280c03e6d7b5d75b356cd94614fc821abc2644 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/README.hardening.md @@ -0,0 +1,38 @@ +# Juju charm-helpers hardening library + +## Description + +This library provides multiple implementations of system and application +hardening that conform to the standards of http://hardening.io/. + +Current implementations include: + + * OS + * SSH + * MySQL + * Apache + +## Requirements + +* Juju Charms + +## Usage + +1. Synchronise this library into your charm and add the harden() decorator + (from contrib.hardening.harden) to any functions or methods you want to use + to trigger hardening of your application/system. + +2. Add a config option called 'harden' to your charm config.yaml and set it to + a space-delimited list of hardening modules you want to run e.g. 
"os ssh" + +3. Override any config defaults (contrib.hardening.defaults) by adding a file + called hardening.yaml to your charm root containing the name(s) of the + modules whose settings you want override at root level and then any settings + with overrides e.g. + + os: + general: + desktop_enable: True + +4. Now just run your charm as usual and hardening will be applied each time the + hook runs. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..30a3e94359e94011cd247de7ade76667346e7379 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3bc2ebd4760124e23c128868e098aceac610260f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/checks/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.apache.checks import config + + +def run_apache_checks(): + log("Starting Apache hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("Apache hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/checks/config.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/checks/config.py new file mode 100644 index 0000000000000000000000000000000000000000..341da9eee10f73cbe3d7e7e5cf91b57b4d2a89b4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/checks/config.py @@ -0,0 +1,104 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +import six +import subprocess + + +from charmhelpers.core.hookenv import ( + log, + INFO, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + NoReadWriteForOther, + TemplatedFile, + DeletedFile +) +from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit +from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get Apache hardening config audits. + + :returns: dictionary of audits + """ + if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0: + log("Apache server does not appear to be installed on this node - " + "skipping apache hardening", level=INFO) + return [] + + context = ApacheConfContext() + settings = utils.get_settings('apache') + audits = [ + FilePermissionAudit(paths=os.path.join( + settings['common']['apache_dir'], 'apache2.conf'), + user='root', group='root', mode=0o0640), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'mods-available/alias.conf'), + context, + TEMPLATES_DIR, + mode=0o0640, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'conf-enabled/99-hardening.conf'), + context, + TEMPLATES_DIR, + mode=0o0640, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + DirectoryPermissionAudit(settings['common']['apache_dir'], + user='root', + group='root', + mode=0o0750), + + DisabledModuleAudit(settings['hardening']['modules_to_disable']), + + NoReadWriteForOther(settings['common']['apache_dir']), + + DeletedFile(['/var/www/html/index.html']) + ] + + return audits + + +class ApacheConfContext(object): + """Defines the set of key/value pairs to set in a apache config file. 
+ + This context, when called, will return a dictionary containing the + key/value pairs of setting to specify in the + /etc/apache/conf-enabled/hardening.conf file. + """ + def __call__(self): + settings = utils.get_settings('apache') + ctxt = settings['hardening'] + + out = subprocess.check_output(['apache2', '-v']) + if six.PY3: + out = out.decode('utf-8') + ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', + out).group(1) + ctxt['apache_icondir'] = '/usr/share/apache2/icons/' + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf new file mode 100644 index 0000000000000000000000000000000000000000..22b68041d50ff753284bbb4b41a21e8f2bd8c18a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf @@ -0,0 +1,32 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### + + + + # http://httpd.apache.org/docs/2.4/upgrading.html + {% if apache_version > '2.2' -%} + Require all granted + {% else -%} + Order Allow,Deny + Deny from all + {% endif %} + + + + + Options -Indexes -FollowSymLinks + AllowOverride None + + + + Options -Indexes -FollowSymLinks + AllowOverride None + + +TraceEnable {{ traceenable }} +ServerTokens {{ servertokens }} + +SSLHonorCipherOrder {{ honor_cipher_order }} +SSLCipherSuite {{ cipher_suite }} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/templates/alias.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/templates/alias.conf new file mode 100644 index 0000000000000000000000000000000000000000..e46a58a30dadbb6ccffa02d82593c63a9cbf52df --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/apache/templates/alias.conf @@ -0,0 +1,31 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### + + # + # Aliases: Add here as many aliases as you need (with no limit). The format is + # Alias fakename realname + # + # Note that if you include a trailing / on fakename then the server will + # require it to be present in the URL. So "/icons" isn't aliased in this + # example, only "/icons/". If the fakename is slash-terminated, then the + # realname must also be slash terminated, and if the fakename omits the + # trailing slash, the realname must also omit it. + # + # We include the /icons/ alias for FancyIndexed directory listings. If + # you do not use FancyIndexing, you may comment this out. 
+ # + Alias /icons/ "{{ apache_icondir }}/" + + + Options -Indexes -MultiViews -FollowSymLinks + AllowOverride None +{% if apache_version == '2.4' -%} + Require all granted +{% else -%} + Order allow,deny + Allow from all +{% endif %} + + diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6dd5b05fec4ffcfcdb4378a06dfda4e8ac7e8371 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/__init__.py @@ -0,0 +1,54 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class BaseAudit(object): # NO-QA + """Base class for hardening checks. + + The lifecycle of a hardening check is to first check to see if the system + is in compliance for the specified check. If it is not in compliance, the + check method will return a value which will be supplied to the. + """ + def __init__(self, *args, **kwargs): + self.unless = kwargs.get('unless', None) + super(BaseAudit, self).__init__() + + def ensure_compliance(self): + """Checks to see if the current hardening check is in compliance or + not. + + If the check that is performed is not in compliance, then an exception + should be raised. + """ + pass + + def _take_action(self): + """Determines whether to perform the action or not. + + Checks whether or not an action should be taken. This is determined by + the truthy value for the unless parameter. If unless is a callback + method, it will be invoked with no parameters in order to determine + whether or not the action should be taken. Otherwise, the truthy value + of the unless attribute will determine if the action should be + performed. + """ + # Do the action if there isn't an unless override. + if self.unless is None: + return True + + # Invoke the callback if there is one. + if hasattr(self.unless, '__call__'): + return not self.unless() + + return not self.unless diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/apache.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/apache.py new file mode 100644 index 0000000000000000000000000000000000000000..04825f5ada0c5b0bb9fc0955baa9a10fa199184d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/apache.py @@ -0,0 +1,100 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import subprocess + +import six + +from charmhelpers.core.hookenv import ( + log, + INFO, + ERROR, +) + +from charmhelpers.contrib.hardening.audits import BaseAudit + + +class DisabledModuleAudit(BaseAudit): + """Audits Apache2 modules. + + Determines if the apache2 modules are enabled. If the modules are enabled + then they are removed in the ensure_compliance. + """ + def __init__(self, modules): + if modules is None: + self.modules = [] + elif isinstance(modules, six.string_types): + self.modules = [modules] + else: + self.modules = modules + + def ensure_compliance(self): + """Ensures that the modules are not loaded.""" + if not self.modules: + return + + try: + loaded_modules = self._get_loaded_modules() + non_compliant_modules = [] + for module in self.modules: + if module in loaded_modules: + log("Module '%s' is enabled but should not be." % + (module), level=INFO) + non_compliant_modules.append(module) + + if len(non_compliant_modules) == 0: + return + + for module in non_compliant_modules: + self._disable_module(module) + self._restart_apache() + except subprocess.CalledProcessError as e: + log('Error occurred auditing apache module compliance. ' + 'This may have been already reported. ' + 'Output is: %s' % e.output, level=ERROR) + + @staticmethod + def _get_loaded_modules(): + """Returns the modules which are enabled in Apache.""" + output = subprocess.check_output(['apache2ctl', '-M']) + if six.PY3: + output = output.decode('utf-8') + modules = [] + for line in output.splitlines(): + # Each line of the enabled module output looks like: + # module_name (static|shared) + # Plus a header line at the top of the output which is stripped + # out by the regex. + matcher = re.search(r'^ (\S*)_module (\S*)', line) + if matcher: + modules.append(matcher.group(1)) + return modules + + @staticmethod + def _disable_module(module): + """Disables the specified module in Apache.""" + try: + subprocess.check_call(['a2dismod', module]) + except subprocess.CalledProcessError as e: + # Note: catch error here to allow the attempt of disabling + # multiple modules in one go rather than failing after the + # first module fails. + log('Error occurred disabling module %s. ' + 'Output is: %s' % (module, e.output), level=ERROR) + + @staticmethod + def _restart_apache(): + """Restarts the apache process""" + subprocess.check_output(['service', 'apache2', 'restart']) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/apt.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/apt.py new file mode 100644 index 0000000000000000000000000000000000000000..cad7bf7376d6f22ce8feccd843b070c399887aa9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/apt.py @@ -0,0 +1,104 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import # required for external apt import +from six import string_types + +from charmhelpers.fetch import ( + apt_cache, + apt_purge +) +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) +from charmhelpers.contrib.hardening.audits import BaseAudit +from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg + + +class AptConfig(BaseAudit): + + def __init__(self, config, **kwargs): + self.config = config + + def verify_config(self): + apt_pkg.init() + for cfg in self.config: + value = apt_pkg.config.get(cfg['key'], cfg.get('default', '')) + if value and value != cfg['expected']: + log("APT config '%s' has unexpected value '%s' " + "(expected='%s')" % + (cfg['key'], value, cfg['expected']), level=WARNING) + + def ensure_compliance(self): + self.verify_config() + + +class RestrictedPackages(BaseAudit): + """Class used to audit restricted packages on the system.""" + + def __init__(self, pkgs, **kwargs): + super(RestrictedPackages, self).__init__(**kwargs) + if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): + self.pkgs = pkgs.split() + else: + self.pkgs = pkgs + + def ensure_compliance(self): + cache = apt_cache() + + for p in self.pkgs: + if p not in cache: + continue + + pkg = cache[p] + if not self.is_virtual_package(pkg): + if not pkg.current_ver: + log("Package '%s' is not installed." % pkg.name, + level=DEBUG) + continue + else: + log("Restricted package '%s' is installed" % pkg.name, + level=WARNING) + self.delete_package(cache, pkg) + else: + log("Checking restricted virtual package '%s' provides" % + pkg.name, level=DEBUG) + self.delete_package(cache, pkg) + + def delete_package(self, cache, pkg): + """Deletes the package from the system. + + Deletes the package form the system, properly handling virtual + packages. + + :param cache: the apt cache + :param pkg: the package to remove + """ + if self.is_virtual_package(pkg): + log("Package '%s' appears to be virtual - purging provides" % + pkg.name, level=DEBUG) + for _p in pkg.provides_list: + self.delete_package(cache, _p[2].parent_pkg) + elif not pkg.current_ver: + log("Package '%s' not installed" % pkg.name, level=DEBUG) + return + else: + log("Purging package '%s'" % pkg.name, level=DEBUG) + apt_purge(pkg.name) + + def is_virtual_package(self, pkg): + return (pkg.get('has_provides', False) and + not pkg.get('has_versions', False)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/file.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/file.py new file mode 100644 index 0000000000000000000000000000000000000000..257c6351a0b0d244273013faef913f52349f2486 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/audits/file.py @@ -0,0 +1,550 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
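+
+# Illustrative usage sketch (added comment, not upstream code): the file
+# audits below follow the same ensure_compliance() flow, e.g. restricting
+# /etc/shadow to root as host/checks/minimize_access.py does:
+#
+#     audit = FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600)
+#     audit.ensure_compliance()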
+
+import grp
+import os
+import pwd
+import re
+
+from subprocess import (
+    CalledProcessError,
+    check_output,
+    check_call,
+)
+from traceback import format_exc
+from six import string_types
+from stat import (
+    S_ISGID,
+    S_ISUID
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import file_hash
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.contrib.hardening.templating import (
+    get_template_path,
+    render_and_write,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+class BaseFileAudit(BaseAudit):
+    """Base class for file audits.
+
+    Provides API stubs for the compliance check flow that must be implemented
+    by any class deriving from this one.
+    """
+
+    def __init__(self, paths, always_comply=False, *args, **kwargs):
+        """
+        :param paths: string path or list of paths of files to which we want
+                      to apply the compliance checks.
+        :param always_comply: if True, compliance criteria are always applied;
+                              otherwise compliance is skipped for non-existent
+                              paths.
+        """
+        super(BaseFileAudit, self).__init__(*args, **kwargs)
+        self.always_comply = always_comply
+        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
+            self.paths = [paths]
+        else:
+            self.paths = paths
+
+    def ensure_compliance(self):
+        """Ensure that all registered files comply with the registered
+        criteria.
+        """
+        for p in self.paths:
+            if os.path.exists(p):
+                if self.is_compliant(p):
+                    continue
+
+                log('File %s is not in compliance.' % p, level=INFO)
+            else:
+                if not self.always_comply:
+                    log("Non-existent path '%s' - skipping compliance check"
+                        % (p), level=INFO)
+                    continue
+
+            if self._take_action():
+                log("Applying compliance criteria to '%s'" % (p), level=INFO)
+                self.comply(p)
+
+    def is_compliant(self, path):
+        """Audits the path to see if it is in compliance.
+
+        :param path: the path to the file that should be checked.
+        """
+        raise NotImplementedError
+
+    def comply(self, path):
+        """Enforces the compliance of a path.
+
+        :param path: the path to the file that should be enforced.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def _get_stat(cls, path):
+        """Returns the POSIX stat information for the specified file path.
+
+        :param path: the path to get the stat information for.
+        :returns: a stat object for the path (os.stat raises OSError if the
+                  path does not exist).
+        """
+        return os.stat(path)
+
+
+class FilePermissionAudit(BaseFileAudit):
+    """Implements an audit for file permissions and ownership for a user.
+
+    This class implements functionality that ensures that a specific
+    user/group will own the file(s) specified and that the permissions
+    specified are applied properly to the file.
+ """ + def __init__(self, paths, user, group=None, mode=0o600, **kwargs): + self.user = user + self.group = group + self.mode = mode + super(FilePermissionAudit, self).__init__(paths, user, group, mode, + **kwargs) + + @property + def user(self): + return self._user + + @user.setter + def user(self, name): + try: + user = pwd.getpwnam(name) + except KeyError: + log('Unknown user %s' % name, level=ERROR) + user = None + self._user = user + + @property + def group(self): + return self._group + + @group.setter + def group(self, name): + try: + group = None + if name: + group = grp.getgrnam(name) + else: + group = grp.getgrgid(self.user.pw_gid) + except KeyError: + log('Unknown group %s' % name, level=ERROR) + self._group = group + + def is_compliant(self, path): + """Checks if the path is in compliance. + + Used to determine if the path specified meets the necessary + requirements to be in compliance with the check itself. + + :param path: the file path to check + :returns: True if the path is compliant, False otherwise. + """ + stat = self._get_stat(path) + user = self.user + group = self.group + + compliant = True + if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid: + log('File %s is not owned by %s:%s.' % (path, user.pw_name, + group.gr_name), + level=INFO) + compliant = False + + # POSIX refers to the st_mode bits as corresponding to both the + # file type and file permission bits, where the least significant 12 + # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the + # file permission bits (8-0) + perms = stat.st_mode & 0o7777 + if perms != self.mode: + log('File %s has incorrect permissions, currently set to %s' % + (path, oct(stat.st_mode & 0o7777)), level=INFO) + compliant = False + + return compliant + + def comply(self, path): + """Issues a chown and chmod to the file paths specified.""" + utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, + self.mode) + + +class DirectoryPermissionAudit(FilePermissionAudit): + """Performs a permission check for the specified directory path.""" + + def __init__(self, paths, user, group=None, mode=0o600, + recursive=True, **kwargs): + super(DirectoryPermissionAudit, self).__init__(paths, user, group, + mode, **kwargs) + self.recursive = recursive + + def is_compliant(self, path): + """Checks if the directory is compliant. + + Used to determine if the path specified and all of its children + directories are in compliance with the check itself. + + :param path: the directory path to check + :returns: True if the directory tree is compliant, otherwise False. + """ + if not os.path.isdir(path): + log('Path specified %s is not a directory.' % path, level=ERROR) + raise ValueError("%s is not a directory." 
+                             % path)
+
+        if not self.recursive:
+            return super(DirectoryPermissionAudit, self).is_compliant(path)
+
+        compliant = True
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                continue
+
+            if not super(DirectoryPermissionAudit, self).is_compliant(root):
+                compliant = False
+                continue
+
+        return compliant
+
+    def comply(self, path):
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                super(DirectoryPermissionAudit, self).comply(root)
+
+
+class ReadOnly(BaseFileAudit):
+    """Audits that files and folders are read only."""
+    def __init__(self, paths, *args, **kwargs):
+        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        try:
+            output = check_output(['find', path, '-perm', '-go+w',
+                                   '-type', 'f']).strip()
+
+            # The find above will find any files which have permission sets
+            # which allow too broad of write access. As such, the path is
+            # compliant if there is no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred while finding writable files for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc(e)), level=ERROR)
+            return False
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', 'go-w', '-R', path])
+        except CalledProcessError as e:
+            log('Error occurred removing writable permissions for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc(e)), level=ERROR)
+
+
+class NoReadWriteForOther(BaseFileAudit):
+    """Ensures that the files found under the base path are not readable or
+    writable by anyone other than the owner or the group.
+    """
+    def __init__(self, paths):
+        super(NoReadWriteForOther, self).__init__(paths)
+
+    def is_compliant(self, path):
+        try:
+            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
+                   '-perm', '-o+w', '-type', 'f']
+            output = check_output(cmd).strip()
+
+            # The find above here will find any files which have read or
+            # write permissions for other, meaning there is too broad of access
+            # to read/write the file. As such, the path is compliant if there's
+            # no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred while finding files which are readable or '
+                'writable to the world in %s. '
+                'Command output is: %s.' % (path, e.output), level=ERROR)
+            return False
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', '-R', 'o-rw', path])
+        except CalledProcessError as e:
+            log('Error occurred attempting to change modes of files under '
+                'path %s. Output of command is: %s' % (path, e.output),
+                level=ERROR)
+
+
+class NoSUIDSGIDAudit(BaseFileAudit):
+    """Audits that specified files do not have SUID/SGID bits set."""
+    def __init__(self, paths, *args, **kwargs):
+        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        stat = self._get_stat(path)
+        if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
+            return False
+
+        return True
+
+    def comply(self, path):
+        try:
+            log('Removing suid/sgid from %s.' % path, level=DEBUG)
+            check_output(['chmod', '-s', path])
+        except CalledProcessError as e:
+            log('Error occurred removing suid/sgid from %s. '
+ 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + + +class TemplatedFile(BaseFileAudit): + """The TemplatedFileAudit audits the contents of a templated file. + + This audit renders a file from a template, sets the appropriate file + permissions, then generates a hashsum with which to check the content + changed. + """ + def __init__(self, path, context, template_dir, mode, user='root', + group='root', service_actions=None, **kwargs): + self.context = context + self.user = user + self.group = group + self.mode = mode + self.template_dir = template_dir + self.service_actions = service_actions + super(TemplatedFile, self).__init__(paths=path, always_comply=True, + **kwargs) + + def is_compliant(self, path): + """Determines if the templated file is compliant. + + A templated file is only compliant if it has not changed (as + determined by its sha256 hashsum) AND its file permissions are set + appropriately. + + :param path: the path to check compliance. + """ + same_templates = self.templates_match(path) + same_content = self.contents_match(path) + same_permissions = self.permissions_match(path) + + if same_content and same_permissions and same_templates: + return True + + return False + + def run_service_actions(self): + """Run any actions on services requested.""" + if not self.service_actions: + return + + for svc_action in self.service_actions: + name = svc_action['service'] + actions = svc_action['actions'] + log("Running service '%s' actions '%s'" % (name, actions), + level=DEBUG) + for action in actions: + cmd = ['service', name, action] + try: + check_call(cmd) + except CalledProcessError as exc: + log("Service name='%s' action='%s' failed - %s" % + (name, action, exc), level=WARNING) + + def comply(self, path): + """Ensures the contents and the permissions of the file. + + :param path: the path to correct + """ + dirname = os.path.dirname(path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + self.pre_write() + render_and_write(self.template_dir, path, self.context()) + utils.ensure_permissions(path, self.user, self.group, self.mode) + self.run_service_actions() + self.save_checksum(path) + self.post_write() + + def pre_write(self): + """Invoked prior to writing the template.""" + pass + + def post_write(self): + """Invoked after writing the template.""" + pass + + def templates_match(self, path): + """Determines if the template files are the same. + + The template file equality is determined by the hashsum of the + template files themselves. If there is no hashsum, then the content + cannot be sure to be the same so treat it as if they changed. + Otherwise, return whether or not the hashsums are the same. + + :param path: the path to check + :returns: boolean + """ + template_path = get_template_path(self.template_dir, path) + key = 'hardening:template:%s' % template_path + template_checksum = file_hash(template_path) + kv = unitdata.kv() + stored_tmplt_checksum = kv.get(key) + if not stored_tmplt_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Saved template checksum for %s.' % template_path, + level=DEBUG) + # Since we don't have a template checksum, then assume it doesn't + # match and return that the template is different. + return False + elif stored_tmplt_checksum != template_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Updated template checksum for %s.' 
+                % template_path,
+                level=DEBUG)
+            return False
+
+        # Here the template hasn't changed based upon the calculated
+        # checksum of the template and what was previously stored.
+        return True
+
+    def contents_match(self, path):
+        """Determines if the file content is the same.
+
+        This is determined by comparing the hashsum of the file contents with
+        the saved hashsum. If there is no saved hashsum, the content cannot
+        be guaranteed to be the same, so treat the files as if they differ.
+        Otherwise, return True if the hashsums are the same, False if they
+        are not.
+
+        :param path: the file to check.
+        """
+        checksum = file_hash(path)
+
+        kv = unitdata.kv()
+        stored_checksum = kv.get('hardening:%s' % path)
+        if not stored_checksum:
+            # If the checksum hasn't been generated, return False to ensure
+            # the file is written and the checksum stored.
+            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
+            return False
+        elif stored_checksum != checksum:
+            log('Checksum mismatch for %s.' % path, level=DEBUG)
+            return False
+
+        return True
+
+    def permissions_match(self, path):
+        """Determines if the file owner and permissions match.
+
+        :param path: the path to check.
+        """
+        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
+        return audit.is_compliant(path)
+
+    def save_checksum(self, path):
+        """Calculates and saves the checksum for the path specified.
+
+        :param path: the path of the file to save the checksum.
+        """
+        checksum = file_hash(path)
+        kv = unitdata.kv()
+        kv.set('hardening:%s' % path, checksum)
+        kv.flush()
+
+
+class DeletedFile(BaseFileAudit):
+    """Audit to ensure that a file is deleted."""
+    def __init__(self, paths):
+        super(DeletedFile, self).__init__(paths)
+
+    def is_compliant(self, path):
+        return not os.path.exists(path)
+
+    def comply(self, path):
+        os.remove(path)
+
+
+class FileContentAudit(BaseFileAudit):
+    """Audit the contents of a file."""
+    def __init__(self, paths, cases, **kwargs):
+        # Cases we expect to pass
+        self.pass_cases = cases.get('pass', [])
+        # Cases we expect to fail
+        self.fail_cases = cases.get('fail', [])
+        super(FileContentAudit, self).__init__(paths, **kwargs)
+
+    def is_compliant(self, path):
+        """
+        Given a set of content matching cases, i.e. tuple(regex, bool) where
+        the bool denotes whether the regex is expected to match, check that
+        all cases match as expected against the contents of the file. Cases
+        can be expected to pass or fail.
+
+        :param path: Path of file to check.
+        :returns: Boolean value representing whether or not all cases are
+                  found to be compliant.
+        """
+        log("Auditing contents of file '%s'" % (path), level=DEBUG)
+        with open(path, 'r') as fd:
+            contents = fd.read()
+
+        matches = 0
+        for pattern in self.pass_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to pass but instead it failed"
+                    % (pattern), level=WARNING)
+
+        for pattern in self.fail_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if not results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to fail but instead it passed"
+                    % (pattern), level=WARNING)
+
+        total = len(self.pass_cases) + len(self.fail_cases)
+        log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
+        return matches == total
+
+    def comply(self, *args, **kwargs):
+        """NOOP since we just issue warnings. This is to avoid the
+        NotImplementedError.
+ """ + log("Not applying any compliance criteria, only checks.", level=INFO) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/apache.yaml b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/apache.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f940d4cfa85ca7051dd60a4805d84bb6aebed6d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -0,0 +1,16 @@ +# NOTE: this file contains the default configuration for the 'apache' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'apache' as the root key followed by any of the following with new +# values. + +common: + apache_dir: '/etc/apache2' + +hardening: + traceenable: 'off' + allowed_http_methods: "GET POST" + modules_to_disable: [ cgi, cgid ] + servertokens: 'Prod' + honor_cipher_order: 'on' + cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/apache.yaml.schema new file mode 100644 index 0000000000000000000000000000000000000000..c112137cb45c4b63cb05384145b3edf8c443e2b8 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -0,0 +1,12 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + apache_dir: + traceenable: + +hardening: + allowed_http_methods: + modules_to_disable: + servertokens: + honor_cipher_order: + cipher_suite: diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml new file mode 100644 index 0000000000000000000000000000000000000000..682d22bf3ded32eb1c8d6188486ec4468d9ec457 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml @@ -0,0 +1,38 @@ +# NOTE: this file contains the default configuration for the 'mysql' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'mysql' as the root key followed by any of the following with new +# values. 
+ +hardening: + mysql-conf: /etc/mysql/my.cnf + hardening-conf: /etc/mysql/conf.d/hardening.cnf + +security: + # @see http://www.symantec.com/connect/articles/securing-mysql-step-step + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot + chroot: None + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create + safe-user-create: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth + secure-auth: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links + skip-symbolic-links: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database + skip-show-database: True + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile + local-infile: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs + allow-suspicious-udfs: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges + automatic-sp-privileges: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv + secure-file-priv: /tmp diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema new file mode 100644 index 0000000000000000000000000000000000000000..2edf325c311c6fbb062a072083b4d12cebc3d9c1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema @@ -0,0 +1,15 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +hardening: + mysql-conf: + hardening-conf: +security: + chroot: + safe-user-create: + secure-auth: + skip-symbolic-links: + skip-show-database: + local-infile: + allow-suspicious-udfs: + automatic-sp-privileges: + secure-file-priv: diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/os.yaml b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/os.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a8627b5ed2803828e1e4d78260c6b5f90cae659 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/os.yaml @@ -0,0 +1,68 @@ +# NOTE: this file contains the default configuration for the 'os' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'os' as the root key followed by any of the following with new +# values. 
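+#
+# NOTE (added comment): octal values such as umask can be given as quoted
+# strings (e.g. '027'); unquoted values are handled by the LoginContext in
+# host/checks/login.py, which formats non-string values with oct().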
+ +general: + desktop_enable: False # (type:boolean) + +environment: + extra_user_paths: [] + umask: 027 + root_path: / + +auth: + pw_max_age: 60 + # discourage password cycling + pw_min_age: 7 + retries: 5 + lockout_time: 600 + timeout: 60 + allow_homeless: False # (type:boolean) + pam_passwdqc_enable: True # (type:boolean) + pam_passwdqc_options: 'min=disabled,disabled,16,12,8' + root_ttys: + console + tty1 + tty2 + tty3 + tty4 + tty5 + tty6 + uid_min: 1000 + gid_min: 1000 + sys_uid_min: 100 + sys_uid_max: 999 + sys_gid_min: 100 + sys_gid_max: 999 + chfn_restrict: + +security: + users_allow: [] + suid_sgid_enforce: True # (type:boolean) + # user-defined blacklist and whitelist + suid_sgid_blacklist: [] + suid_sgid_whitelist: [] + # if this is True, remove any suid/sgid bits from files that were not in the whitelist + suid_sgid_dry_run_on_unknown: False # (type:boolean) + suid_sgid_remove_from_unknown: False # (type:boolean) + # remove packages with known issues + packages_clean: True # (type:boolean) + packages_list: + xinetd + inetd + ypserv + telnet-server + rsh-server + rsync + kernel_enable_module_loading: True # (type:boolean) + kernel_enable_core_dump: False # (type:boolean) + ssh_tmout: 300 + +sysctl: + kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 + kernel_enable_sysrq: False # (type:boolean) + forwarding: False # (type:boolean) + ipv6_enable: False # (type:boolean) + arp_restricted: True # (type:boolean) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/os.yaml.schema new file mode 100644 index 0000000000000000000000000000000000000000..cc3b9c206eae56cbe68826cb76748e2deb9483e1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -0,0 +1,43 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +general: + desktop_enable: +environment: + extra_user_paths: + umask: + root_path: +auth: + pw_max_age: + pw_min_age: + retries: + lockout_time: + timeout: + allow_homeless: + pam_passwdqc_enable: + pam_passwdqc_options: + root_ttys: + uid_min: + gid_min: + sys_uid_min: + sys_uid_max: + sys_gid_min: + sys_gid_max: + chfn_restrict: +security: + users_allow: + suid_sgid_enforce: + suid_sgid_blacklist: + suid_sgid_whitelist: + suid_sgid_dry_run_on_unknown: + suid_sgid_remove_from_unknown: + packages_clean: + packages_list: + kernel_enable_module_loading: + kernel_enable_core_dump: + ssh_tmout: +sysctl: + kernel_secure_sysrq: + kernel_enable_sysrq: + forwarding: + ipv6_enable: + arp_restricted: diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cd529bcae1ec00fef2e969f43dc3cf530b46ef9a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml @@ -0,0 +1,49 @@ +# NOTE: this file contains the default configuration for the 'ssh' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'ssh' as the root key followed by any of the following with new +# values. 
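+#
+# Overrides use 'ssh' as the root key and are validated against the
+# accompanying ssh.yaml.schema, e.g. (illustrative only):
+#
+#   ssh:
+#     server:
+#       max_auth_tries: 3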
+ +common: + service_name: 'ssh' + network_ipv6_enable: False # (type:boolean) + ports: [22] + remote_hosts: [] + +client: + package: 'openssh-client' + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + roaming: False + password_authentication: 'no' + +server: + host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key', + '/etc/ssh/ssh_host_ecdsa_key'] + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + allow_root_with_key: False # (type:boolean) + allow_tcp_forwarding: 'no' + allow_agent_forwarding: 'no' + allow_x11_forwarding: 'no' + use_privilege_separation: 'sandbox' + listen_to: ['0.0.0.0'] + use_pam: 'no' + package: 'openssh-server' + password_authentication: 'no' + alive_interval: '600' + alive_count: '3' + sftp_enable: False # (type:boolean) + sftp_group: 'sftponly' + sftp_chroot: '/home/%u' + deny_users: [] + allow_users: [] + deny_groups: [] + allow_groups: [] + print_motd: 'no' + print_last_log: 'no' + use_dns: 'no' + max_auth_tries: 2 + max_sessions: 10 diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema new file mode 100644 index 0000000000000000000000000000000000000000..d05e054bc234015206bb1195152fa9ffd6a33151 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema @@ -0,0 +1,42 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + service_name: + network_ipv6_enable: + ports: + remote_hosts: +client: + package: + cbc_required: + weak_hmac: + weak_kex: + roaming: + password_authentication: +server: + host_key_files: + cbc_required: + weak_hmac: + weak_kex: + allow_root_with_key: + allow_tcp_forwarding: + allow_agent_forwarding: + allow_x11_forwarding: + use_privilege_separation: + listen_to: + use_pam: + package: + password_authentication: + alive_interval: + alive_count: + sftp_enable: + sftp_group: + sftp_chroot: + deny_users: + allow_users: + deny_groups: + allow_groups: + print_motd: + print_last_log: + use_dns: + max_auth_tries: + max_sessions: diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/harden.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/harden.py new file mode 100644 index 0000000000000000000000000000000000000000..63f21b9c9855065da3be875c01a2c94db7df47b4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/harden.py @@ -0,0 +1,96 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
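+
+# Illustrative usage sketch (added comment, not upstream code): charms apply
+# the harden decorator defined below to their hooks, with config.yaml
+# supplying the 'harden' option, e.g.
+#
+#     from charmhelpers.contrib.hardening.harden import harden
+#
+#     @harden()
+#     def config_changed():
+#         ...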
+ +import six + +from collections import OrderedDict + +from charmhelpers.core.hookenv import ( + config, + log, + DEBUG, + WARNING, +) +from charmhelpers.contrib.hardening.host.checks import run_os_checks +from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks +from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks +from charmhelpers.contrib.hardening.apache.checks import run_apache_checks + +_DISABLE_HARDENING_FOR_UNIT_TEST = False + + +def harden(overrides=None): + """Hardening decorator. + + This is the main entry point for running the hardening stack. In order to + run modules of the stack you must add this decorator to charm hook(s) and + ensure that your charm config.yaml contains the 'harden' option set to + one or more of the supported modules. Setting these will cause the + corresponding hardening code to be run when the hook fires. + + This decorator can and should be applied to more than one hook or function + such that hardening modules are called multiple times. This is because + subsequent calls will perform auditing checks that will report any changes + to resources hardened by the first run (and possibly perform compliance + actions as a result of any detected infractions). + + :param overrides: Optional list of stack modules used to override those + provided with 'harden' config. + :returns: Returns value returned by decorated function once executed. + """ + if overrides is None: + overrides = [] + + def _harden_inner1(f): + # As this has to be py2.7 compat, we can't use nonlocal. Use a trick + # to capture the dictionary that can then be updated. + _logged = {'done': False} + + def _harden_inner2(*args, **kwargs): + # knock out hardening via a config var; normally it won't get + # disabled. + if _DISABLE_HARDENING_FOR_UNIT_TEST: + return f(*args, **kwargs) + if not _logged['done']: + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + _logged['done'] = True + RUN_CATALOG = OrderedDict([('os', run_os_checks), + ('ssh', run_ssh_checks), + ('mysql', run_mysql_checks), + ('apache', run_apache_checks)]) + + enabled = overrides[:] or (config("harden") or "").split() + if enabled: + modules_to_run = [] + # modules will always be performed in the following order + for module, func in six.iteritems(RUN_CATALOG): + if module in enabled: + enabled.remove(module) + modules_to_run.append(func) + + if enabled: + log("Unknown hardening modules '%s' - ignoring" % + (', '.join(enabled)), level=WARNING) + + for hardener in modules_to_run: + log("Executing hardening module '%s'" % + (hardener.__name__), level=DEBUG) + hardener() + else: + log("No hardening applied to '%s'" % (f.__name__), level=DEBUG) + + return f(*args, **kwargs) + return _harden_inner2 + + return _harden_inner1 diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0e7e409f3c7e0406b40353f48acfc3479e4c1a24 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -0,0 +1,48 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.host.checks import ( + apt, + limits, + login, + minimize_access, + pam, + profile, + securetty, + suid_sgid, + sysctl +) + + +def run_os_checks(): + log("Starting OS hardening checks.", level=DEBUG) + checks = apt.get_audits() + checks.extend(limits.get_audits()) + checks.extend(login.get_audits()) + checks.extend(minimize_access.get_audits()) + checks.extend(pam.get_audits()) + checks.extend(profile.get_audits()) + checks.extend(securetty.get_audits()) + checks.extend(suid_sgid.get_audits()) + checks.extend(sysctl.get_audits()) + + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("OS hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/apt.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/apt.py new file mode 100644 index 0000000000000000000000000000000000000000..7ce41b0043134e256d9c20ee729f1c4345faa3f9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/apt.py @@ -0,0 +1,37 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
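+
+# Illustrative sketch (added comment, not upstream code): like every module
+# under host/checks, this one exposes get_audits(); run_os_checks() in
+# __init__.py collects the audits and calls check.ensure_compliance() on each.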
+ +from charmhelpers.contrib.hardening.utils import get_settings +from charmhelpers.contrib.hardening.audits.apt import ( + AptConfig, + RestrictedPackages, +) + + +def get_audits(): + """Get OS hardening apt audits. + + :returns: dictionary of audits + """ + audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated', + 'expected': 'false'}])] + + settings = get_settings('os') + clean_packages = settings['security']['packages_clean'] + if clean_packages: + security_packages = settings['security']['packages_list'] + if security_packages: + audits.append(RestrictedPackages(security_packages)) + + return audits diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/limits.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/limits.py new file mode 100644 index 0000000000000000000000000000000000000000..e94f5ebef360c7c80c35eba8243d3e7f7dcbb14d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/limits.py @@ -0,0 +1,53 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.audits.file import ( + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening security limits audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Ensure that the /etc/security/limits.d directory is only writable + # by the root user, but others can execute and read. + audits.append(DirectoryPermissionAudit('/etc/security/limits.d', + user='root', group='root', + mode=0o755)) + + # If core dumps are not enabled, then don't allow core dumps to be + # created as they may contain sensitive information. + if not settings['security']['kernel_enable_core_dump']: + audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf', + SecurityLimitsContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0440)) + return audits + + +class SecurityLimitsContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'disable_core_dump': + not settings['security']['kernel_enable_core_dump']} + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/login.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/login.py new file mode 100644 index 0000000000000000000000000000000000000000..fe2bc6ef34a0dae612c2617dc1d13390f651e419 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/login.py @@ -0,0 +1,65 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import string_types + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening login.defs audits. + + :returns: dictionary of audits + """ + audits = [TemplatedFile('/etc/login.defs', LoginContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0444)] + return audits + + +class LoginContext(object): + + def __call__(self): + settings = utils.get_settings('os') + + # Octal numbers in yaml end up being turned into decimal, + # so check if the umask is entered as a string (e.g. '027') + # or as an octal umask as we know it (e.g. 002). If its not + # a string assume it to be octal and turn it into an octal + # string. + umask = settings['environment']['umask'] + if not isinstance(umask, string_types): + umask = '%s' % oct(umask) + + ctxt = { + 'additional_user_paths': + settings['environment']['extra_user_paths'], + 'umask': umask, + 'pwd_max_age': settings['auth']['pw_max_age'], + 'pwd_min_age': settings['auth']['pw_min_age'], + 'uid_min': settings['auth']['uid_min'], + 'sys_uid_min': settings['auth']['sys_uid_min'], + 'sys_uid_max': settings['auth']['sys_uid_max'], + 'gid_min': settings['auth']['gid_min'], + 'sys_gid_min': settings['auth']['sys_gid_min'], + 'sys_gid_max': settings['auth']['sys_gid_max'], + 'login_retries': settings['auth']['retries'], + 'login_timeout': settings['auth']['timeout'], + 'chfn_restrict': settings['auth']['chfn_restrict'], + 'allow_login_without_home': settings['auth']['allow_homeless'] + } + + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/minimize_access.py new file mode 100644 index 0000000000000000000000000000000000000000..6e64be003be0b89d1416b22c35c43b6024979361 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -0,0 +1,50 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + ReadOnly, +) +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening access audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Remove write permissions from $PATH folders for all regular users. 
+ # This prevents changing system-wide commands from normal users. + path_folders = {'/usr/local/sbin', + '/usr/local/bin', + '/usr/sbin', + '/usr/bin', + '/bin'} + extra_user_paths = settings['environment']['extra_user_paths'] + path_folders.update(extra_user_paths) + audits.append(ReadOnly(path_folders)) + + # Only allow the root user to have access to the shadow file. + audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600)) + + if 'change_user' not in settings['security']['users_allow']: + # su should only be accessible to user and group root, unless it is + # expressly defined to allow users to change to root via the + # security_users_allow config option. + audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750)) + + return audits diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/pam.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/pam.py new file mode 100644 index 0000000000000000000000000000000000000000..9b38d5f0cf0b16282968825b79b44d80a1a7f577 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/pam.py @@ -0,0 +1,132 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from subprocess import ( + check_output, + CalledProcessError, +) + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + ERROR, +) +from charmhelpers.fetch import ( + apt_install, + apt_purge, + apt_update, +) +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + DeletedFile, +) +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR + + +def get_audits(): + """Get OS hardening PAM authentication audits. + + :returns: dictionary of audits + """ + audits = [] + + settings = utils.get_settings('os') + + if settings['auth']['pam_passwdqc_enable']: + audits.append(PasswdqcPAM('/etc/passwdqc.conf')) + + if settings['auth']['retries']: + audits.append(Tally2PAM('/usr/share/pam-configs/tally2')) + else: + audits.append(DeletedFile('/usr/share/pam-configs/tally2')) + + return audits + + +class PasswdqcPAMContext(object): + + def __call__(self): + ctxt = {} + settings = utils.get_settings('os') + + ctxt['auth_pam_passwdqc_options'] = \ + settings['auth']['pam_passwdqc_options'] + + return ctxt + + +class PasswdqcPAM(TemplatedFile): + """The PAM Audit verifies the linux PAM settings.""" + def __init__(self, path): + super(PasswdqcPAM, self).__init__(path=path, + template_dir=TEMPLATES_DIR, + context=PasswdqcPAMContext(), + user='root', + group='root', + mode=0o0640) + + def pre_write(self): + # Always remove? 
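+        # (Added comment, assumption: the packages are purged unconditionally
+        # so that they cannot conflict with the passwdqc PAM profile that is
+        # installed below.)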
+ for pkg in ['libpam-ccreds', 'libpam-cracklib']: + log("Purging package '%s'" % pkg, level=DEBUG), + apt_purge(pkg) + + apt_update(fatal=True) + for pkg in ['libpam-passwdqc']: + log("Installing package '%s'" % pkg, level=DEBUG), + apt_install(pkg) + + def post_write(self): + """Updates the PAM configuration after the file has been written""" + try: + check_output(['pam-auth-update', '--package']) + except CalledProcessError as e: + log('Error calling pam-auth-update: %s' % e, level=ERROR) + + +class Tally2PAMContext(object): + + def __call__(self): + ctxt = {} + settings = utils.get_settings('os') + + ctxt['auth_lockout_time'] = settings['auth']['lockout_time'] + ctxt['auth_retries'] = settings['auth']['retries'] + + return ctxt + + +class Tally2PAM(TemplatedFile): + """The PAM Audit verifies the linux PAM settings.""" + def __init__(self, path): + super(Tally2PAM, self).__init__(path=path, + template_dir=TEMPLATES_DIR, + context=Tally2PAMContext(), + user='root', + group='root', + mode=0o0640) + + def pre_write(self): + # Always remove? + apt_purge('libpam-ccreds') + apt_update(fatal=True) + apt_install('libpam-modules') + + def post_write(self): + """Updates the PAM configuration after the file has been written""" + try: + check_output(['pam-auth-update', '--package']) + except CalledProcessError as e: + log('Error calling pam-auth-update: %s' % e, level=ERROR) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/profile.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/profile.py new file mode 100644 index 0000000000000000000000000000000000000000..2727428da9241ccf88a60843d05dffb26cebac96 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/profile.py @@ -0,0 +1,49 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening profile audits. + + :returns: dictionary of audits + """ + audits = [] + + settings = utils.get_settings('os') + # If core dumps are not enabled, then don't allow core dumps to be + # created as they may contain sensitive information. 
+ if not settings['security']['kernel_enable_core_dump']: + audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0755, user='root', group='root')) + if settings['security']['ssh_tmout']: + audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0644, user='root', group='root')) + return audits + + +class ProfileContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'ssh_tmout': + settings['security']['ssh_tmout']} + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/securetty.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/securetty.py new file mode 100644 index 0000000000000000000000000000000000000000..34cd02178c1fbf1c7d467af0814ad9fd4199dc3d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/securetty.py @@ -0,0 +1,37 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening Secure TTY audits. + + :returns: dictionary of audits + """ + audits = [] + audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(), + template_dir=TEMPLATES_DIR, + mode=0o0400, user='root', group='root')) + return audits + + +class SecureTTYContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'ttys': settings['auth']['root_ttys']} + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/suid_sgid.py new file mode 100644 index 0000000000000000000000000000000000000000..bcbe3fde07ea0716e2de6d1d4e103fcb19166c14 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/suid_sgid.py @@ -0,0 +1,129 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
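+
+# Illustrative example (added comment, not upstream code): with a user
+# whitelist of ['/usr/bin/rcp'], get_audits() below removes that path from
+# the effective blacklist, so its suid/sgid bits are left alone even though
+# the system blacklist names it.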
+
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
+from charmhelpers.contrib.hardening import utils
+
+
+BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
+             '/usr/libexec/openssh/ssh-keysign',
+             '/usr/lib/openssh/ssh-keysign',
+             '/sbin/netreport',
+             '/usr/sbin/usernetctl',
+             '/usr/sbin/userisdnctl',
+             '/usr/sbin/pppd',
+             '/usr/bin/lockfile',
+             '/usr/bin/mail-lock',
+             '/usr/bin/mail-unlock',
+             '/usr/bin/mail-touchlock',
+             '/usr/bin/dotlockfile',
+             '/usr/bin/arping',
+             '/usr/sbin/uuidd',
+             '/usr/bin/mtr',
+             '/usr/lib/evolution/camel-lock-helper-1.2',
+             '/usr/lib/pt_chown',
+             '/usr/lib/eject/dmcrypt-get-device',
+             '/usr/lib/mc/cons.saver']
+
+WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
+             '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
+             '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
+             '/usr/bin/passwd', '/usr/bin/ssh-agent',
+             '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
+             '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
+             '/bin/ping6', '/usr/bin/traceroute6.iputils',
+             '/sbin/mount.nfs', '/sbin/umount.nfs',
+             '/sbin/mount.nfs4', '/sbin/umount.nfs4',
+             '/usr/bin/crontab',
+             '/usr/bin/wall', '/usr/bin/write',
+             '/usr/bin/screen',
+             '/usr/bin/mlocate',
+             '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
+             '/bin/fusermount',
+             '/usr/bin/pkexec',
+             '/usr/bin/sudo', '/usr/bin/sudoedit',
+             '/usr/sbin/postdrop', '/usr/sbin/postqueue',
+             '/usr/sbin/suexec',
+             '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
+             '/usr/kerberos/bin/ksu',
+             '/usr/sbin/ccreds_validate',
+             '/usr/bin/Xorg',
+             '/usr/bin/X',
+             '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
+             '/usr/lib/vte/gnome-pty-helper',
+             '/usr/lib/libvte9/gnome-pty-helper',
+             '/usr/lib/libvte-2.90-9/gnome-pty-helper']
+
+
+def get_audits():
+    """Get OS hardening suid/sgid audits.
+
+    :returns: list of audits
+    """
+    checks = []
+    settings = utils.get_settings('os')
+    if not settings['security']['suid_sgid_enforce']:
+        log("Skipping suid/sgid hardening", level=INFO)
+        return checks
+
+    # Build the blacklist and whitelist of files for suid/sgid checks.
+    # There are a total of 4 lists:
+    #   1. the system blacklist
+    #   2. the system whitelist
+    #   3. the user blacklist
+    #   4. the user whitelist
+    #
+    # The blacklist is the set of paths which should NOT have the suid/sgid bit
+    # set and the whitelist is the set of paths which MAY have the suid/sgid
+    # bit set. The user whitelist/blacklist effectively override the system
+    # whitelist/blacklist.
+    u_b = settings['security']['suid_sgid_blacklist']
+    u_w = settings['security']['suid_sgid_whitelist']
+
+    blacklist = set(BLACKLIST) - set(u_w + u_b)
+    whitelist = set(WHITELIST) - set(u_b + u_w)
+
+    checks.append(NoSUIDSGIDAudit(blacklist))
+
+    dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
+
+    if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
+        # If the policy is a dry_run (e.g. complain only) or remove unknown
+        # suid/sgid bits then find all of the paths which have the suid/sgid
+        # bit set and then remove the whitelisted paths.
+        root_path = settings['environment']['root_path']
+        unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
+        checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
+
+    return checks
+
+
+def find_paths_with_suid_sgid(root_path):
+    """Finds all paths/files which have an suid/sgid bit enabled.
+
+    Starting with the root_path, this will recursively find all paths which
+    have an suid or sgid bit set.
+    """
+    cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000',
+           '-type', 'f', '!', '-path', '/proc/*', '-print']
+
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    out, _ = p.communicate()
+    # communicate() returns bytes; decode before splitting into paths.
+    return set(out.decode('utf-8').split('\n'))
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/sysctl.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/sysctl.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1ea5813036b11893e8b9a986bf30a2f7a541b5d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/checks/sysctl.py
@@ -0,0 +1,209 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import platform
+import re
+import six
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+    WARNING,
+)
+from charmhelpers.contrib.hardening import utils
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+
+
+SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
+net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
+net.ipv4.conf.all.rp_filter=1
+net.ipv4.conf.default.rp_filter=1
+net.ipv4.icmp_echo_ignore_broadcasts=1
+net.ipv4.icmp_ignore_bogus_error_responses=1
+net.ipv4.icmp_ratelimit=100
+net.ipv4.icmp_ratemask=88089
+net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
+net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
+net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
+net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
+net.ipv4.tcp_rfc1337=1
+net.ipv4.tcp_syncookies=1
+net.ipv4.conf.all.shared_media=1
+net.ipv4.conf.default.shared_media=1
+net.ipv4.conf.all.accept_source_route=0
+net.ipv4.conf.default.accept_source_route=0
+net.ipv4.conf.all.accept_redirects=0
+net.ipv4.conf.default.accept_redirects=0
+net.ipv6.conf.all.accept_redirects=0
+net.ipv6.conf.default.accept_redirects=0
+net.ipv4.conf.all.secure_redirects=0
+net.ipv4.conf.default.secure_redirects=0
+net.ipv4.conf.all.send_redirects=0
+net.ipv4.conf.default.send_redirects=0
+net.ipv4.conf.all.log_martians=0
+net.ipv6.conf.default.router_solicitations=0
+net.ipv6.conf.default.accept_ra_rtr_pref=0
+net.ipv6.conf.default.accept_ra_pinfo=0
+net.ipv6.conf.default.accept_ra_defrtr=0
+net.ipv6.conf.default.autoconf=0
+net.ipv6.conf.default.dad_transmits=0
+net.ipv6.conf.default.max_addresses=1
+net.ipv6.conf.all.accept_ra=0
+net.ipv6.conf.default.accept_ra=0
+kernel.modules_disabled=%(kernel_modules_disabled)s
+kernel.sysrq=%(kernel_sysrq)s
+fs.suid_dumpable=%(fs_suid_dumpable)s
+kernel.randomize_va_space=2
+"""
+
+
+def get_audits():
+    """Get OS hardening sysctl audits.
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Apply the sysctl settings which are configured to be applied.
+    audits.append(SysctlConf())
+    # Make sure that only root has access to the sysctl.conf file, and
+    # that it is read-only.
+    audits.append(FilePermissionAudit('/etc/sysctl.conf',
+                                      user='root',
+                                      group='root', mode=0o0440))
+    # If module loading is not enabled, then ensure that the modules
+    # file has the appropriate permissions and rebuild the initramfs
+    if not settings['security']['kernel_enable_module_loading']:
+        audits.append(ModulesTemplate())
+
+    return audits
+
+
+class ModulesContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        with open('/proc/cpuinfo', 'r') as fd:
+            cpuinfo = fd.readlines()
+
+        # Default the vendor so the context still renders when no
+        # vendor_id line is present (e.g. non-x86 hardware).
+        vendor = 'unknown'
+        for line in cpuinfo:
+            match = re.search(r"^vendor_id\s+:\s+(.+)", line)
+            if match:
+                vendor = match.group(1)
+                break
+
+        if vendor == "GenuineIntel":
+            vendor = "intel"
+        elif vendor == "AuthenticAMD":
+            vendor = "amd"
+
+        ctxt = {'arch': platform.processor(),
+                'cpuVendor': vendor,
+                'desktop_enable': settings['general']['desktop_enable']}
+
+        return ctxt
+
+
+class ModulesTemplate(TemplatedFile):
+
+    def __init__(self):
+        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
+                                              ModulesContext(),
+                                              template_dir=TEMPLATES_DIR,
+                                              user='root', group='root',
+                                              mode=0o0440)
+
+    def post_write(self):
+        subprocess.check_call(['update-initramfs', '-u'])
+
+
+class SysCtlHardeningContext(object):
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'sysctl': {}}
+
+        log("Applying sysctl settings", level=INFO)
+        extras = {'net_ipv4_ip_forward': 0,
+                  'net_ipv6_conf_all_forwarding': 0,
+                  'net_ipv6_conf_all_disable_ipv6': 1,
+                  'net_ipv4_tcp_timestamps': 0,
+                  'net_ipv4_conf_all_arp_ignore': 0,
+                  'net_ipv4_conf_all_arp_announce': 0,
+                  'kernel_sysrq': 0,
+                  'fs_suid_dumpable': 0,
+                  'kernel_modules_disabled': 1}
+
+        if settings['sysctl']['ipv6_enable']:
+            extras['net_ipv6_conf_all_disable_ipv6'] = 0
+
+        if settings['sysctl']['forwarding']:
+            extras['net_ipv4_ip_forward'] = 1
+            extras['net_ipv6_conf_all_forwarding'] = 1
+
+        if settings['sysctl']['arp_restricted']:
+            extras['net_ipv4_conf_all_arp_ignore'] = 1
+            extras['net_ipv4_conf_all_arp_announce'] = 2
+
+        if settings['security']['kernel_enable_module_loading']:
+            extras['kernel_modules_disabled'] = 0
+
+        if settings['sysctl']['kernel_enable_sysrq']:
+            sysrq_val = settings['sysctl']['kernel_secure_sysrq']
+            extras['kernel_sysrq'] = sysrq_val
+
+        if settings['security']['kernel_enable_core_dump']:
+            extras['fs_suid_dumpable'] = 1
+
+        settings.update(extras)
+        for d in (SYSCTL_DEFAULTS % settings).split():
+            d = d.strip().partition('=')
+            key = d[0].strip()
+            path = os.path.join('/proc/sys', key.replace('.', '/'))
+            if not os.path.exists(path):
+                log("Skipping '%s' since '%s' does not exist" % (key, path),
+                    level=WARNING)
+                continue
+
+            ctxt['sysctl'][key] = d[2] or None
+
+        # Translate for python3
+        return {'sysctl_settings':
+                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+
+
+class SysctlConf(TemplatedFile):
+    """An audit check for sysctl settings."""
+    def __init__(self):
+        self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
+        super(SysctlConf, self).__init__(self.conffile,
+                                         SysCtlHardeningContext(),
+                                         template_dir=TEMPLATES_DIR,
+                                         user='root', group='root',
+                                         mode=0o0440)
+
+    def post_write(self):
+        try:
+            subprocess.check_call(['sysctl', '-p', self.conffile])
+        except subprocess.CalledProcessError as e:
+            # NOTE: on some systems
if sysctl cannot apply all settings it + # will return non-zero as well. + log("sysctl command returned an error (maybe some " + "keys could not be set) - %s" % (e), + level=WARNING) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf new file mode 100644 index 0000000000000000000000000000000000000000..0014191fc8152fd9147b3fb5446987e6e62f2d77 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% if disable_core_dump -%} +# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information. +* hard core 0 +{% endif %} \ No newline at end of file diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/99-hardening.sh new file mode 100644 index 0000000000000000000000000000000000000000..616cef46f492f682aca28c71a6e20176870a36f2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/99-hardening.sh @@ -0,0 +1,5 @@ +TMOUT={{ tmout }} +readonly TMOUT +export TMOUT + +readonly HISTFILE diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf new file mode 100644 index 0000000000000000000000000000000000000000..101f1e1d709c268890553957f30c93259681ce59 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf @@ -0,0 +1,7 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +{% for key, value in sysctl_settings -%} +{{ key }}={{ value }} +{% endfor -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/login.defs b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/login.defs new file mode 100644 index 0000000000000000000000000000000000000000..db137d6dbb7a3a850294407199225392a880cfc2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/login.defs @@ -0,0 +1,349 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# +# /etc/login.defs - Configuration control definitions for the login package. +# +# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. +# If unspecified, some arbitrary (and possibly incorrect) value will +# be assumed. 
All other items are optional - if not specified then
+# the described action or option will be inhibited.
+#
+# Comment lines (lines beginning with "#") and blank lines are ignored.
+#
+# Modified for Linux.  --marekm
+
+# REQUIRED for useradd/userdel/usermod
+# Directory where mailboxes reside, _or_ name of file, relative to the
+# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
+# MAIL_DIR takes precedence.
+#
+# Essentially:
+# - MAIL_DIR defines the location of users' mail spool files
+# (for mbox use) by appending the username to MAIL_DIR as defined
+# below.
+# - MAIL_FILE defines the location of the user's mail spool file as the
+# fully-qualified filename obtained by prepending the user home
+# directory before $MAIL_FILE
+#
+# NOTE: This is no longer used for setting up the user's MAIL environment
+# variable, which is, starting from shadow 4.0.12-1 in Debian, entirely the
+# job of the pam_mail PAM modules
+# See default PAM configuration files provided for
+# login, su, etc.
+#
+# This is a temporary situation: setting these variables will soon
+# move to /etc/default/useradd and the variables will then
+# no longer be supported
+MAIL_DIR /var/mail
+#MAIL_FILE .mail
+
+#
+# Enable logging and display of /var/log/faillog login failure info.
+# This option conflicts with the pam_tally PAM module.
+#
+FAILLOG_ENAB yes
+
+#
+# Enable display of unknown usernames when login failures are recorded.
+#
+# WARNING: Unknown usernames may become world readable.
+# See #290803 and #298773 for details about how this could become a security
+# concern
+LOG_UNKFAIL_ENAB no
+
+#
+# Enable logging of successful logins
+#
+LOG_OK_LOGINS yes
+
+#
+# Enable "syslog" logging of su activity - in addition to sulog file logging.
+# SYSLOG_SG_ENAB does the same for newgrp and sg.
+#
+SYSLOG_SU_ENAB yes
+SYSLOG_SG_ENAB yes
+
+#
+# If defined, all su activity is logged to this file.
+#
+#SULOG_FILE /var/log/sulog
+
+#
+# If defined, file which maps tty line to TERM environment parameter.
+# Each line of the file is in a format something like "vt100 tty01".
+#
+#TTYTYPE_FILE /etc/ttytype
+
+#
+# If defined, login failures will be logged here in a utmp format
+# last, when invoked as lastb, will read /var/log/btmp, so...
+#
+FTMP_FILE /var/log/btmp
+
+#
+# If defined, the command name to display when running "su -". For
+# example, if this is defined as "su" then a "ps" will display the
+# command as "-su". If not defined, then "ps" would display the
+# name of the shell actually being run, e.g. something like "-sh".
+#
+SU_NAME su
+
+#
+# If defined, file which inhibits all the usual chatter during the login
+# sequence. If a full pathname, then hushed mode will be enabled if the
+# user's name or shell are found in the file. If not a full pathname, then
+# hushed mode will be enabled if the file exists in the user's home directory.
+#
+HUSHLOGIN_FILE .hushlogin
+#HUSHLOGIN_FILE /etc/hushlogins
+
+#
+# *REQUIRED* The default PATH settings, for superuser and normal users.
+#
+# (they are minimal, add the rest in the shell startup files)
+ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %}
+
+#
+# Terminal permissions
+#
+# TTYGROUP Login tty will be assigned this group ownership.
+# TTYPERM Login tty will be set to this permission.
+#
+# If you have a "write" program which is "setgid" to a special group
+# which owns the terminals, define TTYGROUP to the group number and
+# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
+# TTYPERM to either 622 or 600.
+#
+# In Debian /usr/bin/bsd-write or similar programs are setgid tty
+# However, the default and recommended value for TTYPERM is still 0600
+# so as not to allow anyone to write to anyone else's console or terminal
+
+# Users can still allow other people to write to them by issuing
+# the "mesg y" command.
+
+TTYGROUP tty
+TTYPERM 0600
+
+#
+# Login configuration initializations:
+#
+# ERASECHAR Terminal ERASE character ('\010' = backspace).
+# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
+# UMASK Default "umask" value.
+#
+# The ERASECHAR and KILLCHAR are used only on System V machines.
+#
+# UMASK is the default umask value for pam_umask and is used by
+# useradd and newusers to set the mode of the new home directories.
+# 022 is the "historical" value in Debian for UMASK
+# 027, or even 077, could be considered better for privacy
+# There is no One True Answer here: each sysadmin must make up his/her
+# mind.
+#
+# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
+# for private user groups, i. e. the uid is the same as gid, and username is
+# the same as the primary group name: for these, the user permissions will be
+# used as group permissions, e. g. 022 will become 002.
+#
+# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
+#
+ERASECHAR 0177
+KILLCHAR 025
+UMASK {{ umask }}
+
+# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name.
+# If set to yes, userdel will remove the user's group if it contains no more members, and useradd will create by default a group with the name of the user.
+USERGROUPS_ENAB yes
+
+#
+# Password aging controls:
+#
+# PASS_MAX_DAYS Maximum number of days a password may be used.
+# PASS_MIN_DAYS Minimum number of days allowed between password changes.
+# PASS_WARN_AGE Number of days warning given before a password expires.
+#
+PASS_MAX_DAYS {{ pwd_max_age }}
+PASS_MIN_DAYS {{ pwd_min_age }}
+PASS_WARN_AGE 7
+
+#
+# Min/max values for automatic uid selection in useradd
+#
+UID_MIN {{ uid_min }}
+UID_MAX 60000
+# System accounts
+SYS_UID_MIN {{ sys_uid_min }}
+SYS_UID_MAX {{ sys_uid_max }}
+
+# Min/max values for automatic gid selection in groupadd
+GID_MIN {{ gid_min }}
+GID_MAX 60000
+# System accounts
+SYS_GID_MIN {{ sys_gid_min }}
+SYS_GID_MAX {{ sys_gid_max }}
+
+#
+# Max number of login retries if password is bad. This will most likely be
+# overridden by PAM, since the default pam_unix module has its own built-in
+# limit of 3 retries. However, this is a safe fallback in case you are using
+# an authentication module that does not enforce PAM_MAXTRIES.
+#
+LOGIN_RETRIES {{ login_retries }}
+
+#
+# Max time in seconds for login
+#
+LOGIN_TIMEOUT {{ login_timeout }}
+
+#
+# Which fields may be changed by regular users using chfn - use
+# any combination of letters "frwh" (full name, room number, work
+# phone, home phone). If not defined, no changes are allowed.
+# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
+#
+{% if chfn_restrict %}
+CHFN_RESTRICT {{ chfn_restrict }}
+{% endif %}
+
+#
+# Should login be allowed if we can't cd to the home directory?
+# Default is no.
+#
+DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %}
+
+#
+# If defined, this command is run when removing a user.
+# It should remove any at/cron/print jobs etc. owned by
+# the user to be removed (passed as the first argument).
+#
+#USERDEL_CMD /usr/sbin/userdel_local
+
+#
+# Enable setting of the umask group bits to be the same as owner bits
+# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
+# the same as gid, and username is the same as the primary group name.
+#
+# If set to yes, userdel will remove the user's group if it contains no
+# more members, and useradd will create by default a group with the name
+# of the user.
+#
+USERGROUPS_ENAB yes
+
+#
+# Instead of the real user shell, the program specified by this parameter
+# will be launched, although its visible name (argv[0]) will be the shell's.
+# The program may do whatever it wants (logging, additional authentication,
+# banner, ...) before running the actual shell.
+#
+# FAKE_SHELL /bin/fakeshell
+
+#
+# If defined, either full pathname of a file containing device names or
+# a ":" delimited list of device names. Root logins will be allowed only
+# upon these devices.
+#
+# This variable is used by login and su.
+#
+#CONSOLE /etc/consoles
+#CONSOLE console:tty01:tty02:tty03:tty04
+
+#
+# List of groups to add to the user's supplementary group set
+# when logging in on the console (as determined by the CONSOLE
+# setting). Default is none.
+#
+# Use with caution - it is possible for users to gain permanent
+# access to these groups, even when not logged in on the console.
+# How to do it is left as an exercise for the reader...
+#
+# This variable is used by login and su.
+#
+#CONSOLE_GROUPS floppy:audio:cdrom
+
+#
+# If set to "yes", new passwords will be encrypted using the MD5-based
+# algorithm compatible with the one used by recent releases of FreeBSD.
+# It supports passwords of unlimited length and longer salt strings.
+# Set to "no" if you need to copy encrypted passwords to other systems
+# which don't understand the new algorithm. Default is "no".
+#
+# This variable is deprecated. You should use ENCRYPT_METHOD.
+#
+MD5_CRYPT_ENAB no
+
+#
+# If set to MD5, MD5-based algorithm will be used for encrypting password
+# If set to SHA256, SHA256-based algorithm will be used for encrypting password
+# If set to SHA512, SHA512-based algorithm will be used for encrypting password
+# If set to DES, DES-based algorithm will be used for encrypting password (default)
+# Overrides the MD5_CRYPT_ENAB option
+#
+# Note: It is recommended to use a value consistent with
+# the PAM modules configuration.
+#
+ENCRYPT_METHOD SHA512
+
+#
+# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
+#
+# Define the number of SHA rounds.
+# With a lot of rounds, it is more difficult to brute-force the password.
+# But note also that more CPU resources will be needed to authenticate
+# users.
+#
+# If not specified, the libc will choose the default number of rounds (5000).
+# The values must be inside the 1000-999999999 range.
+# If only one of the MIN or MAX values is set, then this value will be used.
+# If MIN > MAX, the highest value will be used.
+#
+# SHA_CRYPT_MIN_ROUNDS 5000
+# SHA_CRYPT_MAX_ROUNDS 5000
+
+################# OBSOLETED BY PAM ##############
+# #
+# These options are now handled by PAM. Please #
+# edit the appropriate file in /etc/pam.d/ to #
+# enable the equivalents of them.
+#
+###############
+
+#MOTD_FILE
+#DIALUPS_CHECK_ENAB
+#LASTLOG_ENAB
+#MAIL_CHECK_ENAB
+#OBSCURE_CHECKS_ENAB
+#PORTTIME_CHECKS_ENAB
+#SU_WHEEL_ONLY
+#CRACKLIB_DICTPATH
+#PASS_CHANGE_TRIES
+#PASS_ALWAYS_WARN
+#ENVIRON_FILE
+#NOLOGINS_FILE
+#ISSUE_FILE
+#PASS_MIN_LEN
+#PASS_MAX_LEN
+#ULIMIT
+#ENV_HZ
+#CHFN_AUTH
+#CHSH_AUTH
+#FAIL_DELAY
+
+################# OBSOLETED #######################
+# #
+# These options are no longer handled by shadow. #
+# #
+# Shadow utilities will display a warning if they #
+# still appear. #
+# #
+###################################################
+
+# CLOSE_SESSIONS
+# LOGIN_STRING
+# NO_PASSWORD_CONSOLE
+# QMAIL_DIR
+
+
+
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/modules b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/modules
new file mode 100644
index 0000000000000000000000000000000000000000..ef0354ee35fa363b303bb22c6ed0d2d1196aed52
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/modules
@@ -0,0 +1,117 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+# /etc/modules: kernel modules to load at boot time.
+#
+# This file contains the names of kernel modules that should be loaded
+# at boot time, one per line. Lines beginning with "#" are ignored.
+# Parameters can be specified after the module name.
+
+# Arch
+# ----
+#
+# Modules for certain builds; contains support modules and some CPU-specific optimizations.
+
+{% if arch == "x86_64" -%}
+# Optimize for x86_64 cryptographic features
+twofish-x86_64-3way
+twofish-x86_64
+aes-x86_64
+salsa20-x86_64
+blowfish-x86_64
+{% endif -%}
+
+{% if cpuVendor == "intel" -%}
+# Intel-specific optimizations
+ghash-clmulni-intel
+aesni-intel
+kvm-intel
+{% endif -%}
+
+{% if cpuVendor == "amd" -%}
+# AMD-specific optimizations
+kvm-amd
+{% endif -%}
+
+kvm
+
+
+# Crypto
+# ------
+
+# Some core modules which comprise strong cryptography.
+blowfish_common
+blowfish_generic
+ctr
+cts
+lrw
+lzo
+rmd160
+rmd256
+rmd320
+serpent
+sha512_generic
+twofish_common
+twofish_generic
+xts
+zlib
+
+
+# Drivers
+# -------
+
+# Basics
+lp
+rtc
+loop
+
+# Filesystems
+ext2
+btrfs
+
+{% if desktop_enable -%}
+# Desktop
+psmouse
+snd
+snd_ac97_codec
+snd_intel8x0
+snd_page_alloc
+snd_pcm
+snd_timer
+soundcore
+usbhid
+{% endif -%}
+
+# Lib
+# ---
+xz
+
+
+# Net
+# ---
+
+# All modules needed for netfilter rules (i.e. iptables, ebtables).
+ip_tables +x_tables +iptable_filter +iptable_nat + +# Targets +ipt_LOG +ipt_REJECT + +# Modules +xt_connlimit +xt_tcpudp +xt_recent +xt_limit +xt_conntrack +nf_conntrack +nf_conntrack_ipv4 +nf_defrag_ipv4 +xt_state +nf_nat + +# Addons +xt_pknock \ No newline at end of file diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/passwdqc.conf new file mode 100644 index 0000000000000000000000000000000000000000..f98d14e57428c106692e0f57e8b381f2b0a12c44 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/passwdqc.conf @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +Name: passwdqc password strength enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Password-Type: Primary +Password: + requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh new file mode 100644 index 0000000000000000000000000000000000000000..fd2de791b96fbb8889811daf7340d1f2ca2ab3a6 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Disable core dumps via soft limits for all users. Compliance to this setting +# is voluntary and can be modified by users up to a hard limit. This setting is +# a sane default. +ulimit -S -c 0 > /dev/null 2>&1 diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/securetty b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/securetty new file mode 100644 index 0000000000000000000000000000000000000000..15b18d4e2f45747845d0b65c06997f154ef674a4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/securetty @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +# A list of TTYs, from which root can log in +# see `man securetty` for reference +{% if ttys -%} +{% for tty in ttys -%} +{{ tty }} +{% endfor -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/tally2 b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/tally2 new file mode 100644 index 0000000000000000000000000000000000000000..d9620299c55e51abbee1017a227c217cd4a9fd33 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/host/templates/tally2 @@ -0,0 +1,14 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +Name: tally2 lockout after failed attempts enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Auth-Type: Primary +Auth-Initial: + required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }} +Account-Type: Primary +Account-Initial: + required pam_tally2.so diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1990d8513bbeef067a8d9a2168e1952efb2961dc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
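The file that follows defines `run_mysql_checks()`, and every `checks/__init__.py` in this tree has the same shape: fetch the audit objects from its `config.get_audits()` and call `ensure_compliance()` on each. A generic sketch of that dispatch (the `run_checks` helper name is ours, not charmhelpers'):

```python
from charmhelpers.core.hookenv import log, DEBUG


def run_checks(get_audits, name):
    """Generic form of the run_*_checks() entry points in this tree."""
    log("Starting %s hardening checks." % name, level=DEBUG)
    for check in get_audits():
        log("Running '%s' check" % check.__class__.__name__, level=DEBUG)
        check.ensure_compliance()  # each audit verifies and remediates itself
    log("%s hardening checks complete." % name, level=DEBUG)
```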
+ +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.mysql.checks import config + + +def run_mysql_checks(): + log("Starting MySQL hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("MySQL hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/checks/config.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/checks/config.py new file mode 100644 index 0000000000000000000000000000000000000000..a79f33b74a5c2972a82f0b4d8de8d1073dc293ed --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -0,0 +1,87 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + WARNING, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get MySQL hardening config audits. + + :returns: dictionary of audits + """ + if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0: + log("MySQL does not appear to be installed on this node - " + "skipping mysql hardening", level=WARNING) + return [] + + settings = utils.get_settings('mysql') + hardening_settings = settings['hardening'] + my_cnf = hardening_settings['mysql-conf'] + + audits = [ + FilePermissionAudit(paths=[my_cnf], user='root', + group='root', mode=0o0600), + + TemplatedFile(hardening_settings['hardening-conf'], + MySQLConfContext(), + TEMPLATES_DIR, + mode=0o0750, + user='mysql', + group='root', + service_actions=[{'service': 'mysql', + 'actions': ['restart']}]), + + # MySQL and Percona charms do not allow configuration of the + # data directory, so use the default. + DirectoryPermissionAudit('/var/lib/mysql', + user='mysql', + group='mysql', + recursive=False, + mode=0o755), + + DirectoryPermissionAudit('/etc/mysql', + user='root', + group='root', + recursive=False, + mode=0o700), + ] + + return audits + + +class MySQLConfContext(object): + """Defines the set of key/value pairs to set in a mysql config file. + + This context, when called, will return a dictionary containing the + key/value pairs of setting to specify in the + /etc/mysql/conf.d/hardening.cnf file. 
+ """ + def __call__(self): + settings = utils.get_settings('mysql') + # Translate for python3 + return {'mysql_settings': + [(k, v) for k, v in six.iteritems(settings['security'])]} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf new file mode 100644 index 0000000000000000000000000000000000000000..8242586cd66360b7e6ae33f13018363b95cd4ea9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf @@ -0,0 +1,12 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +[mysqld] +{% for setting, value in mysql_settings -%} +{% if value == 'True' -%} +{{ setting }} +{% elif value != 'None' and value != None -%} +{{ setting }} = {{ value }} +{% endif -%} +{% endfor -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..edaf484b39f8c7353cebb2f4b68944c6493ba7b3 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/checks/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.ssh.checks import config + + +def run_ssh_checks(): + log("Starting SSH hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("SSH hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/checks/config.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/checks/config.py new file mode 100644 index 0000000000000000000000000000000000000000..41bed2d1e7b031182edcf62710876e4073dfbc6e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -0,0 +1,435 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_iface_addr, + is_ip, +) +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, +) +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + FileContentAudit, +) +from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get SSH hardening config audits. 
+
+    :returns: list of audits
+    """
+    audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
+              SSHDConfigFileContentAudit()]
+    return audits
+
+
+class SSHConfigContext(object):
+
+    type = 'client'
+
+    def get_macs(self, allow_weak_mac):
+        if allow_weak_mac:
+            weak_macs = 'weak'
+        else:
+            weak_macs = 'default'
+
+        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+        macs = {'default': default,
+                'weak': default + ',hmac-sha1'}
+
+        default = ('hmac-sha2-512-etm@openssh.com,'
+                   'hmac-sha2-256-etm@openssh.com,'
+                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
+                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
+        macs_66 = {'default': default,
+                   'weak': default + ',hmac-sha1'}
+
+        # Use newer MACs on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
+            macs = macs_66
+
+        return macs[weak_macs]
+
+    def get_kexs(self, allow_weak_kex):
+        if allow_weak_kex:
+            weak_kex = 'weak'
+        else:
+            weak_kex = 'default'
+
+        default = 'diffie-hellman-group-exchange-sha256'
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex = {'default': default,
+               'weak': weak}
+
+        default = ('curve25519-sha256@libssh.org,'
+                   'diffie-hellman-group-exchange-sha256')
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex_66 = {'default': default,
+                  'weak': weak}
+
+        # Use newer kex on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new key exchange '
+                'algorithms', level=DEBUG)
+            kex = kex_66
+
+        return kex[weak_kex]
+
+    def get_ciphers(self, cbc_required):
+        if cbc_required:
+            weak_ciphers = 'weak'
+        else:
+            weak_ciphers = 'default'
+
+        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
+        cipher = {'default': default,
+                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
+                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
+        ciphers_66 = {'default': default,
+                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        # Use newer ciphers on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new ciphers',
+                level=DEBUG)
+            cipher = ciphers_66
+
+        return cipher[weak_ciphers]
+
+    def get_listening(self, listen=['0.0.0.0']):
+        """Returns a list of addresses SSH can listen on
+
+        Turns input into a sensible list of IPs SSH can listen on. Input
+        must be a python list of interface names, IPs and/or CIDRs.
+
+        :param listen: list of IPs, CIDRs, interface names
+
+        :returns: list of IPs available on the host
+        """
+        if listen == ['0.0.0.0']:
+            return listen
+
+        value = []
+        for network in listen:
+            try:
+                ip = get_address_in_network(network=network, fatal=True)
+            except ValueError:
+                if is_ip(network):
+                    ip = network
+                else:
+                    try:
+                        ip = get_iface_addr(iface=network, fatal=False)[0]
+                    except IndexError:
+                        continue
+            value.append(ip)
+        if value == []:
+            return ['0.0.0.0']
+        return value
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'addr_family': addr_family,
+            'remote_hosts': settings['common']['remote_hosts'],
+            'password_auth_allowed':
+            settings['client']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
+            'macs': self.get_macs(settings['client']['weak_hmac']),
+            'kexs': self.get_kexs(settings['client']['weak_kex']),
+            'roaming': settings['client']['roaming'],
+        }
+        return ctxt
+
+
+class SSHConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/ssh_config'
+        super(SSHConfig, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=SSHConfigContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0644)
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['client']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+            # NOTE: don't recurse
+            utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                     maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHDConfigContext(SSHConfigContext):
+
+    type = 'server'
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'ssh_ip': self.get_listening(settings['server']['listen_to']),
+            'password_auth_allowed':
+            settings['server']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'addr_family': addr_family,
+            'ciphers': self.get_ciphers(settings['server']['cbc_required']),
+            'macs': self.get_macs(settings['server']['weak_hmac']),
+            'kexs': self.get_kexs(settings['server']['weak_kex']),
+            'host_key_files': settings['server']['host_key_files'],
+            'allow_root_with_key': settings['server']['allow_root_with_key'],
+            'password_authentication':
+            settings['server']['password_authentication'],
+            'use_priv_sep': settings['server']['use_privilege_separation'],
+            'use_pam': settings['server']['use_pam'],
+            'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
+            'print_motd': settings['server']['print_motd'],
+            'print_last_log': settings['server']['print_last_log'],
+            'client_alive_interval':
+            settings['server']['alive_interval'],
+            'client_alive_count': settings['server']['alive_count'],
+            'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
+            'allow_agent_forwarding':
+            settings['server']['allow_agent_forwarding'],
+            'deny_users': settings['server']['deny_users'],
+            'allow_users': settings['server']['allow_users'],
+            'deny_groups': settings['server']['deny_groups'],
+            'allow_groups': settings['server']['allow_groups'],
+            'use_dns': settings['server']['use_dns'],
+            'sftp_enable': settings['server']['sftp_enable'],
+            'sftp_group': settings['server']['sftp_group'],
+            'sftp_chroot':
settings['server']['sftp_chroot'],
+            'max_auth_tries': settings['server']['max_auth_tries'],
+            'max_sessions': settings['server']['max_sessions'],
+        }
+        return ctxt
+
+
+class SSHDConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/sshd_config'
+        super(SSHDConfig, self).__init__(path=path,
+                                         template_dir=TEMPLATES_DIR,
+                                         context=SSHDConfigContext(),
+                                         user='root',
+                                         group='root',
+                                         mode=0o0600,
+                                         service_actions=[{'service': 'ssh',
+                                                           'actions':
+                                                           ['restart']}])
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['server']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+            # NOTE: don't recurse
+            utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                     maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/ssh_config'
+        super(SSHConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        # This audit covers the *client* config, so read the client settings.
+        settings = utils.get_settings('ssh')
+
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            if not settings['client']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+        else:
+            if not settings['client']['weak_hmac']:
+                self.fail_cases.append(r'^MACs.+,hmac-sha1$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+
+            if
settings['client']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['client']['roaming']: + self.pass_cases.append(r'^UseRoaming yes$') + else: + self.fail_cases.append(r'^UseRoaming yes$') + + return super(SSHConfigFileContentAudit, self).is_compliant(*args, + **kwargs) + + +class SSHDConfigFileContentAudit(FileContentAudit): + def __init__(self): + self.path = '/etc/ssh/sshd_config' + super(SSHDConfigFileContentAudit, self).__init__(self.path, {}) + + def is_compliant(self, *args, **kwargs): + self.pass_cases = [] + self.fail_cases = [] + settings = utils.get_settings('ssh') + + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + 
self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['server']['sftp_enable']: + self.pass_cases.append(r'^Subsystem\ssftp') + else: + self.fail_cases.append(r'^Subsystem\ssftp') + + return super(SSHDConfigFileContentAudit, self).is_compliant(*args, + **kwargs) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/templates/ssh_config new file mode 100644 index 0000000000000000000000000000000000000000..9742d8e2a32cd5da01a9dcb691a5a1201ed93050 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/templates/ssh_config @@ -0,0 +1,70 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# This is the ssh client system-wide configuration file. See +# ssh_config(5) for more information. This file provides defaults for +# users, and the values can be changed in per-user configuration files +# or on the command line. + +# Configuration data is parsed as follows: +# 1. command line options +# 2. user-specific file +# 3. system-wide file +# Any configuration value is only changed the first time it is set. +# Thus, host-specific definitions should be at the beginning of the +# configuration file, and defaults at the end. + +# Site-wide defaults for some commonly used options. For a comprehensive +# list of available options, their meanings and defaults, please see the +# ssh_config(5) man page. + +# Restrict the following configuration to be limited to this Host. 
+{% if remote_hosts -%} +Host {{ ' '.join(remote_hosts) }} +{% endif %} +ForwardAgent no +ForwardX11 no +ForwardX11Trusted yes +RhostsRSAAuthentication no +RSAAuthentication yes +PasswordAuthentication {{ password_auth_allowed }} +HostbasedAuthentication no +GSSAPIAuthentication no +GSSAPIDelegateCredentials no +GSSAPIKeyExchange no +GSSAPITrustDNS no +BatchMode no +CheckHostIP yes +AddressFamily {{ addr_family }} +ConnectTimeout 0 +StrictHostKeyChecking ask +IdentityFile ~/.ssh/identity +IdentityFile ~/.ssh/id_rsa +IdentityFile ~/.ssh/id_dsa +# The port at the destination should be defined +{% for port in ports -%} +Port {{ port }} +{% endfor %} +Protocol 2 +Cipher 3des +{% if ciphers -%} +Ciphers {{ ciphers }} +{%- endif %} +{% if macs -%} +MACs {{ macs }} +{%- endif %} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{%- endif %} +EscapeChar ~ +Tunnel no +TunnelDevice any:any +PermitLocalCommand no +VisualHostKey no +RekeyLimit 1G 1h +SendEnv LANG LC_* +HashKnownHosts yes +{% if roaming -%} +UseRoaming {{ roaming }} +{% endif %} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/templates/sshd_config new file mode 100644 index 0000000000000000000000000000000000000000..5f87298a8119bcab1d2578bcaefd068e5af167c4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/ssh/templates/sshd_config @@ -0,0 +1,159 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Package generated configuration file +# See the sshd_config(5) manpage for details + +# What ports, IPs and protocols we listen for +{% for port in ports -%} +Port {{ port }} +{% endfor -%} +AddressFamily {{ addr_family }} +# Use these options to restrict which interfaces/protocols sshd will bind to +{% if ssh_ip -%} +{% for ip in ssh_ip -%} +ListenAddress {{ ip }} +{% endfor %} +{%- else -%} +ListenAddress :: +ListenAddress 0.0.0.0 +{% endif -%} +Protocol 2 +{% if ciphers -%} +Ciphers {{ ciphers }} +{% endif -%} +{% if macs -%} +MACs {{ macs }} +{% endif -%} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{% endif -%} +# HostKeys for protocol version 2 +{% for keyfile in host_key_files -%} +HostKey {{ keyfile }} +{% endfor -%} + +# Privilege Separation is turned on for security +{% if use_priv_sep -%} +UsePrivilegeSeparation {{ use_priv_sep }} +{% endif -%} + +# Lifetime and size of ephemeral version 1 server key +KeyRegenerationInterval 3600 +ServerKeyBits 1024 + +# Logging +SyslogFacility AUTH +LogLevel VERBOSE + +# Authentication: +LoginGraceTime 30s +{% if allow_root_with_key -%} +PermitRootLogin without-password +{% else -%} +PermitRootLogin no +{% endif %} +PermitTunnel no +PermitUserEnvironment no +StrictModes yes + +RSAAuthentication yes +PubkeyAuthentication yes +AuthorizedKeysFile %h/.ssh/authorized_keys + +# Don't read the user's ~/.rhosts and ~/.shosts files +IgnoreRhosts yes +# For this to work you will also need host keys in /etc/ssh_known_hosts +RhostsRSAAuthentication no +# similar for protocol version 2 +HostbasedAuthentication no +# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication +IgnoreUserKnownHosts yes + +# To enable empty passwords, change to yes (NOT RECOMMENDED) +PermitEmptyPasswords no + +# Change to yes 
to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication no
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication {{ password_authentication }}
+
+# Kerberos options
+KerberosAuthentication no
+KerberosGetAFSToken no
+KerberosOrLocalPasswd no
+KerberosTicketCleanup yes
+
+# GSSAPI options
+GSSAPIAuthentication no
+GSSAPICleanupCredentials yes
+
+X11Forwarding {{ allow_x11_forwarding }}
+X11DisplayOffset 10
+X11UseLocalhost yes
+GatewayPorts no
+PrintMotd {{ print_motd }}
+PrintLastLog {{ print_last_log }}
+TCPKeepAlive no
+UseLogin no
+
+ClientAliveInterval {{ client_alive_interval }}
+ClientAliveCountMax {{ client_alive_count }}
+AllowTcpForwarding {{ allow_tcp_forwarding }}
+AllowAgentForwarding {{ allow_agent_forwarding }}
+
+MaxStartups 10:30:100
+#Banner /etc/issue.net
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM {{ use_pam }}
+
+{% if deny_users -%}
+DenyUsers {{ deny_users }}
+{% endif -%}
+{% if allow_users -%}
+AllowUsers {{ allow_users }}
+{% endif -%}
+{% if deny_groups -%}
+DenyGroups {{ deny_groups }}
+{% endif -%}
+{% if allow_groups -%}
+AllowGroups {{ allow_groups }}
+{% endif -%}
+UseDNS {{ use_dns }}
+MaxAuthTries {{ max_auth_tries }}
+MaxSessions {{ max_sessions }}
+
+{% if sftp_enable -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+Subsystem sftp internal-sftp -l VERBOSE
+
+## These lines must appear at the *end* of sshd_config
+Match Group {{ sftp_group }}
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory {{ sftp_chroot }}
+{% else -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+## These lines must appear at the *end* of sshd_config
+Match Group sftponly
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory /sftpchroot/home/%u
+{% endif %}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/templating.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/templating.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b6765f7edeee4bed739fd354c6f7bdf0a8c952e
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/templating.py
@@ -0,0 +1,73 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import six + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) + +try: + from jinja2 import FileSystemLoader, Environment +except ImportError: + from charmhelpers.fetch import apt_install + from charmhelpers.fetch import apt_update + apt_update(fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment + + +# NOTE: function separated from main rendering code to facilitate easier +# mocking in unit tests. +def write(path, data): + with open(path, 'wb') as out: + out.write(data) + + +def get_template_path(template_dir, path): + """Returns the template file which would be used to render the path. + + The path to the template file is returned. + :param template_dir: the directory the templates are located in + :param path: the file path to be written to. + :returns: path to the template file + """ + return os.path.join(template_dir, os.path.basename(path)) + + +def render_and_write(template_dir, path, context): + """Renders the specified template into the file. + + :param template_dir: the directory to load the template from + :param path: the path to write the templated contents to + :param context: the parameters to pass to the rendering engine + """ + env = Environment(loader=FileSystemLoader(template_dir)) + template_file = os.path.basename(path) + template = env.get_template(template_file) + log('Rendering from template: %s' % template.name, level=DEBUG) + rendered_content = template.render(context) + if not rendered_content: + log("Render returned None - skipping '%s'" % path, + level=WARNING) + return + + write(path, rendered_content.encode('utf-8').strip()) + log('Wrote template %s' % path, level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/utils.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ff7485c28c8748ba366dba54f1e3b8f7e6a7c619 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/hardening/utils.py @@ -0,0 +1,155 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import grp +import os +import pwd +import six +import yaml + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + INFO, + WARNING, + ERROR, +) + + +# Global settings cache. Since each hook fire entails a fresh module import it +# is safe to hold this in memory and not risk missing config changes (since +# they will result in a new hook fire and thus re-import). +__SETTINGS__ = {} + + +def _get_defaults(modules): + """Load the default config for the provided modules. + + :param modules: stack modules config defaults to lookup. + :returns: modules default config dictionary. 
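+
+    Example (illustrative; assumes the charm ships hardening defaults under
+    this module's defaults/ directory):
+
+        _get_defaults('ssh')  # loads <module dir>/defaults/ssh.yaml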
+ """ + default = os.path.join(os.path.dirname(__file__), + 'defaults/%s.yaml' % (modules)) + return yaml.safe_load(open(default)) + + +def _get_schema(modules): + """Load the config schema for the provided modules. + + NOTE: this schema is intended to have 1-1 relationship with they keys in + the default config and is used a means to verify valid overrides provided + by the user. + + :param modules: stack modules config schema to lookup. + :returns: modules default schema dictionary. + """ + schema = os.path.join(os.path.dirname(__file__), + 'defaults/%s.yaml.schema' % (modules)) + return yaml.safe_load(open(schema)) + + +def _get_user_provided_overrides(modules): + """Load user-provided config overrides. + + :param modules: stack modules to lookup in user overrides yaml file. + :returns: overrides dictionary. + """ + overrides = os.path.join(os.environ['JUJU_CHARM_DIR'], + 'hardening.yaml') + if os.path.exists(overrides): + log("Found user-provided config overrides file '%s'" % + (overrides), level=DEBUG) + settings = yaml.safe_load(open(overrides)) + if settings and settings.get(modules): + log("Applying '%s' overrides" % (modules), level=DEBUG) + return settings.get(modules) + + log("No overrides found for '%s'" % (modules), level=DEBUG) + else: + log("No hardening config overrides file '%s' found in charm " + "root dir" % (overrides), level=DEBUG) + + return {} + + +def _apply_overrides(settings, overrides, schema): + """Get overrides config overlayed onto modules defaults. + + :param modules: require stack modules config. + :returns: dictionary of modules config with user overrides applied. + """ + if overrides: + for k, v in six.iteritems(overrides): + if k in schema: + if schema[k] is None: + settings[k] = v + elif type(schema[k]) is dict: + settings[k] = _apply_overrides(settings[k], overrides[k], + schema[k]) + else: + raise Exception("Unexpected type found in schema '%s'" % + type(schema[k]), level=ERROR) + else: + log("Unknown override key '%s' - ignoring" % (k), level=INFO) + + return settings + + +def get_settings(modules): + global __SETTINGS__ + if modules in __SETTINGS__: + return __SETTINGS__[modules] + + schema = _get_schema(modules) + settings = _get_defaults(modules) + overrides = _get_user_provided_overrides(modules) + __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema) + return __SETTINGS__[modules] + + +def ensure_permissions(path, user, group, permissions, maxdepth=-1): + """Ensure permissions for path. + + If path is a file, apply to file and return. If path is a directory, + apply recursively (if required) to directory contents and return. + + :param user: user name + :param group: group name + :param permissions: octal permissions + :param maxdepth: maximum recursion depth. A negative maxdepth allows + infinite recursion and maxdepth=0 means no recursion. 
+ :returns: None + """ + if not os.path.exists(path): + log("File '%s' does not exist - cannot set permissions" % (path), + level=WARNING) + return + + _user = pwd.getpwnam(user) + os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid) + os.chmod(path, permissions) + + if maxdepth == 0: + log("Max recursion depth reached - skipping further recursion", + level=DEBUG) + return + elif maxdepth > 0: + maxdepth -= 1 + + if os.path.isdir(path): + contents = glob.glob("%s/*" % (path)) + for c in contents: + ensure_permissions(c, user=user, group=group, + permissions=permissions, maxdepth=maxdepth) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/mellanox/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/mellanox/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b088de84e4b288b551603816fc10eebfa7b1503 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/mellanox/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/mellanox/infiniband.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/mellanox/infiniband.py new file mode 100644 index 0000000000000000000000000000000000000000..0edb2314114738c79859f51e505de05e5c7c8fcc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/mellanox/infiniband.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
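+
+# Typical usage of the helpers below (illustrative sketch only):
+#
+#     install_packages()   # install ibutils, infiniband-diags, ibverbs-utils
+#     load_modules()       # persistently modprobe the REQUIRED_MODULES
+#     if is_enabled():
+#         for dev in devices():
+#             log(device_info(dev).device_type)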
+
+__author__ = "Jorge Niedbalski "
+
+import six
+
+from charmhelpers.fetch import (
+    apt_install,
+    apt_update,
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+
+try:
+    from netifaces import interfaces as network_interfaces
+except ImportError:
+    if six.PY2:
+        apt_install('python-netifaces')
+    else:
+        apt_install('python3-netifaces')
+    from netifaces import interfaces as network_interfaces
+
+import os
+import re
+import subprocess
+
+from charmhelpers.core.kernel import modprobe
+
+REQUIRED_MODULES = (
+    "mlx4_ib",
+    "mlx4_en",
+    "mlx4_core",
+    "ib_ipath",
+    "ib_mthca",
+    "ib_srpt",
+    "ib_srp",
+    "ib_ucm",
+    "ib_isert",
+    "ib_iser",
+    "ib_ipoib",
+    "ib_cm",
+    "ib_uverbs",
+    "ib_umad",
+    "ib_sa",
+    "ib_mad",
+    "ib_core",
+    "ib_addr",
+    "rdma_ucm",
+)
+
+REQUIRED_PACKAGES = (
+    "ibutils",
+    "infiniband-diags",
+    "ibverbs-utils",
+)
+
+IPOIB_DRIVERS = (
+    "ib_ipoib",
+)
+
+ABI_VERSION_FILE = "/sys/class/infiniband_mad/abi_version"
+
+
+class DeviceInfo(object):
+    pass
+
+
+def install_packages():
+    apt_update()
+    apt_install(REQUIRED_PACKAGES, fatal=True)
+
+
+def load_modules():
+    for module in REQUIRED_MODULES:
+        modprobe(module, persist=True)
+
+
+def is_enabled():
+    """Check if infiniband is loaded on the system"""
+    return os.path.exists(ABI_VERSION_FILE)
+
+
+def stat():
+    """Return full output of ibstat"""
+    return subprocess.check_output(["ibstat"])
+
+
+def devices():
+    """Returns a list of IB enabled devices"""
+    return subprocess.check_output(['ibstat', '-l']).splitlines()
+
+
+def device_info(device):
+    """Returns a DeviceInfo object with the current device settings"""
+
+    # ibstat output is bytes under Python 3; decode it so the str regexes
+    # below can match.
+    status = subprocess.check_output(
+        ['ibstat', device, '-s']).decode('UTF-8').splitlines()
+
+    regexes = {
+        "CA type: (.*)": "device_type",
+        "Number of ports: (.*)": "num_ports",
+        "Firmware version: (.*)": "fw_ver",
+        "Hardware version: (.*)": "hw_ver",
+        "Node GUID: (.*)": "node_guid",
+        "System image GUID: (.*)": "sys_guid",
+    }
+
+    device = DeviceInfo()
+
+    for line in status:
+        for expression, key in regexes.items():
+            matches = re.search(expression, line)
+            if matches:
+                setattr(device, key, matches.group(1))
+
+    return device
+
+
+def ipoib_interfaces():
+    """Return a list of IPOIB capable ethernet interfaces"""
+    interfaces = []
+
+    for interface in network_interfaces():
+        try:
+            # ethtool output is bytes under Python 3; decode before matching.
+            driver = re.search('^driver: (.+)$', subprocess.check_output([
+                'ethtool', '-i',
+                interface]).decode('UTF-8'), re.M).group(1)
+
+            if driver in IPOIB_DRIVERS:
+                interfaces.append(interface)
+        except Exception:
+            log("Skipping interface %s" % interface, level=INFO)
+            continue
+
+    return interfaces
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ip.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ip.py new file mode 100644 index 0000000000000000000000000000000000000000..b13277bb57c9227b1d9dfecf4f6750740e5a262a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ip.py @@ -0,0 +1,602 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import re +import subprocess +import six +import socket + +from functools import partial + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import ( + config, + log, + network_get_primary_address, + unit_get, + WARNING, + NoNetworkBinding, +) + +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) + +try: + import netifaces +except ImportError: + apt_update(fatal=True) + if six.PY2: + apt_install('python-netifaces', fatal=True) + else: + apt_install('python3-netifaces', fatal=True) + import netifaces + +try: + import netaddr +except ImportError: + apt_update(fatal=True) + if six.PY2: + apt_install('python-netaddr', fatal=True) + else: + apt_install('python3-netaddr', fatal=True) + import netaddr + + +def _validate_cidr(network): + try: + netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network(s): %s" % network) + raise ValueError(errmsg) + + +def _get_ipv6_network_from_address(address): + """Get an netaddr.IPNetwork for the given IPv6 address + :param address: a dict as returned by netifaces.ifaddresses + :returns netaddr.IPNetwork: None if the address is a link local or loopback + address + """ + if address['addr'].startswith('fe80') or address['addr'] == "::1": + return None + + prefix = address['netmask'].split("/") + if len(prefix) > 1: + netmask = prefix[1] + else: + netmask = address['netmask'] + return netaddr.IPNetwork("%s/%s" % (address['addr'], + netmask)) + + +def get_address_in_network(network, fallback=None, fatal=False): + """Get an IPv4 or IPv6 address within the network from the host. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. + :param fallback (str): If no address is found, return fallback. + :param fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). 
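+
+    Example (illustrative):
+
+        get_address_in_network('192.168.1.0/24', fallback='10.0.0.1')
+        # -> '192.168.1.5' when an interface holds an address in that
+        #    network, '10.0.0.1' otherwise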
+ """ + if network is None: + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + else: + return None + + networks = network.split() or [network] + for network in networks: + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + try: + addresses = netifaces.ifaddresses(iface) + except ValueError: + # If an instance was deleted between + # netifaces.interfaces() run and now, its interfaces are gone + continue + if network.version == 4 and netifaces.AF_INET in addresses: + for addr in addresses[netifaces.AF_INET]: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) + + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + cidr = _get_ipv6_network_from_address(addr) + if cidr and cidr in network: + return str(cidr.ip) + + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + + return None + + +def is_ipv6(address): + """Determine whether provided address is IPv6 or not.""" + try: + address = netaddr.IPAddress(address) + except netaddr.AddrFormatError: + # probably a hostname - so not an address at all! + return False + + return address.version == 6 + + +def is_address_in_network(network, address): + """ + Determine whether the provided address is within a network range. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param address: An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns boolean: Flag indicating whether address is in network. + """ + try: + network = netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + try: + address = netaddr.IPAddress(address) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Address (%s) is not in correct presentation format" % + address) + + if address in network: + return True + else: + return False + + +def _get_for_address(address, key): + """Retrieve an attribute of or the physical interface that + the IP address provided could be bound to. + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :param key: 'iface' for the physical interface name or an attribute + of the configured interface, for example 'netmask'. + :returns str: Requested attribute or None if address is not bindable. 
+ """ + address = netaddr.IPAddress(address) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if address.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + else: + return addresses[netifaces.AF_INET][0][key] + + if address.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + network = _get_ipv6_network_from_address(addr) + if not network: + continue + + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] + return None + + +get_iface_for_address = partial(_get_for_address, key='iface') + + +get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def resolve_network_cidr(ip_address): + ''' + Resolves the full address cidr of an ip_address based on + configured network interfaces + ''' + netmask = get_netmask_for_address(ip_address) + return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) + + +def format_ipv6_addr(address): + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. + """ + if is_ipv6(address): + return "[%s]" % address + + return None + + +def is_ipv6_disabled(): + try: + result = subprocess.check_output( + ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError: + return True + + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): + """Return the assigned IP address for a given interface, if any. + + :param iface: network interface on which address(es) are expected to + be found. + :param inet_type: inet address family + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :return: list of ip addresses + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + + if not exc_list: + exc_list = [] + + try: + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception("Unknown inet type '%s'" % str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("Interface '%s' not found " % (iface)) + else: + return [] + + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." 
% + (iface, inet_type)) + + return sorted(addresses) + + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. + + :param iface: network interface on which ipv6 address(es) are expected to + be found. + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :param dynamic_only: only recognise dynamic addresses + :return: list of ipv6 addresses + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd).decode('UTF-8') + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' does not have a scope global " + "non-temporary ipv6 address." 
% iface)
+
+    return []
+
+
+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
+    """Return a list of bridges on the system."""
+    b_regex = "%s/*/bridge" % vnic_dir
+    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
+
+
+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
+    """Return a list of nics comprising a given bridge on the system."""
+    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
+    return [x.split('/')[-1] for x in glob.glob(brif_regex)]
+
+
+def is_bridge_member(nic):
+    """Check if a given nic is a member of a bridge."""
+    for bridge in get_bridges():
+        if nic in get_bridge_nics(bridge):
+            return True
+
+    return False
+
+
+def is_ip(address):
+    """
+    Returns True if address is a valid IP address.
+    """
+    try:
+        # Test to see if already an IPv4/IPv6 address
+        address = netaddr.IPAddress(address)
+        return True
+    except (netaddr.AddrFormatError, ValueError):
+        return False
+
+
+def ns_query(address):
+    try:
+        import dns.resolver
+    except ImportError:
+        if six.PY2:
+            apt_install('python-dnspython', fatal=True)
+        else:
+            apt_install('python3-dnspython', fatal=True)
+        import dns.resolver
+
+    if isinstance(address, dns.name.Name):
+        rtype = 'PTR'
+    elif isinstance(address, six.string_types):
+        rtype = 'A'
+    else:
+        return None
+
+    try:
+        answers = dns.resolver.query(address, rtype)
+    except dns.resolver.NXDOMAIN:
+        return None
+
+    if answers:
+        return str(answers[0])
+    return None
+
+
+def get_host_ip(hostname, fallback=None):
+    """
+    Resolves the IP for a given hostname, or returns
+    the input if it is already an IP.
+    """
+    if is_ip(hostname):
+        return hostname
+
+    ip_addr = ns_query(hostname)
+    if not ip_addr:
+        try:
+            ip_addr = socket.gethostbyname(hostname)
+        except Exception:
+            log("Failed to resolve hostname '%s'" % (hostname),
+                level=WARNING)
+            return fallback
+    return ip_addr
+
+
+def get_hostname(address, fqdn=True):
+    """
+    Resolves hostname for given IP, or returns the input
+    if it is already a hostname.
+    """
+    if is_ip(address):
+        try:
+            import dns.reversename
+        except ImportError:
+            if six.PY2:
+                apt_install("python-dnspython", fatal=True)
+            else:
+                apt_install("python3-dnspython", fatal=True)
+            import dns.reversename
+
+        rev = dns.reversename.from_address(address)
+        result = ns_query(rev)
+
+        if not result:
+            try:
+                result = socket.gethostbyaddr(address)[0]
+            except Exception:
+                return None
+    else:
+        result = address
+
+    if fqdn:
+        # strip trailing .
+        if result.endswith('.'):
+            return result[:-1]
+        else:
+            return result
+    else:
+        return result.split('.')[0]
+
+
+def port_has_listener(address, port):
+    """
+    Returns True if the address:port is open and being listened to,
+    else False.
+
+    @param address: an IP address or hostname
+    @param port: integer port
+
+    Note calls 'nc' via a subprocess shell
+    """
+    cmd = ['nc', '-z', address, str(port)]
+    result = subprocess.call(cmd)
+    return not(bool(result))
+
+
+def assert_charm_supports_ipv6():
+    """Check whether we are able to support charms ipv6."""
+    release = lsb_release()['DISTRIB_CODENAME'].lower()
+    if CompareHostReleases(release) < "trusty":
+        raise Exception("IPv6 is not supported in the charms for Ubuntu "
+                        "versions less than Trusty 14.04")
+
+
+def get_relation_ip(interface, cidr_network=None):
+    """Return this unit's IP for the given interface.
+
+    Allow for an arbitrary interface to use with network-get to select an IP.
+    Handle all address selection options including passed cidr network and
+    IPv6.
+
+    Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8')
+
+    @param interface: string name of the relation.
+    @param cidr_network: string CIDR Network to select an address from.
+    @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
+    @returns IPv6 or IPv4 address
+    """
+    # Select the interface address first
+    # For possible use as a fallback below with get_address_in_network
+    try:
+        # Get the interface specific IP
+        address = network_get_primary_address(interface)
+    except NotImplementedError:
+        # If network-get is not available
+        address = get_host_ip(unit_get('private-address'))
+    except NoNetworkBinding:
+        log("No network binding for {}".format(interface), WARNING)
+        address = get_host_ip(unit_get('private-address'))
+
+    if config('prefer-ipv6'):
+        # Currently IPv6 has priority, eventually we want IPv6 to just be
+        # another network space.
+        assert_charm_supports_ipv6()
+        return get_ipv6_addr()[0]
+    elif cidr_network:
+        # If a specific CIDR network is passed get the address from that
+        # network.
+        return get_address_in_network(cidr_network, address)
+
+    # Return the interface address
+    return address
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd001bc2eaa9b1b511bbd1816e1089521935a50a
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/__init__.py
@@ -0,0 +1,541 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+''' Helpers for interacting with OpenvSwitch '''
+import hashlib
+import subprocess
+import os
+import six
+
+from charmhelpers.fetch import apt_install
+
+
+from charmhelpers.core.hookenv import (
+    log, WARNING, INFO, DEBUG
+)
+from charmhelpers.core.host import (
+    service
+)
+
+
+BRIDGE_TEMPLATE = """\
+# This veth pair is required when neutron data-port is mapped to an existing linux bridge.
lp:1635067
+
+auto {linuxbridge_port}
+iface {linuxbridge_port} inet manual
+    pre-up ip link add name {linuxbridge_port} type veth peer name {ovsbridge_port}
+    pre-up ip link set {ovsbridge_port} master {bridge}
+    pre-up ip link set {ovsbridge_port} up
+    up ip link set {linuxbridge_port} up
+    down ip link del {linuxbridge_port}
+"""
+
+MAX_KERNEL_INTERFACE_NAME_LEN = 15
+
+
+def get_bridges():
+    """Return list of the bridges on the default openvswitch
+
+    :returns: List of bridge names
+    :rtype: List[str]
+    :raises: subprocess.CalledProcessError if ovs-vsctl fails
+    """
+    cmd = ["ovs-vsctl", "list-br"]
+    lines = subprocess.check_output(cmd).decode('utf-8').split("\n")
+    maybe_bridges = [l.strip() for l in lines]
+    return [b for b in maybe_bridges if b]
+
+
+def get_bridge_ports(name):
+    """Return a list of the ports on a named bridge
+
+    :param name: the name of the bridge to list
+    :type name: str
+    :returns: List of ports on the named bridge
+    :rtype: List[str]
+    :raises: subprocess.CalledProcessError if the ovs-vsctl command fails. If
+             the named bridge doesn't exist, then the exception will be raised.
+    """
+    cmd = ["ovs-vsctl", "--", "list-ports", name]
+    lines = subprocess.check_output(cmd).decode('utf-8').split("\n")
+    maybe_ports = [l.strip() for l in lines]
+    return [p for p in maybe_ports if p]
+
+
+def get_bridges_and_ports_map():
+    """Return dictionary of bridge to ports for the default openvswitch
+
+    :returns: a mapping of bridge name to a list of ports.
+    :rtype: Dict[str, List[str]]
+    :raises: subprocess.CalledProcessError if any of the underlying ovs-vsctl
+             commands fail.
+    """
+    return {b: get_bridge_ports(b) for b in get_bridges()}
+
+
+def _dict_to_vsctl_set(data, table, entity):
+    """Helper that takes a dictionary and yields ``ovs-vsctl set`` commands
+
+    :param data: Additional data to attach to interface
+        The keys in the data dictionary map directly to column names in the
+        OpenvSwitch table specified as defined in DB-SCHEMA [0] referenced in
+        RFC 7047 [1]
+
+        There are some established conventions for keys in the external-ids
+        column of various tables, consult the OVS Integration Guide [2] for
+        more details.
+
+        NOTE(fnordahl): Technically the ``external-ids`` column is called
+        ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
+        behaviour of transforming dashes to underscores for us [3] so we can
+        have a more pleasant data structure.
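+
+        Example (illustrative):
+
+            _dict_to_vsctl_set({'external-ids': {'charm': 'managed'}},
+                               'Bridge', 'br-ex')
+            # yields ('--', 'set', 'Bridge', 'br-ex',
+            #         'external-ids:charm=managed')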
+ + 0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf + 1: https://tools.ietf.org/html/rfc7047 + 2: http://docs.openvswitch.org/en/latest/topics/integration/ + 3: https://github.com/openvswitch/ovs/blob/ + 20dac08fdcce4b7fda1d07add3b346aa9751cfbc/ + lib/db-ctl-base.c#L189-L215 + :type data: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param table: Name of table to operate on + :type table: str + :param entity: Name of entity to operate on + :type entity: str + :returns: '--' separated ``ovs-vsctl set`` commands + :rtype: Iterator[Tuple[str, str, str, str, str]] + """ + for (k, v) in data.items(): + if isinstance(v, dict): + entries = { + '{}:{}'.format(k, dk): dv for (dk, dv) in v.items()} + else: + entries = {k: v} + for (colk, colv) in entries.items(): + yield ('--', 'set', table, entity, '{}={}'.format(colk, colv)) + + +def add_bridge(name, datapath_type=None, brdata=None, exclusive=False): + """Add the named bridge to openvswitch and set/update bridge data for it + + :param name: Name of bridge to create + :type name: str + :param datapath_type: Add datapath_type to bridge (DEPRECATED, use brdata) + :type datapath_type: Optional[str] + :param brdata: Additional data to attach to bridge + The keys in the brdata dictionary map directly to column names in the + OpenvSwitch bridge table as defined in DB-SCHEMA [0] referenced in + RFC 7047 [1] + + There are some established conventions for keys in the external-ids + column of various tables, consult the OVS Integration Guide [2] for + more details. + + NOTE(fnordahl): Technically the ``external-ids`` column is called + ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s + behaviour of transforming dashes to underscores for us [3] so we can + have a more pleasant data structure. 
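+
+        Example (illustrative):
+
+            add_bridge('br-ex', brdata={'external-ids': {'charm': 'managed'}})
+            # runs: ovs-vsctl -- --may-exist add-br br-ex
+            #       -- set bridge br-ex external-ids:charm=managed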
+ + 0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf + 1: https://tools.ietf.org/html/rfc7047 + 2: http://docs.openvswitch.org/en/latest/topics/integration/ + 3: https://github.com/openvswitch/ovs/blob/ + 20dac08fdcce4b7fda1d07add3b346aa9751cfbc/ + lib/db-ctl-base.c#L189-L215 + :type brdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param exclusive: If True, raise exception if bridge exists + :type exclusive: bool + :raises: subprocess.CalledProcessError + """ + log('Creating bridge {}'.format(name)) + cmd = ['ovs-vsctl', '--'] + if not exclusive: + cmd.append('--may-exist') + cmd.extend(('add-br', name)) + if brdata: + for setcmd in _dict_to_vsctl_set(brdata, 'bridge', name): + cmd.extend(setcmd) + if datapath_type is not None: + log('DEPRECATION WARNING: add_bridge called with datapath_type, ' + 'please use the brdata keyword argument instead.') + cmd += ['--', 'set', 'bridge', name, + 'datapath_type={}'.format(datapath_type)] + subprocess.check_call(cmd) + + +def del_bridge(name): + """Delete the named bridge from openvswitch + + :param name: Name of bridge to remove + :type name: str + :raises: subprocess.CalledProcessError + """ + log('Deleting bridge {}'.format(name)) + subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name]) + + +def add_bridge_port(name, port, promisc=False, ifdata=None, exclusive=False, + linkup=True, portdata=None): + """Add port to bridge and optionally set/update interface data for it + + :param name: Name of bridge to attach port to + :type name: str + :param port: Name of port as represented in netdev + :type port: str + :param promisc: Whether to set promiscuous mode on interface + True=on, False=off, None leave untouched + :type promisc: Optional[bool] + :param ifdata: Additional data to attach to interface + The keys in the ifdata dictionary map directly to column names in the + OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in + RFC 7047 [1] + + There are some established conventions for keys in the external-ids + column of various tables, consult the OVS Integration Guide [2] for + more details. + + NOTE(fnordahl): Technically the ``external-ids`` column is called + ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s + behaviour of transforming dashes to underscores for us [3] so we can + have a more pleasant data structure. + + 0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf + 1: https://tools.ietf.org/html/rfc7047 + 2: http://docs.openvswitch.org/en/latest/topics/integration/ + 3: https://github.com/openvswitch/ovs/blob/ + 20dac08fdcce4b7fda1d07add3b346aa9751cfbc/ + lib/db-ctl-base.c#L189-L215 + :type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param exclusive: If True, raise exception if port exists + :type exclusive: bool + :param linkup: Bring link up + :type linkup: bool + :param portdata: Additional data to attach to port. Similar to ifdata. 
+ :type portdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :raises: subprocess.CalledProcessError + """ + cmd = ['ovs-vsctl', '--'] + if not exclusive: + cmd.append('--may-exist') + cmd.extend(('add-port', name, port)) + for ovs_table, data in (('Interface', ifdata), ('Port', portdata)): + if data: + for setcmd in _dict_to_vsctl_set(data, ovs_table, port): + cmd.extend(setcmd) + + log('Adding port {} to bridge {}'.format(port, name)) + subprocess.check_call(cmd) + if linkup: + # This is mostly a workaround for CI environments, in the real world + # the bare metal provider would most likely have configured and brought + # up the link for us. + subprocess.check_call(["ip", "link", "set", port, "up"]) + if promisc: + subprocess.check_call(["ip", "link", "set", port, "promisc", "on"]) + elif promisc is False: + subprocess.check_call(["ip", "link", "set", port, "promisc", "off"]) + + +def del_bridge_port(name, port): + """Delete a port from the named openvswitch bridge + + :param name: Name of bridge to remove port from + :type name: str + :param port: Name of port to remove + :type port: str + :raises: subprocess.CalledProcessError + """ + log('Deleting port {} from bridge {}'.format(port, name)) + subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port", + name, port]) + subprocess.check_call(["ip", "link", "set", port, "down"]) + subprocess.check_call(["ip", "link", "set", port, "promisc", "off"]) + + +def add_bridge_bond(bridge, port, interfaces, portdata=None, ifdatamap=None, + exclusive=False): + """Add bonded port in bridge from interfaces. + + :param bridge: Name of bridge to add bonded port to + :type bridge: str + :param port: Name of created port + :type port: str + :param interfaces: Underlying interfaces that make up the bonded port + :type interfaces: Iterator[str] + :param portdata: Additional data to attach to the created bond port + See _dict_to_vsctl_set() for detailed description. + Example: + { + 'bond-mode': 'balance-tcp', + 'lacp': 'active', + 'other-config': { + 'lacp-time': 'fast', + }, + } + :type portdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param ifdatamap: Map of data to attach to created bond interfaces + See _dict_to_vsctl_set() for detailed description. 
+        Example:
+        {
+            'eth0': {
+                'type': 'dpdk',
+                'mtu-request': '9000',
+                'options': {
+                    'dpdk-devargs': '0000:01:00.0',
+                },
+            },
+        }
+    :type ifdatamap: Optional[Dict[str,Dict[str,Union[str,Dict[str,str]]]]]
+    :param exclusive: If True, raise exception if port exists
+    :type exclusive: bool
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['ovs-vsctl', '--']
+    if not exclusive:
+        cmd.append('--may-exist')
+    cmd.extend(('add-bond', bridge, port))
+    cmd.extend(interfaces)
+    if portdata:
+        for setcmd in _dict_to_vsctl_set(portdata, 'port', port):
+            cmd.extend(setcmd)
+    if ifdatamap:
+        for ifname, ifdata in ifdatamap.items():
+            for setcmd in _dict_to_vsctl_set(ifdata, 'Interface', ifname):
+                cmd.extend(setcmd)
+    subprocess.check_call(cmd)
+
+
+def add_ovsbridge_linuxbridge(name, bridge, ifdata=None):
+    """Add linux bridge to the named openvswitch bridge
+
+    :param name: Name of the ovs bridge the Linux bridge is added to
+    :type name: str
+    :param bridge: Name of the Linux bridge to add to the ovs bridge
+    :type bridge: str
+    :param ifdata: Additional data to attach to interface
+        The keys in the ifdata dictionary map directly to column names in the
+        OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in
+        RFC 7047 [1]
+
+        There are some established conventions for keys in the external-ids
+        column of various tables, consult the OVS Integration Guide [2] for
+        more details.
+
+        NOTE(fnordahl): Technically the ``external-ids`` column is called
+        ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
+        behaviour of transforming dashes to underscores for us [3] so we can
+        have a more pleasant data structure.
+
+        0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
+        1: https://tools.ietf.org/html/rfc7047
+        2: http://docs.openvswitch.org/en/latest/topics/integration/
+        3: https://github.com/openvswitch/ovs/blob/
+               20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
+               lib/db-ctl-base.c#L189-L215
+    :type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
+    """
+    try:
+        import netifaces
+    except ImportError:
+        if six.PY2:
+            apt_install('python-netifaces', fatal=True)
+        else:
+            apt_install('python3-netifaces', fatal=True)
+        import netifaces
+
+    # NOTE(jamespage):
+    # Older code supported addition of a linuxbridge directly
+    # to an OVS bridge; ensure we don't break uses on upgrade
+    existing_ovs_bridge = port_to_br(bridge)
+    if existing_ovs_bridge is not None:
+        log('Linuxbridge {} is already directly in use'
+            ' by OVS bridge {}'.format(bridge, existing_ovs_bridge),
+            level=INFO)
+        return
+
+    # NOTE(jamespage):
+    # preserve existing naming because interfaces may already exist.
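+    # Illustrative naming (assumption): with name='br-int' and
+    # bridge='br-data' the veth pair becomes veth-br-int/veth-br-data;
+    # longer names fall back to the hashed cvo.../cvb... scheme below to
+    # stay within the kernel's 15 character interface name limit.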
+ ovsbridge_port = "veth-" + name + linuxbridge_port = "veth-" + bridge + if (len(ovsbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN or + len(linuxbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN): + # NOTE(jamespage): + # use parts of hashed bridgename (openstack style) when + # a bridge name exceeds 15 chars + hashed_bridge = hashlib.sha256(bridge.encode('UTF-8')).hexdigest() + base = '{}-{}'.format(hashed_bridge[:8], hashed_bridge[-2:]) + ovsbridge_port = "cvo{}".format(base) + linuxbridge_port = "cvb{}".format(base) + + interfaces = netifaces.interfaces() + for interface in interfaces: + if interface == ovsbridge_port or interface == linuxbridge_port: + log('Interface {} already exists'.format(interface), level=INFO) + return + + log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name), + level=INFO) + + check_for_eni_source() + + with open('/etc/network/interfaces.d/{}.cfg'.format( + linuxbridge_port), 'w') as config: + config.write(BRIDGE_TEMPLATE.format(linuxbridge_port=linuxbridge_port, + ovsbridge_port=ovsbridge_port, + bridge=bridge)) + + subprocess.check_call(["ifup", linuxbridge_port]) + add_bridge_port(name, linuxbridge_port, ifdata=ifdata) + + +def is_linuxbridge_interface(port): + ''' Check if the interface is a linuxbridge bridge + :param port: Name of an interface to check whether it is a Linux bridge + :returns: True if port is a Linux bridge''' + + if os.path.exists('/sys/class/net/' + port + '/bridge'): + log('Interface {} is a Linux bridge'.format(port), level=DEBUG) + return True + else: + log('Interface {} is not a Linux bridge'.format(port), level=DEBUG) + return False + + +def set_manager(manager): + ''' Set the controller for the local openvswitch ''' + log('Setting manager for local ovs to {}'.format(manager)) + subprocess.check_call(['ovs-vsctl', 'set-manager', + 'ssl:{}'.format(manager)]) + + +def set_Open_vSwitch_column_value(column_value): + """ + Calls ovs-vsctl and sets the 'column_value' in the Open_vSwitch table. + + :param column_value: + See http://www.openvswitch.org//ovs-vswitchd.conf.db.5.pdf for + details of the relevant values. 
+    :type column_value: str
+    :raises CalledProcessError: possibly ovsdb-server is not running
+    """
+    log('Setting {} in the Open_vSwitch table'.format(column_value))
+    subprocess.check_call(['ovs-vsctl', 'set', 'Open_vSwitch', '.', column_value])
+
+
+CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
+
+
+def get_certificate():
+    ''' Read openvswitch certificate from disk '''
+    if os.path.exists(CERT_PATH):
+        log('Reading ovs certificate from {}'.format(CERT_PATH))
+        with open(CERT_PATH, 'r') as cert:
+            full_cert = cert.read()
+            begin_marker = "-----BEGIN CERTIFICATE-----"
+            end_marker = "-----END CERTIFICATE-----"
+            begin_index = full_cert.find(begin_marker)
+            end_index = full_cert.rfind(end_marker)
+            if end_index == -1 or begin_index == -1:
+                raise RuntimeError("Certificate does not contain valid begin"
+                                   " and end markers.")
+            full_cert = full_cert[begin_index:(end_index + len(end_marker))]
+            return full_cert
+    else:
+        log('Certificate not found', level=WARNING)
+        return None
+
+
+def check_for_eni_source():
+    ''' Juju removes the source line when setting up interfaces,
+    replace if missing '''
+
+    with open('/etc/network/interfaces', 'r') as eni:
+        for line in eni:
+            # lines read from the file keep their trailing newline; strip
+            # before comparing or the match never succeeds
+            if line.strip() == 'source /etc/network/interfaces.d/*':
+                return
+    with open('/etc/network/interfaces', 'a') as eni:
+        eni.write('\nsource /etc/network/interfaces.d/*')
+
+
+def full_restart():
+    ''' Full restart and reload of openvswitch '''
+    if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
+        service('start', 'openvswitch-force-reload-kmod')
+    else:
+        service('force-reload-kmod', 'openvswitch-switch')
+
+
+def enable_ipfix(bridge, target,
+                 cache_active_timeout=60,
+                 cache_max_flows=128,
+                 sampling=64):
+    '''Enable IPFIX on bridge to target.
+    :param bridge: Bridge to monitor
+    :param target: IPFIX remote endpoint
+    :param cache_active_timeout: The maximum period in seconds for
+                                 which an IPFIX flow record is cached
+                                 and aggregated before being sent
+    :param cache_max_flows: The maximum number of IPFIX flow records
+                            that can be cached at a time
+    :param sampling: The rate at which packets should be sampled and
+                     sent to each target collector
+    '''
+    cmd = [
+        'ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
+        '--id=@i', 'create', 'IPFIX',
+        'targets="{}"'.format(target),
+        'sampling={}'.format(sampling),
+        'cache_active_timeout={}'.format(cache_active_timeout),
+        'cache_max_flows={}'.format(cache_max_flows),
+    ]
+    log('Enabling IPFIX on {}.'.format(bridge))
+    subprocess.check_call(cmd)
+
+
+def disable_ipfix(bridge):
+    '''Disable IPFIX on target bridge.
+    :param bridge: Bridge to modify
+    '''
+    cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
+    subprocess.check_call(cmd)
+
+
+def port_to_br(port):
+    '''Determine the bridge that contains a port
+    :param port: Name of port to check for
+    :returns str: OVS bridge containing port or None if not found
+    '''
+    try:
+        return subprocess.check_output(
+            ['ovs-vsctl', 'port-to-br', port]
+        ).decode('UTF-8').strip()
+    except subprocess.CalledProcessError:
+        return None
+
+
+def ovs_appctl(target, args):
+    """Run `ovs-appctl` for target with args and return output.
+
+    :param target: Name of daemon to contact. Unless target begins with '/',
+                   `ovs-appctl` looks for a pidfile and will build the path to
+                   a /var/run/openvswitch/target.pid.ctl for you.
+    :type target: str
+    :param args: Command and arguments to pass to `ovs-appctl`
+    :type args: Tuple[str, ...]
+ :returns: Output from command + :rtype: str + :raises: subprocess.CalledProcessError + """ + cmd = ['ovs-appctl', '-t', target] + cmd.extend(args) + return subprocess.check_output(cmd, universal_newlines=True) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/ovn.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/ovn.py new file mode 100644 index 0000000000000000000000000000000000000000..2075f11acfbeb3d0d614cf3db5a1b535bf128824 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/ovn.py @@ -0,0 +1,233 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import subprocess +import uuid + +from . import utils + + +OVN_RUNDIR = '/var/run/ovn' +OVN_SYSCONFDIR = '/etc/ovn' + + +def ovn_appctl(target, args, rundir=None, use_ovs_appctl=False): + """Run ovn/ovs-appctl for target with args and return output. + + :param target: Name of daemon to contact. Unless target begins with '/', + `ovn-appctl` looks for a pidfile and will build the path to + a /var/run/ovn/target.pid.ctl for you. + :type target: str + :param args: Command and arguments to pass to `ovn-appctl` + :type args: Tuple[str, ...] + :param rundir: Override path to sockets + :type rundir: Optional[str] + :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03, + set this to True to use ``ovs-appctl`` instead. + :type use_ovs_appctl: bool + :returns: Output from command + :rtype: str + :raises: subprocess.CalledProcessError + """ + # NOTE(fnordahl): The ovsdb-server processes for the OVN databases use a + # non-standard naming scheme for their daemon control socket and we need + # to pass the full path to the socket. + if target in ('ovnnb_db', 'ovnsb_db',): + target = os.path.join(rundir or OVN_RUNDIR, target + '.ctl') + + if use_ovs_appctl: + tool = 'ovs-appctl' + else: + tool = 'ovn-appctl' + + return utils._run(tool, '-t', target, *args) + + +class OVNClusterStatus(object): + + def __init__(self, name, cluster_id, server_id, address, status, role, + term, leader, vote, election_timer, log, + entries_not_yet_committed, entries_not_yet_applied, + connections, servers): + """Initialize and populate OVNClusterStatus object. + + Use class initializer so we can define types in a compatible manner. 
+ + :param name: Name of schema used for database + :type name: str + :param cluster_id: UUID of cluster + :type cluster_id: uuid.UUID + :param server_id: UUID of server + :type server_id: uuid.UUID + :param address: OVSDB connection method + :type address: str + :param status: Status text + :type status: str + :param role: Role of server + :type role: str + :param term: Election term + :type term: int + :param leader: Short form UUID of leader + :type leader: str + :param vote: Vote + :type vote: str + :param election_timer: Current value of election timer + :type election_timer: int + :param log: Log + :type log: str + :param entries_not_yet_committed: Entries not yet committed + :type entries_not_yet_committed: int + :param entries_not_yet_applied: Entries not yet applied + :type entries_not_yet_applied: int + :param connections: Connections + :type connections: str + :param servers: Servers in the cluster + [('0ea6', 'ssl:192.0.2.42:6643')] + :type servers: List[Tuple[str,str]] + """ + self.name = name + self.cluster_id = cluster_id + self.server_id = server_id + self.address = address + self.status = status + self.role = role + self.term = term + self.leader = leader + self.vote = vote + self.election_timer = election_timer + self.log = log + self.entries_not_yet_committed = entries_not_yet_committed + self.entries_not_yet_applied = entries_not_yet_applied + self.connections = connections + self.servers = servers + + def __eq__(self, other): + return ( + self.name == other.name and + self.cluster_id == other.cluster_id and + self.server_id == other.server_id and + self.address == other.address and + self.status == other.status and + self.role == other.role and + self.term == other.term and + self.leader == other.leader and + self.vote == other.vote and + self.election_timer == other.election_timer and + self.log == other.log and + self.entries_not_yet_committed == other.entries_not_yet_committed and + self.entries_not_yet_applied == other.entries_not_yet_applied and + self.connections == other.connections and + self.servers == other.servers) + + @property + def is_cluster_leader(self): + """Retrieve status information from clustered OVSDB. + + :returns: Whether target is cluster leader + :rtype: bool + """ + return self.leader == 'self' + + +def cluster_status(target, schema=None, use_ovs_appctl=False, rundir=None): + """Retrieve status information from clustered OVSDB. + + :param target: Usually one of 'ovsdb-server', 'ovnnb_db', 'ovnsb_db', can + also be full path to control socket. + :type target: str + :param schema: Database schema name, deduced from target if not provided + :type schema: Optional[str] + :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03, + set this to True to use ``ovs-appctl`` instead. 
+ :type use_ovs_appctl: bool + :param rundir: Override path to sockets + :type rundir: Optional[str] + :returns: cluster status data object + :rtype: OVNClusterStatus + :raises: subprocess.CalledProcessError, KeyError, RuntimeError + """ + schema_map = { + 'ovnnb_db': 'OVN_Northbound', + 'ovnsb_db': 'OVN_Southbound', + } + if schema and schema not in schema_map.keys(): + raise RuntimeError('Unknown schema provided: "{}"'.format(schema)) + + status = {} + k = '' + for line in ovn_appctl(target, + ('cluster/status', schema or schema_map[target]), + rundir=rundir, + use_ovs_appctl=use_ovs_appctl).splitlines(): + if k and line.startswith(' '): + # there is no key which means this is a instance of a multi-line/ + # multi-value item, populate the List which is already stored under + # the key. + if k == 'servers': + status[k].append( + tuple(line.replace(')', '').lstrip().split()[0:4:3])) + else: + status[k].append(line.lstrip()) + elif ':' in line: + # this is a line with a key + k, v = line.split(':', 1) + k = k.lower() + k = k.replace(' ', '_') + if v: + # this is a line with both key and value + if k in ('cluster_id', 'server_id',): + v = v.replace('(', '') + v = v.replace(')', '') + status[k] = tuple(v.split()) + else: + status[k] = v.lstrip() + else: + # this is a line with only key which means a multi-line/ + # multi-value item. Store key as List which will be + # populated on subsequent iterations. + status[k] = [] + return OVNClusterStatus( + status['name'], + uuid.UUID(status['cluster_id'][1]), + uuid.UUID(status['server_id'][1]), + status['address'], + status['status'], + status['role'], + int(status['term']), + status['leader'], + status['vote'], + int(status['election_timer']), + status['log'], + int(status['entries_not_yet_committed']), + int(status['entries_not_yet_applied']), + status['connections'], + status['servers']) + + +def is_northd_active(): + """Query `ovn-northd` for active status. + + Note that the active status information for ovn-northd is available for + OVN 20.03 and onward. + + :returns: True if local `ovn-northd` instance is active, False otherwise + :rtype: bool + """ + try: + for line in ovn_appctl('ovn-northd', ('status',)).splitlines(): + if line.startswith('Status:') and 'active' in line: + return True + except subprocess.CalledProcessError: + pass + return False diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/ovsdb.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/ovsdb.py new file mode 100644 index 0000000000000000000000000000000000000000..5e50bc36333bde56674a82ad6c88e0d5de44ee07 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/ovsdb.py @@ -0,0 +1,206 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import uuid + +from . import utils + + +class SimpleOVSDB(object): + """Simple interface to OVSDB through the use of command line tools. + + OVS and OVN is managed through a set of databases. 
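# Illustrative aside, not part of the patch: cluster_status() below parses the
# raw 'cluster/status' output into an OVNClusterStatus, so leadership checks
# become attribute reads. A minimal sketch of the intended usage:
from charmhelpers.contrib.network.ovs import ovn

nb_status = ovn.cluster_status('ovnnb_db')   # schema deduced from target
if nb_status.is_cluster_leader:
    # e.g. only the leader performs cluster-wide maintenance
    print('this unit hosts the OVN_Northbound leader, term', nb_status.term)
for server_id, addr in nb_status.servers:
    print(server_id, addr)                   # e.g. ('0ea6', 'ssl:192.0.2.42:6643')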
These databases have + similar command line tools to manage them. We make use of the similarity + to provide a generic class that can be used to manage them. + + The OpenvSwitch project does provide a Python API, but on the surface it + appears to be a bit too involved for our simple use case. + + Examples: + sbdb = SimpleOVSDB('ovn-sbctl') + for chs in sbdb.chassis: + print(chs) + + ovsdb = SimpleOVSDB('ovs-vsctl') + for br in ovsdb.bridge: + if br['name'] == 'br-test': + ovsdb.bridge.set(br['uuid'], 'external_ids:charm', 'managed') + """ + + # For validation we keep a complete map of currently known good tool and + # table combinations. This requires maintenance down the line whenever + # upstream adds things that downstream wants, and the cost of maintaining + # that will most likely be lower then the cost of finding the needle in + # the haystack whenever downstream code misspells something. + _tool_table_map = { + 'ovs-vsctl': ( + 'autoattach', + 'bridge', + 'ct_timeout_policy', + 'ct_zone', + 'controller', + 'datapath', + 'flow_sample_collector_set', + 'flow_table', + 'ipfix', + 'interface', + 'manager', + 'mirror', + 'netflow', + 'open_vswitch', + 'port', + 'qos', + 'queue', + 'ssl', + 'sflow', + ), + 'ovn-nbctl': ( + 'acl', + 'address_set', + 'connection', + 'dhcp_options', + 'dns', + 'forwarding_group', + 'gateway_chassis', + 'ha_chassis', + 'ha_chassis_group', + 'load_balancer', + 'load_balancer_health_check', + 'logical_router', + 'logical_router_policy', + 'logical_router_port', + 'logical_router_static_route', + 'logical_switch', + 'logical_switch_port', + 'meter', + 'meter_band', + 'nat', + 'nb_global', + 'port_group', + 'qos', + 'ssl', + ), + 'ovn-sbctl': ( + 'address_set', + 'chassis', + 'connection', + 'controller_event', + 'dhcp_options', + 'dhcpv6_options', + 'dns', + 'datapath_binding', + 'encap', + 'gateway_chassis', + 'ha_chassis', + 'ha_chassis_group', + 'igmp_group', + 'ip_multicast', + 'logical_flow', + 'mac_binding', + 'meter', + 'meter_band', + 'multicast_group', + 'port_binding', + 'port_group', + 'rbac_permission', + 'rbac_role', + 'sb_global', + 'ssl', + 'service_monitor', + ), + } + + def __init__(self, tool): + """SimpleOVSDB constructor. + + :param tool: Which tool with database commands to operate on. + Usually one of `ovs-vsctl`, `ovn-nbctl`, `ovn-sbctl` + :type tool: str + """ + if tool not in self._tool_table_map: + raise RuntimeError( + 'tool must be one of "{}"'.format(self._tool_table_map.keys())) + self._tool = tool + + def __getattr__(self, table): + if table not in self._tool_table_map[self._tool]: + raise AttributeError( + 'table "{}" not known for use with "{}"' + .format(table, self._tool)) + return self.Table(self._tool, table) + + class Table(object): + """Methods to interact with contents of OVSDB tables. + + NOTE: At the time of this writing ``find`` is the only command + line argument to OVSDB manipulating tools that actually supports + JSON output. + """ + + def __init__(self, tool, table): + """SimpleOVSDBTable constructor. + + :param table: Which table to operate on + :type table: str + """ + self._tool = tool + self._table = table + + def _find_tbl(self, condition=None): + """Run and parse output of OVSDB `find` command. + + :param condition: An optional RFC 7047 5.1 match condition + :type condition: Optional[str] + :returns: Dictionary with data + :rtype: Dict[str, any] + """ + # When using json formatted output to OVS commands Internal OVSDB + # notation may occur that require further deserializing. 
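            # (illustrative aside, not part of the patch) Examples of that
            # notation; each typed column arrives as a two-element
            # ["<type>", <value>] pair:
            #     ["uuid", "a87f5d2a-..."]   -> uuid.UUID('a87f5d2a-...')
            #     ["set", [1, 2, 3]]         -> [1, 2, 3]
            #     ["map", [["k", "v"]]]      -> {'k': 'v'}
            # The callback map below picks the matching constructor for
            # each pair; plain scalars pass through unchanged.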
+ # Reference: https://tools.ietf.org/html/rfc7047#section-5.1 + ovs_type_cb_map = { + 'uuid': uuid.UUID, + # FIXME sets also appear to sometimes contain type/value tuples + 'set': list, + 'map': dict, + } + cmd = [self._tool, '-f', 'json', 'find', self._table] + if condition: + cmd.append(condition) + output = utils._run(*cmd) + data = json.loads(output) + for row in data['data']: + values = [] + for col in row: + if isinstance(col, list): + f = ovs_type_cb_map.get(col[0], str) + values.append(f(col[1])) + else: + values.append(col) + yield dict(zip(data['headings'], values)) + + def __iter__(self): + return self._find_tbl() + + def clear(self, rec, col): + utils._run(self._tool, 'clear', self._table, rec, col) + + def find(self, condition): + return self._find_tbl(condition=condition) + + def remove(self, rec, col, value): + utils._run(self._tool, 'remove', self._table, rec, col, value) + + def set(self, rec, col, value): + utils._run(self._tool, 'set', self._table, rec, + '{}={}'.format(col, value)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/utils.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..53c9b4ddab6d68d2478d4161e658c70e2caa6a74 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ovs/utils.py @@ -0,0 +1,26 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import subprocess + + +def _run(*args): + """Run a process, check result, capture decoded output from STDOUT. + + :param args: Command and arguments to run + :type args: Tuple[str, ...] + :returns: Information about the completed process + :rtype: str + :raises subprocess.CalledProcessError + """ + return subprocess.check_output(args, universal_newlines=True) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ufw.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ufw.py new file mode 100644 index 0000000000000000000000000000000000000000..b9bf7c9df5576615e23225a7fdb6c11a28931ba4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/network/ufw.py @@ -0,0 +1,386 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains helpers to add and remove ufw rules. 
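# Illustrative aside, not part of the patch: the SimpleOVSDB wrapper above
# exposes OVSDB tables as attributes and the find/clear/remove/set verbs as
# methods. A minimal sketch, assuming ovs-vsctl is installed and a bridge
# named 'br-test' exists; the 'uuid' key follows the class docstring example:
from charmhelpers.contrib.network.ovs.ovsdb import SimpleOVSDB

ovsdb = SimpleOVSDB('ovs-vsctl')
for br in ovsdb.bridge:          # runs `ovs-vsctl -f json find bridge`
    if br['name'] == 'br-test':
        # becomes `ovs-vsctl set bridge <uuid> external_ids:charm=managed`
        ovsdb.bridge.set(br['uuid'], 'external_ids:charm', 'managed')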
+ +Examples: + +- open SSH port for subnet 10.0.3.0/24: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp') + +- open service by name as defined in /etc/services: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.service('ssh', 'open') + +- close service by port number: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.service('4949', 'close') # munin +""" +import os +import re +import subprocess + +from charmhelpers.core import hookenv +from charmhelpers.core.kernel import modprobe, is_module_loaded + +__author__ = "Felipe Reyes " + + +class UFWError(Exception): + pass + + +class UFWIPv6Error(UFWError): + pass + + +def is_enabled(): + """ + Check if `ufw` is enabled + + :returns: True if ufw is enabled + """ + output = subprocess.check_output(['ufw', 'status'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + + m = re.findall(r'^Status: active\n', output, re.M) + + return len(m) >= 1 + + +def is_ipv6_ok(soft_fail=False): + """ + Check if IPv6 support is present and ip6tables functional + + :param soft_fail: If set to True and IPv6 support is broken, then reports + that the host doesn't have IPv6 support, otherwise a + UFWIPv6Error exception is raised. + :returns: True if IPv6 is working, False otherwise + """ + + # do we have IPv6 in the machine? + if os.path.isdir('/proc/sys/net/ipv6'): + # is ip6tables kernel module loaded? + if not is_module_loaded('ip6_tables'): + # ip6tables support isn't complete, let's try to load it + try: + modprobe('ip6_tables') + # great, we can load the module + return True + except subprocess.CalledProcessError as ex: + hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, + level="WARN") + # we are in a world where ip6tables isn't working + if soft_fail: + # so we inform that the machine doesn't have IPv6 + return False + else: + raise UFWIPv6Error("IPv6 firewall support broken") + else: + # the module is present :) + return True + + else: + # the system doesn't have IPv6 + return False + + +def disable_ipv6(): + """ + Disable ufw IPv6 support in /etc/default/ufw + """ + exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g', + '/etc/default/ufw']) + if exit_code == 0: + hookenv.log('IPv6 support in ufw disabled', level='INFO') + else: + hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR") + raise UFWError("Couldn't disable IPv6 support in ufw") + + +def enable(soft_fail=False): + """ + Enable ufw + + :param soft_fail: If set to True silently disables IPv6 support in ufw, + otherwise a UFWIPv6Error exception is raised when IP6 + support is broken. 
+    :returns: True if ufw is successfully enabled
+    """
+    if is_enabled():
+        return True
+
+    if not is_ipv6_ok(soft_fail):
+        disable_ipv6()
+
+    output = subprocess.check_output(['ufw', 'enable'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall('^Firewall is active and enabled on system startup\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be enabled", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw enabled", level='INFO')
+        return True
+
+
+def reload():
+    """
+    Reload ufw
+
+    :returns: True if ufw is successfully reloaded
+    """
+    output = subprocess.check_output(['ufw', 'reload'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall('^Firewall reloaded\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be reloaded", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw reloaded", level='INFO')
+        return True
+
+
+def disable():
+    """
+    Disable ufw
+
+    :returns: True if ufw is successfully disabled
+    """
+    if not is_enabled():
+        return True
+
+    output = subprocess.check_output(['ufw', 'disable'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall(r'^Firewall stopped and disabled on system startup\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be disabled", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw disabled", level='INFO')
+        return True
+
+
+def default_policy(policy='deny', direction='incoming'):
+    """
+    Changes the default policy for traffic `direction`
+
+    :param policy: allow, deny or reject
+    :param direction: traffic direction, possible values: incoming, outgoing,
+                      routed
+    """
+    if policy not in ['allow', 'deny', 'reject']:
+        raise UFWError(('Unknown policy %s, valid values: '
+                        'allow, deny, reject') % policy)
+
+    if direction not in ['incoming', 'outgoing', 'routed']:
+        raise UFWError(('Unknown direction %s, valid values: '
+                        'incoming, outgoing, routed') % direction)
+
+    output = subprocess.check_output(['ufw', 'default', policy, direction],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+    hookenv.log(output, level='DEBUG')
+
+    m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
+                                                             policy),
+                   output, re.M)
+    if len(m) == 0:
+        hookenv.log("ufw couldn't change the default policy to %s for %s"
+                    % (policy, direction), level='WARN')
+        return False
+    else:
+        hookenv.log("ufw default policy for %s changed to %s"
+                    % (direction, policy), level='INFO')
+        return True
+
+
+def modify_access(src, dst='any', port=None, proto=None, action='allow',
+                  index=None, prepend=False, comment=None):
+    """
+    Grant or revoke access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :type src: Optional[str]
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and only connections to one of them should be accepted,
+                set this field.
+    :type dst: Optional[str]
+    :param port: destination port
+    :type port: Optional[int]
+    :param proto: protocol (tcp or udp)
+    :type proto: Optional[str]
+    :param action: `allow` or `delete`
+    :type action: str
+    :param index: if different from None the rule is inserted at the given
+                  `index`.
+    :type index: Optional[int]
+    :param prepend: Whether to insert the rule before all other rules matching
+                    the rule's IP type.
+    :type prepend: bool
+    :param comment: Create the rule with a comment
+    :type comment: Optional[str]
+    """
+    if not is_enabled():
+        hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
+        return
+
+    if action == 'delete':
+        if index is not None:
+            cmd = ['ufw', '--force', 'delete', str(index)]
+        else:
+            cmd = ['ufw', 'delete', 'allow']
+    elif index is not None:
+        cmd = ['ufw', 'insert', str(index), action]
+    elif prepend:
+        cmd = ['ufw', 'prepend', action]
+    else:
+        cmd = ['ufw', action]
+
+    if src is not None:
+        cmd += ['from', src]
+
+    if dst is not None:
+        cmd += ['to', dst]
+
+    if port is not None:
+        cmd += ['port', str(port)]
+
+    if proto is not None:
+        cmd += ['proto', proto]
+
+    if comment:
+        cmd.extend(['comment', comment])
+
+    hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
+    # capture stderr as well so failures can be logged below
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    (stdout, stderr) = p.communicate()
+
+    hookenv.log(stdout, level='INFO')
+
+    if p.returncode != 0:
+        hookenv.log(stderr, level='ERROR')
+        hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
+                                                              p.returncode),
+                    level='ERROR')
+
+
+def grant_access(src, dst='any', port=None, proto=None, index=None):
+    """
+    Grant access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and only connections to one of them should be accepted,
+                set this field.
+    :param port: destination port
+    :param proto: protocol (tcp or udp)
+    :param index: if different from None the rule is inserted at the given
+                  `index`.
+    """
+    return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
+                         index=index)
+
+
+def revoke_access(src, dst='any', port=None, proto=None):
+    """
+    Revoke access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and only connections to one of them should be accepted,
+                set this field.
+    :param port: destination port
+    :param proto: protocol (tcp or udp)
+    """
+    return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
+
+
+def service(name, action):
+    """
+    Open/close access to a service
+
+    :param name: could be a service name defined in `/etc/services` or a port
+                 number.
+    :param action: `open` or `close`
+    """
+    if action == 'open':
+        subprocess.check_output(['ufw', 'allow', str(name)],
+                                universal_newlines=True)
+    elif action == 'close':
+        subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
+                                universal_newlines=True)
+    else:
+        raise UFWError(("'{}' not supported, use 'open' "
+                        "or 'close'").format(action))
+
+
+def status():
+    """Retrieve firewall rules as represented by UFW.
+
+    :returns: Tuples with rule number and data, e.g.
+              (1, {'to': '22/tcp', 'action': 'allow in', 'from': 'any',
+                   'ipv6': True, 'comment': ''})
+    :rtype: Iterator[Tuple[int, Dict[str, Union[bool, str]]]]
+    """
+    cp = subprocess.check_output(('ufw', 'status', 'numbered',),
+                                 stderr=subprocess.STDOUT,
+                                 universal_newlines=True)
+    for line in cp.splitlines():
+        if not line.startswith('['):
+            continue
+        ipv6 = True if '(v6)' in line else False
+        line = line.replace('(v6)', '')
+        line = line.replace('[', '')
+        line = line.replace(']', '')
+        line = line.replace('Anywhere', 'any')
+        row = line.split()
+        yield (int(row[0]), {
+            'to': row[1],
+            'action': ' '.join(row[2:4]).lower(),
+            'from': row[4],
+            'ipv6': ipv6,
+            'comment': row[6] if len(row) > 5 and row[5] == '#' else '',
+        })
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/alternatives.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/alternatives.py
new file mode 100644
index 0000000000000000000000000000000000000000..547de09c6d818772191b519618fa32b08b0e6eff
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/alternatives.py
@@ -0,0 +1,44 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
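# Illustrative aside, not part of the patch: a round trip through the ufw
# helpers defined above. A minimal sketch, assuming it runs as root on a host
# with ufw installed:
from charmhelpers.contrib.network import ufw

ufw.enable(soft_fail=True)            # disables IPv6 support rather than raise
ufw.default_policy('deny', 'incoming')
ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
for num, rule in ufw.status():        # parsed `ufw status numbered` output
    print(num, rule['to'], rule['action'], rule['from'])
ufw.revoke_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')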
+ +''' Helper for managing alternatives for file conflict resolution ''' + +import subprocess +import shutil +import os + + +def install_alternative(name, target, source, priority=50): + ''' Install alternative configuration ''' + if (os.path.exists(target) and not os.path.islink(target)): + # Move existing file/directory away before installing + shutil.move(target, '{}.bak'.format(target)) + cmd = [ + 'update-alternatives', '--force', '--install', + target, name, source, str(priority) + ] + subprocess.check_call(cmd) + + +def remove_alternative(name, source): + """Remove an installed alternative configuration file + + :param name: string name of the alternative to remove + :param source: string full path to alternative to remove + """ + cmd = [ + 'update-alternatives', '--remove', + name, source + ] + subprocess.check_call(cmd) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/amulet/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/amulet/deployment.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 0000000000000000000000000000000000000000..dd3aebe97dd04bf42a2c4ee7c7b7b83b1917a678 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,384 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import re +import sys +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. 
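# Illustrative aside, not part of the patch: install_alternative() above wraps
# `update-alternatives --force --install`, first moving any pre-existing real
# file to <target>.bak so the managed symlink can be created. A minimal
# sketch; the names and paths are placeholders for illustration:
from charmhelpers.contrib.openstack.alternatives import install_alternative

install_alternative(
    name='my-service.conf',                          # alternative group name
    target='/etc/my-service/my-service.conf',        # symlink the system manages
    source='/etc/my-service/my-service.charm.conf',  # this provider's file
    priority=100,
)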
+ """ + + def __init__(self, series=None, openstack=None, source=None, + stable=True, log_level=DEBUG): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.log = self.get_logger(level=log_level) + self.log.info('OpenStackAmuletDeployment: init') + self.openstack = openstack + self.source = source + self.stable = stable + + def get_logger(self, name="deployment-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + + self.log.info('OpenStackAmuletDeployment: determine branch locations') + + # Charms outside the ~openstack-charmers + base_charms = { + 'mysql': ['trusty'], + 'mongodb': ['trusty'], + 'nrpe': ['trusty', 'xenial'], + } + + for svc in other_services: + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if svc['name'] in base_charms: + # NOTE: not all charms have support for all series we + # want/need to test against, so fix to most recent + # that each base charm supports + target_series = self.series + if self.series not in base_charms[svc['name']]: + target_series = base_charms[svc['name']][-1] + svc['location'] = 'cs:{}/{}'.format(target_series, + svc['name']) + elif self.stable: + svc['location'] = 'cs:{}/{}'.format(self.series, + svc['name']) + else: + svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( + self.series, + svc['name'] + ) + + return other_services + + def _add_services(self, this_service, other_services, use_source=None, + no_origin=None): + """Add services to the deployment and optionally set + openstack-origin/source. + + :param this_service dict: Service dictionary describing the service + whose amulet tests are being run + :param other_services dict: List of service dictionaries describing + the services needed to support the target + service + :param use_source list: List of services which use the 'source' config + option rather than 'openstack-origin' + :param no_origin list: List of services which do not support setting + the Cloud Archive. 
+ Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ + self.log.info('OpenStackAmuletDeployment: adding services') + + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + + use_source = use_source or [] + no_origin = no_origin or [] + + # Charms which should use the source config option + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy', 'percona-cluster', 'lxd'])) + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt', + 'ceilometer-agent'])) + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + no_origin: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in no_origin: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=None): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. 
+ """ + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' + ''.format(timeout)) + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait(timeout=timeout) + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) + # Check for ready messages + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + + self.log.info('OK') + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + # Must be ordered by OpenStack release (not by Ubuntu release): + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) + + releases = { + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, + ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('bionic', 'cloud:bionic-stein'): self.bionic_stein, + ('bionic', 'cloud:bionic-train'): self.bionic_train, + ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, + ('cosmic', None): self.cosmic_rocky, + ('disco', None): self.disco_stein, + ('eoan', None): self.eoan_train, + ('focal', None): self.focal_ussuri, + } + return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. + """ + releases = OrderedDict([ + ('trusty', 'icehouse'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] + + def get_percona_service_entry(self, memory_constraint=None): + """Return a amulet service entry for percona cluster. 
+ + :param memory_constraint: Override the default memory constraint + in the service entry. + :type memory_constraint: str + :returns: Amulet service entry. + :rtype: dict + """ + memory_constraint = memory_constraint or '3072M' + svc_entry = { + 'name': 'percona-cluster', + 'constraints': {'mem': memory_constraint}} + if self._get_openstack_release() <= self.trusty_mitaka: + svc_entry['location'] = 'cs:trusty/percona-cluster' + return svc_entry + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder-ceph', + 'glance' + ] + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata + pools = [ + 'rbd', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/amulet/utils.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..14864198926b82afe382d07263034b020ad43c09 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,1593 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
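# Illustrative aside, not part of the patch: get_ceph_expected_pools() above
# derives the pool list purely from the release enum. The same branching as a
# standalone sketch, with the release reduced to an int and the boundary
# values standing in for the release-pair indexes:
def expected_pools(release, icehouse=0, ocata=8, radosgw=False):
    if release == icehouse:
        pools = ['data', 'metadata', 'rbd', 'cinder-ceph', 'glance']
    elif release <= ocata:
        pools = ['rbd', 'cinder-ceph', 'glance']   # Kilo through Ocata
    else:
        pools = ['cinder-ceph', 'glance']          # Pike and later
    if radosgw:
        pools.extend(['.rgw.root', '.rgw.control', '.rgw', '.rgw.gc',
                      '.users.uid'])
    return pools

print(expected_pools(10))  # Pike and later -> ['cinder-ceph', 'glance']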
+ +import amulet +import json +import logging +import os +import re +import six +import time +import urllib +import urlparse + +import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 +import heatclient.v1.client as heat_client +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions + +import novaclient.client as nova_client +import novaclient +import pika +import swiftclient + +from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) +from charmhelpers.core.host import CompareHostReleases + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + +NOVA_CLIENT_VERSION = "2" + +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', + 'xenial_newton', 'yakkety_newton', + 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', + 'xenial_queens', 'bionic_queens', + 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein', + 'bionic_train', 'eoan_train', + 'bionic_ussuri', 'focal_ussuri', +] + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charm tests. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
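# Illustrative aside, not part of the patch: validate_endpoint_data() above
# dispatches on the position in OPENSTACK_RELEASES_PAIRS; from xenial_queens
# onward the v3 validator and a v3-shaped `expected` dict are used. A sketch
# of that gate in isolation, with an abridged pair list:
OPENSTACK_RELEASES_PAIRS = ['trusty_icehouse', 'xenial_mitaka',
                            'xenial_queens', 'bionic_queens']
xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
openstack_release = OPENSTACK_RELEASES_PAIRS.index('bionic_queens')
use_v3 = bool(openstack_release and openstack_release >= xenial_queens)
print('v3 endpoint validation:', use_v3)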
+ """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected, expected_num_eps=3): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + ['}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + '}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != expected_num_eps: + return 'Unexpected number of endpoints found' + + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. 
+ 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('Validating service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictinaries that make up the keystone v3 service + catalogue. + + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note, that an added complication is that the order of admin, public, + internal against 'interface' in each region. + + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. 
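# Illustrative aside, not part of the patch: the interface-keyed sort that
# validate_v3_svc_catalog_endpoint_data() relies on, in isolation; the URLs
# reuse the values from the docstring example above:
endpoints = [
    {'interface': 'public', 'url': 'http://10.5.5.224:5000/v3'},
    {'interface': 'admin', 'url': 'http://10.5.5.224:35357/v3'},
    {'interface': 'internal', 'url': 'http://10.5.5.224:5000/v3'},
]
# 'admin' < 'internal' < 'public', so expected and actual line up pairwise
for ep in sorted(endpoints, key=lambda x: x['interface']):
    print(ep['interface'], ep['url'])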
+ """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('Validating tenant data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('Validating role data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual, api_version=None): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('Validating user data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
+ """ + self.log.debug('Validating flavor data...') + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) + return tenant in [t.name for t in keystone.tenants.list()] + + @retry_on_exception(num_retries=5, base_delay=1) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel.get('api_version') != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel.get('api_version'), api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. + """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + + def authenticate_cinder_admin(self, keystone, api_version=2): + """Authenticates admin user with cinder.""" + self.log.debug('Authenticating cinder admin...') + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](session=keystone.session) + + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + 
admin_port=admin_port) + if api_version == 2: + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + else: + auth = v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None, api_version=2): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + # 11 => xenial_queens + if api_version == 3 or (openstack_release and openstack_release >= 11): + client_class = keystone_client_v3.Client + api_version = 3 + else: + client_class = keystone_client.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant=None, api_version=None, + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): + """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') + if not keystone_ip: + keystone_ip = keystone_sentry.info['public-address'] + + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: + user_domain_name = 'admin_domain' + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) + + def authenticate_glance_admin(self, keystone, force_v1_client=False): + 
"""Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') + ep = keystone.service_catalog.url_for(service_type='image', + interface='adminURL') + if not force_v1_client and keystone.session: + return glance_clientv2.Client("2", session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + + def glance_create_image(self, glance, image_name, image_url, + download_dir='tests', + hypervisor_type=None, + disk_format='qcow2', + architecture='x86_64', + container_format='bare'): + """Download an image and upload it to glance, validate its status + and return an image object pointer. KVM defaults, can override for + LXD. 
+ + :param glance: pointer to authenticated glance api connection + :param image_name: display name for new image + :param image_url: url to retrieve + :param download_dir: directory to store downloaded image file + :param hypervisor_type: glance image hypervisor property + :param disk_format: glance image disk format + :param architecture: glance image architecture property + :param container_format: glance image container format + :returns: glance image pointer + """ + self.log.debug('Creating glance image ({}) from ' + '{}...'.format(image_name, image_url)) + + # Download image + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + abs_file_name = os.path.join(download_dir, image_name) + if not os.path.exists(abs_file_name): + opener.retrieve(image_url, abs_file_name) + + # Create glance image + glance_properties = { + 'architecture': architecture, + } + if hypervisor_type: + glance_properties['hypervisor_type'] = hypervisor_type + # Create glance image + if float(glance.version) < 2.0: + with open(abs_file_name) as f: + image = glance.images.create( + name=image_name, + is_public=True, + disk_format=disk_format, + container_format=container_format, + properties=glance_properties, + data=f) + else: + image = glance.images.create( + name=image_name, + visibility="public", + disk_format=disk_format, + container_format=container_format) + glance.images.upload(image.id, open(abs_file_name, 'rb')) + glance.images.update(image.id, **glance_properties) + + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' + amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == container_format \ + and val_img_dfmt == disk_format: + self.log.debug(msg_attr) + else: + msg = ('Image validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return image + + def create_cirros_image(self, glance, image_name, hypervisor_type=None): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :param hypervisor_type: glance image hypervisor property + :returns: glance image pointer + """ + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'glance_create_image instead of ' + 'create_cirros_image.') + + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Get cirros image URL + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + f.close() + + return self.glance_create_image( + glance, + image_name, + cirros_url, + hypervisor_type=hypervisor_type) + + def delete_image(self, glance, image): + """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) + return self.delete_resource(glance.images, image, msg='glance image') + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) + image = nova.glance.find_image(image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) + return self.delete_resource(nova.servers, instance, + msg='nova instance') + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except Exception: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. 
+ + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. + + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. + + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. 
+ + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # For mimic ceph osd lspools output + output = output.replace("\n", ",") + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. 
The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. + + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. + """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. 
+ + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. + + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. 
+
+        :param sentry_units: list of all rmq sentry units
+        :param port: optional ssl port override to validate
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+                return ('Unexpected condition: ssl is disabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def validate_rmq_ssl_disabled_units(self, sentry_units):
+        """Check that ssl is disabled on the listed rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+                return ('Unexpected condition: ssl is enabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def configure_rmq_ssl_on(self, sentry_units, deployment,
+                             port=None, max_wait=60):
+        """Turn ssl charm config option on, with optional non-default
+        ssl port specification. Confirm that it is enabled on every
+        unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param port: amqp port, use defaults if None
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful. Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option: on')
+
+        # Enable RMQ SSL
+        config = {'ssl': 'on'}
+        if port:
+            config['ssl_port'] = port
+
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
+        """Turn ssl charm config option off, confirm that it is disabled
+        on every unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful. Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option: off')
+
+        # Disable RMQ SSL
+        config = {'ssl': 'off'}
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
+                             port=None, fatal=True,
+                             username="testuser1", password="changeme"):
+        """Establish and return a pika amqp connection to the rabbitmq service
+        running on a rmq juju unit.
+ + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.is_open is True + assert connection.is_closing is False + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. 
+ """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) + + def validate_memcache(self, sentry_unit, conf, os_release, + earliest_release=5, section='keystone_authtoken', + check_kvs=None): + """Check Memcache is running and is configured to be used + + Example call from Amulet test: + + def test_110_memcache(self): + u.validate_memcache(self.neutron_api_sentry, + '/etc/neutron/neutron.conf', + self._get_openstack_release()) + + :param sentry_unit: sentry unit + :param conf: OpenStack config file to check memcache settings + :param os_release: Current OpenStack release int code + :param earliest_release: Earliest Openstack release to check int code + :param section: OpenStack config file section to check + :param check_kvs: Dict of settings to check in config file + :returns: None + """ + if os_release < earliest_release: + self.log.debug('Skipping memcache checks for deployment. {} <' + 'mitaka'.format(os_release)) + return + _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} + self.log.debug('Checking memcached is running') + ret = self.validate_services_by_name({sentry_unit: ['memcached']}) + if ret: + amulet.raise_status(amulet.FAIL, msg='Memcache running check' + 'failed {}'.format(ret)) + else: + self.log.debug('OK') + self.log.debug('Checking memcache url is configured in {}'.format( + conf)) + if self.validate_config_data(sentry_unit, conf, section, _kvs): + message = "Memcache config error in: {}".format(conf) + amulet.raise_status(amulet.FAIL, msg=message) + else: + self.log.debug('OK') + self.log.debug('Checking memcache configuration in ' + '/etc/memcached.conf') + contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', + fatal=True) + ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') + if CompareHostReleases(ubuntu_release) <= 'trusty': + memcache_listen_addr = 'ip6-localhost' + else: + memcache_listen_addr = '::1' + expected = { + '-p': '11211', + '-l': memcache_listen_addr} + found = [] + for key, value in expected.items(): + for line in contents.split('\n'): + if line.startswith(key): + self.log.debug('Checking {} is set to {}'.format( + key, + value)) + assert value == line.split()[-1] + self.log.debug(line.split()[-1]) + found.append(key) + if sorted(found) == sorted(expected.keys()): + self.log.debug('OK') + else: + message = "Memcache config error in: /etc/memcached.conf" + amulet.raise_status(amulet.FAIL, msg=message) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/audits/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/audits/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7f7e5f79a5d5fe3cb374814e32ea16f6060f4f27 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/audits/__init__.py @@ -0,0 +1,212 @@ +# Copyright 2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OpenStack Security Audit code""" + +import collections +from enum import Enum +import traceback + +from charmhelpers.core.host import cmp_pkgrevno +import charmhelpers.contrib.openstack.utils as openstack_utils +import charmhelpers.core.hookenv as hookenv + + +class AuditType(Enum): + OpenStackSecurityGuide = 1 + + +_audits = {} + +Audit = collections.namedtuple('Audit', 'func filters') + + +def audit(*args): + """Decorator to register an audit. + + These are used to generate audits that can be run on a + deployed system that matches the given configuration + + :param args: List of functions to filter tests against + :type args: List[Callable[Dict]] + """ + def wrapper(f): + test_name = f.__name__ + if _audits.get(test_name): + raise RuntimeError( + "Test name '{}' used more than once" + .format(test_name)) + non_callables = [fn for fn in args if not callable(fn)] + if non_callables: + raise RuntimeError( + "Configuration includes non-callable filters: {}" + .format(non_callables)) + _audits[test_name] = Audit(func=f, filters=args) + return f + return wrapper + + +def is_audit_type(*args): + """This audit is included in the specified kinds of audits. + + :param *args: List of AuditTypes to include this audit in + :type args: List[AuditType] + :rtype: Callable[Dict] + """ + def _is_audit_type(audit_options): + if audit_options.get('audit_type') in args: + return True + else: + return False + return _is_audit_type + + +def since_package(pkg, pkg_version): + """This audit should be run after the specified package version (incl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The package version + :type release: str + :rtype: Callable[Dict] + """ + def _since_package(audit_options=None): + return cmp_pkgrevno(pkg, pkg_version) >= 0 + + return _since_package + + +def before_package(pkg, pkg_version): + """This audit should be run before the specified package version (excl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The package version + :type release: str + :rtype: Callable[Dict] + """ + def _before_package(audit_options=None): + return not since_package(pkg, pkg_version)() + + return _before_package + + +def since_openstack_release(pkg, release): + """This audit should run after the specified OpenStack version (incl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The OpenStack release codename + :type release: str + :rtype: Callable[Dict] + """ + def _since_openstack_release(audit_options=None): + _release = openstack_utils.get_os_codename_package(pkg) + return openstack_utils.CompareOpenStackReleases(_release) >= release + + return _since_openstack_release + + +def before_openstack_release(pkg, release): + """This audit should run before the specified OpenStack version (excl). 
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param release: The OpenStack release codename
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _before_openstack_release(audit_options=None):
+        return not since_openstack_release(pkg, release)()
+
+    return _before_openstack_release
+
+
+def it_has_config(config_key):
+    """This audit should be run based on specified config keys.
+
+    :param config_key: Config key to look for
+    :type config_key: str
+    :rtype: Callable[Dict]
+    """
+    def _it_has_config(audit_options):
+        return audit_options.get(config_key) is not None
+
+    return _it_has_config
+
+
+def run(audit_options):
+    """Run the configured audits with the specified audit_options.
+
+    :param audit_options: Configuration for the audit
+    :type audit_options: Config
+
+    :rtype: Dict[str, str]
+    """
+    errors = {}
+    results = {}
+    for name, audit in sorted(_audits.items()):
+        result_name = name.replace('_', '-')
+        if result_name in audit_options.get('excludes', []):
+            print(
+                "Skipping {} because it is "
+                "excluded in audit config"
+                .format(result_name))
+            continue
+        if all(p(audit_options) for p in audit.filters):
+            try:
+                audit.func(audit_options)
+                print("{}: PASS".format(name))
+                results[result_name] = {
+                    'success': True,
+                }
+            except AssertionError as e:
+                print("{}: FAIL ({})".format(name, e))
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+            except Exception as e:
+                print("{}: ERROR ({})".format(name, e))
+                errors[name] = e
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+    for name, error in errors.items():
+        print("=" * 20)
+        print("Error in {}: ".format(name))
+        traceback.print_tb(error.__traceback__)
+        print()
+    return results
+
+
+def action_parse_results(result):
+    """Parse the result of `run` in the context of an action.
+
+    :param result: The result of running the security-checklist
+        action on a unit
+    :type result: Dict[str, Dict[str, str]]
+    :rtype: int
+    """
+    passed = True
+    for test, result in result.items():
+        if result['success']:
+            hookenv.action_set({test: 'PASS'})
+        else:
+            hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
+            passed = False
+    if not passed:
+        hookenv.action_fail("One or more tests failed")
+    return 0 if passed else 1
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
new file mode 100644
index 0000000000000000000000000000000000000000..79740ed0c103e841b6e280af920f8be65e3d1d0d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
@@ -0,0 +1,270 @@
+# Copyright 2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
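+
+# --- Illustrative usage sketch (editor's addition, not part of charmhelpers):
+# a minimal sketch of how a charm might register and run an audit with the
+# framework defined in audits/__init__.py above. `@audit` stores the function
+# in the module-level registry, the filters gate whether it runs, and `run()`
+# records PASS/FAIL per audit. The audit name and option values here are
+# hypothetical.
+def _example_run_audits():
+    from charmhelpers.contrib.openstack.audits import (
+        AuditType, audit, is_audit_type, run)
+
+    @audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+    def config_file_is_named(audit_options):
+        # An audit passes by returning normally; it fails by raising
+        # AssertionError, which run() reports as {'success': False}.
+        assert audit_options.get('config_file'), "config_file not set"
+
+    return run({'audit_type': AuditType.OpenStackSecurityGuide,
+                'config_file': 'keystone.conf'})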
+ +import collections +import configparser +import glob +import os.path +import subprocess + +from charmhelpers.contrib.openstack.audits import ( + audit, + AuditType, + # filters + is_audit_type, + it_has_config, +) + +from charmhelpers.core.hookenv import ( + cached, +) + +""" +The Security Guide suggests a specific list of files inside the +config directory for the service having 640 specifically, but +by ensuring the containing directory is 750, only the owner can +write, and only the group can read files within the directory. + +By restricting access to the containing directory, we can more +effectively ensure that there is no accidental leakage if a new +file is added to the service without being added to the security +guide, and to this check. +""" +FILE_ASSERTIONS = { + 'barbican': { + '/etc/barbican': {'group': 'barbican', 'mode': '750'}, + }, + 'ceph-mon': { + '/var/lib/charm/ceph-mon/ceph.conf': + {'owner': 'root', 'group': 'root', 'mode': '644'}, + '/etc/ceph/ceph.client.admin.keyring': + {'owner': 'ceph', 'group': 'ceph'}, + '/etc/ceph/rbdmap': {'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'} + }, + 'ceph-osd': { + '/var/lib/charm/ceph-osd/ceph.conf': + {'owner': 'ceph', 'group': 'ceph', 'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}, + '/var/lib/ceph/radosgw': + {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + }, + 'cinder': { + '/etc/cinder': {'group': 'cinder', 'mode': '750'}, + }, + 'glance': { + '/etc/glance': {'group': 'glance', 'mode': '750'}, + }, + 'keystone': { + '/etc/keystone': + {'owner': 'keystone', 'group': 'keystone', 'mode': '750'}, + }, + 'manilla': { + '/etc/manila': {'group': 'manilla', 'mode': '750'}, + }, + 'neutron-gateway': { + '/etc/neutron': {'group': 'neutron', 'mode': '750'}, + }, + 'neutron-api': { + '/etc/neutron/': {'group': 'neutron', 'mode': '750'}, + }, + 'nova-cloud-controller': { + '/etc/nova': {'group': 'nova', 'mode': '750'}, + }, + 'nova-compute': { + '/etc/nova/': {'group': 'nova', 'mode': '750'}, + }, + 'openstack-dashboard': { + # From security guide + '/etc/openstack-dashboard/local_settings.py': + {'group': 'horizon', 'mode': '640'}, + }, +} + +Ownership = collections.namedtuple('Ownership', 'owner group mode') + + +@cached +def _stat(file): + """ + Get the Ownership information from a file. + + :param file: The path to a file to stat + :type file: str + :returns: owner, group, and mode of the specified file + :rtype: Ownership + :raises subprocess.CalledProcessError: If the underlying stat fails + """ + out = subprocess.check_output( + ['stat', '-c', '%U %G %a', file]).decode('utf-8') + return Ownership(*out.strip().split(' ')) + + +@cached +def _config_ini(path): + """ + Parse an ini file + + :param path: The path to a file to parse + :type file: str + :returns: Configuration contained in path + :rtype: Dict + """ + # When strict is enabled, duplicate options are not allowed in the + # parsed INI; however, Oslo allows duplicate values. 
+    # This change
+    # causes us to ignore the duplicate values which is acceptable as
+    # long as we don't validate any multi-value options
+    conf = configparser.ConfigParser(strict=False)
+    conf.read(path)
+    return dict(conf)
+
+
+def _validate_file_ownership(owner, group, file_name, optional=False):
+    """
+    Validate that a specified file is owned by `owner:group`.
+
+    :param owner: Name of the owner
+    :type owner: str
+    :param group: Name of the group
+    :type group: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    :param optional: Is this file optional,
+                     ie: Should this test fail when it's missing
+    :type optional: bool
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+        # The file is optional and absent; nothing further to validate
+        # (without this early return `ownership` would be unbound below).
+        return
+    assert owner == ownership.owner, \
+        "{} has an incorrect owner: {} should be {}".format(
+            file_name, ownership.owner, owner)
+    assert group == ownership.group, \
+        "{} has an incorrect group: {} should be {}".format(
+            file_name, ownership.group, group)
+    print("Validate ownership of {}: PASS".format(file_name))
+
+
+def _validate_file_mode(mode, file_name, optional=False):
+    """
+    Validate that a specified file has the specified permissions.
+
+    :param mode: file mode that is desired
+    :type mode: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    :param optional: Is this file optional,
+                     ie: Should this test fail when it's missing
+    :type optional: bool
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+        # The file is optional and absent; nothing further to validate.
+        return
+    assert mode == ownership.mode, \
+        "{} has an incorrect mode: {} should be {}".format(
+            file_name, ownership.mode, mode)
+    print("Validate mode of {}: PASS".format(file_name))
+
+
+@cached
+def _config_section(config, section):
+    """Read the configuration file and return a section."""
+    path = os.path.join(config.get('config_path'), config.get('config_file'))
+    conf = _config_ini(path)
+    return conf.get(section)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_ownership(config):
+    """Verify that configuration files are owned by the correct user/group."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        owner = options.get('owner', config.get('owner', 'root'))
+        group = options.get('group', config.get('group', 'root'))
+        optional = options.get('optional', config.get('optional', False))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_ownership(owner, group, file, optional)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_ownership(owner, group, file_name, optional)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_permissions(config):
+    """Verify that permissions on configuration files are secure enough."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        mode = options.get('mode', config.get('permissions', '600'))
+        optional = options.get('optional', config.get('optional', False))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_mode(mode, file, optional)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_mode(mode, file_name, optional)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_keystone(audit_options):
+    """Validate that the service uses Keystone for authentication."""
+    section = _config_section(audit_options, 'api') or \
+        _config_section(audit_options, 'DEFAULT')
+    assert section is not None, "Missing section 'api / DEFAULT'"
+    assert section.get('auth_strategy') == "keystone", \
+        "Application is not using Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_keystone(audit_options):
+    """Verify that TLS is used to communicate with Keystone."""
+    section = _config_section(audit_options, 'keystone_authtoken')
+    assert section is not None, "Missing section 'keystone_authtoken'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("auth_uri"), \
+        "TLS is not used for Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_glance(audit_options):
+    """Verify that TLS is used to communicate with Glance."""
+    section = _config_section(audit_options, 'glance')
+    assert section is not None, "Missing section 'glance'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("api_servers"), \
+        "TLS is not used for Glance"
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/cert_utils.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/cert_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b494af64aeae55db44b669990725de81705104b2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/cert_utils.py
@@ -0,0 +1,289 @@
+# Copyright 2014-2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common python helper functions used for OpenStack charm certificates.
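+
+# --- Illustrative usage sketch (editor's addition, not part of charmhelpers):
+# a minimal sketch of how a charm typically drives these helpers over the
+# 'certificates' relation -- publish a request built from the network
+# configuration, then install whatever the CA charm returned. The relation id
+# 'certificates:0' and unit name 'vault/0' are hypothetical placeholders.
+def _example_request_and_install_certs():
+    from charmhelpers.core.hookenv import relation_ids, relation_set
+
+    # Publish a JSON-encoded request covering every CN/SAN this unit needs
+    # (see get_certificate_request() below).
+    for rid in relation_ids('certificates'):
+        relation_set(relation_id=rid,
+                     relation_settings=get_certificate_request())
+
+    # Once the provider responds, write the certs for the local unit under
+    # /etc/apache2/ssl/<service_name> (see process_certificates() below).
+    return process_certificates('keystone', 'certificates:0', 'vault/0')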
+
+import os
+import json
+
+from charmhelpers.contrib.network.ip import (
+    get_hostname,
+    resolve_network_cidr,
+)
+from charmhelpers.core.hookenv import (
+    local_unit,
+    network_get_primary_address,
+    config,
+    related_units,
+    relation_get,
+    relation_ids,
+    unit_get,
+    NoNetworkBinding,
+    log,
+    WARNING,
+)
+from charmhelpers.contrib.openstack.ip import (
+    ADMIN,
+    resolve_address,
+    get_vip_in_network,
+    INTERNAL,
+    PUBLIC,
+    ADDRESS_MAP)
+
+from charmhelpers.core.host import (
+    mkdir,
+    write_file,
+)
+
+from charmhelpers.contrib.hahelpers.apache import (
+    install_ca_cert
+)
+
+
+class CertRequest(object):
+
+    """Create a request for certificates to be generated
+    """
+
+    def __init__(self, json_encode=True):
+        self.entries = []
+        self.hostname_entry = None
+        self.json_encode = json_encode
+
+    def add_entry(self, net_type, cn, addresses):
+        """Add a request to the batch
+
+        :param net_type: str network space name request is for
+        :param cn: str Canonical Name for certificate
+        :param addresses: [] List of addresses to be used as SANs
+        """
+        self.entries.append({
+            'cn': cn,
+            'addresses': addresses})
+
+    def add_hostname_cn(self):
+        """Add a request for the hostname of the machine"""
+        ip = unit_get('private-address')
+        addresses = [ip]
+        # If a vip is being used without os-hostname config or
+        # network spaces then we need to ensure the local units
+        # cert has the appropriate vip in the SAN list
+        vip = get_vip_in_network(resolve_network_cidr(ip))
+        if vip:
+            addresses.append(vip)
+        self.hostname_entry = {
+            'cn': get_hostname(ip),
+            'addresses': addresses}
+
+    def add_hostname_cn_ip(self, addresses):
+        """Add an address to the SAN list for the hostname request
+
+        :param addresses: [] List of addresses to be added
+        """
+        for addr in addresses:
+            if addr not in self.hostname_entry['addresses']:
+                self.hostname_entry['addresses'].append(addr)
+
+    def get_request(self):
+        """Generate request from the batched up entries
+
+        """
+        if self.hostname_entry:
+            self.entries.append(self.hostname_entry)
+        request = {}
+        for entry in self.entries:
+            sans = sorted(list(set(entry['addresses'])))
+            request[entry['cn']] = {'sans': sans}
+        if self.json_encode:
+            req = {'cert_requests': json.dumps(request, sort_keys=True)}
+        else:
+            req = {'cert_requests': request}
+        req['unit_name'] = local_unit().replace('/', '_')
+        return req
+
+
+def get_certificate_request(json_encode=True):
+    """Generate a certificate request based on the network configuration
+
+    """
+    req = CertRequest(json_encode=json_encode)
+    req.add_hostname_cn()
+    # Add os-hostname entries
+    for net_type in [INTERNAL, ADMIN, PUBLIC]:
+        net_config = config(ADDRESS_MAP[net_type]['override'])
+        try:
+            net_addr = resolve_address(endpoint_type=net_type)
+            ip = network_get_primary_address(
+                ADDRESS_MAP[net_type]['binding'])
+            addresses = [net_addr, ip]
+            vip = get_vip_in_network(resolve_network_cidr(ip))
+            if vip:
+                addresses.append(vip)
+            if net_config:
+                req.add_entry(
+                    net_type,
+                    net_config,
+                    addresses)
+            else:
+                # There is network address with no corresponding hostname.
+                # Add the ip to the hostname cert to allow for this.
+                req.add_hostname_cn_ip(addresses)
+        except NoNetworkBinding:
+            log("Skipping request for certificate for ip in {} space, no "
+                "local address found".format(net_type), WARNING)
+    return req.get_request()
+
+
+def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
+    """Create symlinks for SAN records
+
+    :param ssl_dir: str Directory to create symlinks in
+    :param custom_hostname_link: str Additional link to be created
+    """
+    hostname = get_hostname(unit_get('private-address'))
+    hostname_cert = os.path.join(
+        ssl_dir,
+        'cert_{}'.format(hostname))
+    hostname_key = os.path.join(
+        ssl_dir,
+        'key_{}'.format(hostname))
+    # Add links to hostname cert, used if os-hostname vars not set
+    for net_type in [INTERNAL, ADMIN, PUBLIC]:
+        try:
+            addr = resolve_address(endpoint_type=net_type)
+            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
+            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
+            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
+                os.symlink(hostname_cert, cert)
+                os.symlink(hostname_key, key)
+        except NoNetworkBinding:
+            log("Skipping creating cert symlink for ip in {} space, no "
+                "local address found".format(net_type), WARNING)
+    if custom_hostname_link:
+        custom_cert = os.path.join(
+            ssl_dir,
+            'cert_{}'.format(custom_hostname_link))
+        custom_key = os.path.join(
+            ssl_dir,
+            'key_{}'.format(custom_hostname_link))
+        if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
+            os.symlink(hostname_cert, custom_cert)
+            os.symlink(hostname_key, custom_key)
+
+
+def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
+    """Install the certs passed into the ssl dir and append the chain if
+    provided.
+
+    :param ssl_dir: str Directory to create symlinks in
+    :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
+    :param chain: str Chain to be appended to certs
+    :param user: (Optional) Owner of certificate files. Defaults to 'root'
+    :type user: str
+    :param group: (Optional) Group of certificate files. Defaults to 'root'
+    :type group: str
+    """
+    for cn, bundle in certs.items():
+        cert_filename = 'cert_{}'.format(cn)
+        key_filename = 'key_{}'.format(cn)
+        cert_data = bundle['cert']
+        if chain:
+            # Append chain file so that clients that trust the root CA will
+            # trust certs signed by an intermediate in the chain
+            cert_data = cert_data + os.linesep + chain
+        write_file(
+            path=os.path.join(ssl_dir, cert_filename), owner=user, group=group,
+            content=cert_data, perms=0o640)
+        write_file(
+            path=os.path.join(ssl_dir, key_filename), owner=user, group=group,
+            content=bundle['key'], perms=0o640)
+
+
+def process_certificates(service_name, relation_id, unit,
+                         custom_hostname_link=None, user='root', group='root'):
+    """Process the certificates supplied down the relation
+
+    :param service_name: str Name of service the certificates are for.
+    :param relation_id: str Relation id providing the certs
+    :param unit: str Unit providing the certs
+    :param custom_hostname_link: str Name of custom link to create
+    :param user: (Optional) Owner of certificate files. Defaults to 'root'
+    :type user: str
+    :param group: (Optional) Group of certificate files.
Defaults to 'root' + :type group: str + :returns: True if certificates processed for local unit or False + :rtype: bool + """ + data = relation_get(rid=relation_id, unit=unit) + ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) + mkdir(path=ssl_dir) + name = local_unit().replace('/', '_') + certs = data.get('{}.processed_requests'.format(name)) + chain = data.get('chain') + ca = data.get('ca') + if certs: + certs = json.loads(certs) + install_ca_cert(ca.encode()) + install_certs(ssl_dir, certs, chain, user=user, group=group) + create_ip_cert_links( + ssl_dir, + custom_hostname_link=custom_hostname_link) + return True + return False + + +def get_requests_for_local_unit(relation_name=None): + """Extract any certificates data targeted at this unit down relation_name. + + :param relation_name: str Name of relation to check for data. + :returns: List of bundles of certificates. + :rtype: List of dicts + """ + local_name = local_unit().replace('/', '_') + raw_certs_key = '{}.processed_requests'.format(local_name) + relation_name = relation_name or 'certificates' + bundles = [] + for rid in relation_ids(relation_name): + for unit in related_units(rid): + data = relation_get(rid=rid, unit=unit) + if data.get(raw_certs_key): + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': json.loads(data[raw_certs_key])}) + return bundles + + +def get_bundle_for_cn(cn, relation_name=None): + """Extract certificates for the given cn. + + :param cn: str Canonical Name on certificate. + :param relation_name: str Relation to check for certificates down. + :returns: Dictionary of certificate data, + :rtype: dict. + """ + entries = get_requests_for_local_unit(relation_name) + cert_bundle = {} + for entry in entries: + for _cn, bundle in entry['certs'].items(): + if _cn == cn: + cert_bundle = { + 'cert': bundle['cert'], + 'key': bundle['key'], + 'chain': entry['chain'], + 'ca': entry['ca']} + break + if cert_bundle: + break + return cert_bundle diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/context.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/context.py new file mode 100644 index 0000000000000000000000000000000000000000..42abccf7cb9eabc063bc8d87f016e59f4dc8f898 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,3177 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
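+
+# --- Illustrative usage sketch (editor's addition, not part of charmhelpers):
+# a minimal custom context generator built on the OSContextGenerator base
+# class defined below. Charm templates are rendered with the dict a context
+# returns; an empty dict marks the context incomplete. WorkerContext and the
+# 'workers' key are hypothetical.
+def _example_worker_context():
+    import multiprocessing
+
+    class WorkerContext(OSContextGenerator):
+        def __call__(self):
+            # Expose a worker count for use in service config templates.
+            return {'workers': multiprocessing.cpu_count() * 2}
+
+    # e.g. {'workers': 8} on a four-core machine.
+    return WorkerContext()()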
+ +import collections +import copy +import enum +import glob +import hashlib +import json +import math +import os +import re +import socket +import time + +from base64 import b64decode +from subprocess import check_call, CalledProcessError + +import six + +from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( + _config_ini as config_ini +) + +from charmhelpers.fetch import ( + apt_install, + filter_installed_packages, +) +from charmhelpers.core.hookenv import ( + NoNetworkBinding, + config, + is_relation_made, + local_unit, + log, + relation_get, + relation_ids, + related_units, + relation_set, + unit_get, + unit_private_ip, + charm_name, + DEBUG, + INFO, + ERROR, + status_set, + network_get_primary_address, + WARNING, +) + +from charmhelpers.core.sysctl import create as sysctl_create +from charmhelpers.core.strutils import bool_from_string +from charmhelpers.contrib.openstack.exceptions import OSContextError + +from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, + list_nics, + get_nic_hwaddr, + mkdir, + write_file, + pwgen, + lsb_release, + CompareHostReleases, + is_container, +) +from charmhelpers.contrib.hahelpers.cluster import ( + determine_apache_port, + determine_api_port, + https, + is_clustered, +) +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, + install_ca_cert, +) +from charmhelpers.contrib.openstack.neutron import ( + neutron_plugin_attribute, + parse_data_port_mappings, +) +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + INTERNAL, + ADMIN, + PUBLIC, + ADDRESS_MAP, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv4_addr, + get_ipv6_addr, + get_netmask_for_address, + format_ipv6_addr, + is_bridge_member, + is_ipv6_disabled, + get_relation_ip, +) +from charmhelpers.contrib.openstack.utils import ( + config_flags_parser, + get_os_codename_install_source, + enable_memcache, + CompareOpenStackReleases, + os_release, +) +from charmhelpers.core.unitdata import kv + +try: + from sriov_netplan_shim import pci +except ImportError: + # The use of the function and contexts that require the pci module is + # optional. + pass + +try: + import psutil +except ImportError: + if six.PY2: + apt_install('python-psutil', fatal=True) + else: + apt_install('python3-psutil', fatal=True) + import psutil + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] +HAPROXY_RUN_DIR = '/var/run/haproxy/' +DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" + + +def ensure_packages(packages): + """Install but do not upgrade required plugin packages.""" + required = filter_installed_packages(packages) + if required: + apt_install(required, fatal=True) + + +def context_complete(ctxt): + _missing = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + _missing.append(k) + + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level=INFO) + return False + + return True + + +class OSContextGenerator(object): + """Base class for all context generators.""" + interfaces = [] + related = False + complete = False + missing_data = [] + + def __call__(self): + raise NotImplementedError + + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. 
+ """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log('Missing required data: %s' % ' '.join(self.missing_data), + level=INFO) + else: + self.complete = True + return self.complete + + def get_related(self): + """Check if any of the context interfaces have relation ids. + Set self.related and return True if one of the interfaces + has relation ids. + """ + # Fresh start + self.related = False + try: + for interface in self.interfaces: + if relation_ids(interface): + self.related = True + return self.related + except AttributeError as e: + log("{} {}" + "".format(self, e), 'INFO') + return self.related + + +class SharedDBContext(OSContextGenerator): + interfaces = ['shared-db'] + + def __init__(self, database=None, user=None, relation_prefix=None, + ssl_dir=None, relation_id=None): + """Allows inspecting relation for settings prefixed with + relation_prefix. This is useful for parsing access for multiple + databases returned via the shared-db interface (eg, nova_password, + quantum_password) + """ + self.relation_prefix = relation_prefix + self.database = database + self.user = user + self.ssl_dir = ssl_dir + self.rel_name = self.interfaces[0] + self.relation_id = relation_id + + def __call__(self): + self.database = self.database or config('database') + self.user = self.user or config('database-user') + if None in [self.database, self.user]: + log("Could not generate shared_db context. Missing required charm " + "config options. (database name and user)", level=ERROR) + raise OSContextError + + ctxt = {} + + # NOTE(jamespage) if mysql charm provides a network upon which + # access to the database should be made, reconfigure relation + # with the service units local address and defer execution + access_network = relation_get('access-network') + if access_network is not None: + if self.relation_prefix is not None: + hostname_key = "{}_hostname".format(self.relation_prefix) + else: + hostname_key = "hostname" + access_hostname = get_address_in_network( + access_network, + unit_get('private-address')) + set_hostname = relation_get(attribute=hostname_key, + unit=local_unit()) + if set_hostname != access_hostname: + relation_set(relation_settings={hostname_key: access_hostname}) + return None # Defer any further hook execution for now.... + + password_setting = 'password' + if self.relation_prefix: + password_setting = self.relation_prefix + '_password' + + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.interfaces[0]) + + rel = (get_os_codename_install_source(config('openstack-origin')) or + 'icehouse') + for rid in rids: + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + host = rdata.get('db_host') + host = format_ipv6_addr(host) or host + ctxt = { + 'database_host': host, + 'database': self.database, + 'database_user': self.user, + 'database_password': rdata.get(password_setting), + 'database_type': 'mysql+pymysql' + } + # Port is being introduced with LP Bug #1876188 + # but it not currently required and may not be set in all + # cases, particularly in classic charms. 
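+                # NOTE(editorial): descriptive example, not upstream code.
+                # When complete, the ctxt assembled above is typically
+                # rendered into an oslo.db connection URL by a template,
+                # e.g. (values hypothetical):
+                #
+                #   mysql+pymysql://nova:s3cret@10.0.0.10/nova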
+ port = rdata.get('db_port') + if port: + ctxt['database_port'] = port + if CompareOpenStackReleases(rel) < 'queens': + ctxt['database_type'] = 'mysql' + if self.context_complete(ctxt): + db_ssl(rdata, ctxt, self.ssl_dir) + return ctxt + return {} + + +class PostgresqlDBContext(OSContextGenerator): + interfaces = ['pgsql-db'] + + def __init__(self, database=None): + self.database = database + + def __call__(self): + self.database = self.database or config('database') + if self.database is None: + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. (database name)', level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.interfaces[0]): + self.related = True + for unit in related_units(rid): + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} + if self.context_complete(ctxt): + return ctxt + + return {} + + +def db_ssl(rdata, ctxt, ssl_dir): + if 'ssl_ca' in rdata and ssl_dir: + ca_path = os.path.join(ssl_dir, 'db-client.ca') + with open(ca_path, 'wb') as fh: + fh.write(b64decode(rdata['ssl_ca'])) + + ctxt['database_ssl_ca'] = ca_path + elif 'ssl_ca' in rdata: + log("Charm not setup for ssl support but ssl ca found", level=INFO) + return ctxt + + if 'ssl_cert' in rdata: + cert_path = os.path.join( + ssl_dir, 'db-client.cert') + if not os.path.exists(cert_path): + log("Waiting 1m for ssl client cert validity", level=INFO) + time.sleep(60) + + with open(cert_path, 'wb') as fh: + fh.write(b64decode(rdata['ssl_cert'])) + + ctxt['database_ssl_cert'] = cert_path + key_path = os.path.join(ssl_dir, 'db-client.key') + with open(key_path, 'wb') as fh: + fh.write(b64decode(rdata['ssl_key'])) + + ctxt['database_ssl_key'] = key_path + + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + + def __init__(self, + service=None, + service_user=None, + rel_name='identity-service'): + self.service = service + self.service_user = service_user + self.rel_name = rel_name + self.interfaces = [self.rel_name] + + def _setup_pki_cache(self): + if self.service and self.service_user: + # This is required for pki token signing if we don't want /tmp to + # be used. + cachedir = '/var/cache/%s' % (self.service) + if not os.path.isdir(cachedir): + log("Creating service cache dir %s" % (cachedir), level=DEBUG) + mkdir(path=cachedir, owner=self.service_user, + group=self.service_user, perms=0o700) + + return cachedir + return None + + def _get_pkg_name(self, python_name='keystonemiddleware'): + """Get corresponding distro installed package for python + package name. + + :param python_name: nameof the python package + :type: string + """ + pkg_names = map(lambda x: x + python_name, ('python3-', 'python-')) + + for pkg in pkg_names: + if not filter_installed_packages((pkg,)): + return pkg + + return None + + def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): + """Build Jinja2 context for full rendering of [keystone_authtoken] + section with variable names included. Re-constructed from former + template 'section-keystone-auth-mitaka'. 
+ + :param ctxt: Jinja2 context returned from self.__call__() + :type: dict + :param keystonemiddleware_os_rel: OpenStack release name of + keystonemiddleware package installed + """ + c = collections.OrderedDict((('auth_type', 'password'),)) + + # 'www_authenticate_uri' replaced 'auth_uri' since Stein, + # see keystonemiddleware upstream sources for more info + if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': + c.update(( + ('www_authenticate_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + else: + c.update(( + ('auth_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + + c.update(( + ('auth_url', "{}://{}:{}/v3".format( + ctxt.get('auth_protocol', ''), + ctxt.get('auth_host', ''), + ctxt.get('auth_port', ''))), + ('project_domain_name', ctxt.get('admin_domain_name', '')), + ('user_domain_name', ctxt.get('admin_domain_name', '')), + ('project_name', ctxt.get('admin_tenant_name', '')), + ('username', ctxt.get('admin_user', '')), + ('password', ctxt.get('admin_password', '')), + ('signing_dir', ctxt.get('signing_dir', '')),)) + + return c + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + keystonemiddleware_os_release = None + if self._get_pkg_name(): + keystonemiddleware_os_release = os_release(self._get_pkg_name()) + + cachedir = self._setup_pki_cache() + if cachedir: + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + serv_host = rdata.get('service_host') + serv_host = format_ipv6_addr(serv_host) or serv_host + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('service_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version}) + + if float(api_version) > 2: + ctxt.update({ + 'admin_domain_name': rdata.get('service_domain'), + 'service_project_id': rdata.get('service_tenant_id'), + 'service_domain_id': rdata.get('service_domain_id')}) + + # we keep all veriables in ctxt for compatibility and + # add nested dictionary for keystone_authtoken generic + # templating + if keystonemiddleware_os_release: + ctxt['keystone_authtoken'] = \ + self._get_keystone_authtoken_ctxt( + ctxt, keystonemiddleware_os_release) + + if self.context_complete(ctxt): + # NOTE(jamespage) this is required for >= icehouse + # so a missing value just indicates keystone needs + # upgrading + ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + ctxt['admin_domain_id'] = rdata.get('service_domain_id') + return ctxt + + return {} + + +class IdentityCredentialsContext(IdentityServiceContext): + '''Context for identity-credentials interface type''' + + def __init__(self, + service=None, + service_user=None, + rel_name='identity-credentials'): + super(IdentityCredentialsContext, self).__init__(service, + service_user, + 
rel_name) + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + cachedir = self._setup_pki_cache() + if cachedir: + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + credentials_host = rdata.get('credentials_host') + credentials_host = ( + format_ipv6_addr(credentials_host) or credentials_host + ) + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('credentials_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({ + 'service_port': rdata.get('credentials_port'), + 'service_host': credentials_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('credentials_project'), + 'admin_tenant_id': rdata.get('credentials_project_id'), + 'admin_user': rdata.get('credentials_username'), + 'admin_password': rdata.get('credentials_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version + }) + + if float(api_version) > 2: + ctxt.update({'admin_domain_name': + rdata.get('domain')}) + + if self.context_complete(ctxt): + return ctxt + + return {} + + +class NovaVendorMetadataContext(OSContextGenerator): + """Context used for configuring nova vendor metadata on nova.conf file.""" + + def __init__(self, os_release_pkg, interfaces=None): + """Initialize the NovaVendorMetadataContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + :param interfaces: list of string values to be used as the Context's + relation interfaces. + :type interfaces: List[str] + """ + self.os_release_pkg = os_release_pkg + if interfaces is not None: + self.interfaces = interfaces + + def __call__(self): + cmp_os_release = CompareOpenStackReleases( + os_release(self.os_release_pkg)) + ctxt = {'vendor_data': False} + + vdata_providers = [] + vdata = config('vendor-data') + vdata_url = config('vendor-data-url') + + if vdata: + try: + # validate the JSON. If invalid, we do not set anything here + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data'] = True + # Mitaka does not support DynamicJSON + # so vendordata_providers is not needed + if cmp_os_release > 'mitaka': + vdata_providers.append('StaticJSON') + + if vdata_url: + if cmp_os_release > 'mitaka': + ctxt['vendor_data_url'] = vdata_url + vdata_providers.append('DynamicJSON') + else: + log('Dynamic vendor data unsupported' + ' for {}.'.format(cmp_os_release), level=ERROR) + if vdata_providers: + ctxt['vendordata_providers'] = ','.join(vdata_providers) + + return ctxt + + +class NovaVendorMetadataJSONContext(OSContextGenerator): + """Context used for writing nova vendor metadata json file.""" + + def __init__(self, os_release_pkg): + """Initialize the NovaVendorMetadataJSONContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + """ + self.os_release_pkg = os_release_pkg + + def __call__(self): + ctxt = {'vendor_data_json': '{}'} + + vdata = config('vendor-data') + if vdata: + try: + # validate the JSON. If invalid, we return empty. 
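+                # NOTE(editorial): e.g. a vendor-data value such as
+                # '{"cloud": "mycloud", "contact": "ops@example.com"}'
+                # (hypothetical keys) passes this check; any malformed
+                # JSON leaves vendor_data_json as the default '{}'.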
+ json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data_json'] = vdata + + return ctxt + + +class AMQPContext(OSContextGenerator): + + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, + relation_id=None): + self.ssl_dir = ssl_dir + self.rel_name = rel_name + self.relation_prefix = relation_prefix + self.interfaces = [rel_name] + self.relation_id = relation_id + + def __call__(self): + log('Generating template context for amqp', level=DEBUG) + conf = config() + if self.relation_prefix: + user_setting = '%s-rabbit-user' % (self.relation_prefix) + vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) + else: + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' + + try: + username = conf[user_setting] + vhost = conf[vhost_setting] + except KeyError as e: + log('Could not generate shared_db context. Missing required charm ' + 'config options: %s.' % e, level=ERROR) + raise OSContextError + + ctxt = {} + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.rel_name) + for rid in rids: + ha_vip_only = False + self.related = True + transport_hosts = None + rabbitmq_port = '5672' + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + ctxt['clustered'] = True + vip = relation_get('vip', rid=rid, unit=unit) + vip = format_ipv6_addr(vip) or vip + ctxt['rabbitmq_host'] = vip + transport_hosts = [vip] + else: + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + ctxt['rabbitmq_host'] = host + transport_hosts = [host] + + ctxt.update({ + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + }) + + ssl_port = relation_get('ssl_port', rid=rid, unit=unit) + if ssl_port: + ctxt['rabbit_ssl_port'] = ssl_port + rabbitmq_port = ssl_port + + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) + if ssl_ca: + ctxt['rabbit_ssl_ca'] = ssl_ca + + if relation_get('ha_queues', rid=rid, unit=unit) is not None: + ctxt['rabbitmq_ha_queues'] = True + + ha_vip_only = relation_get('ha-vip-only', + rid=rid, unit=unit) is not None + + if self.context_complete(ctxt): + if 'rabbit_ssl_ca' in ctxt: + if not self.ssl_dir: + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) + break + + ca_path = os.path.join( + self.ssl_dir, 'rabbit-client-ca.pem') + with open(ca_path, 'wb') as fh: + fh.write(b64decode(ctxt['rabbit_ssl_ca'])) + ctxt['rabbit_ssl_ca'] = ca_path + + # Sufficient information found = break out! 
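+                    # NOTE(editorial): illustrative only. The host and
+                    # credentials gathered above are later assembled into a
+                    # transport_url of the form (values hypothetical):
+                    #
+                    #   rabbit://nova:s3cret@10.0.0.5:5672/openstack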
+ break + + # Used for active/active rabbitmq >= grizzly + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): + rabbitmq_hosts = [] + for unit in related_units(rid): + host = relation_get('private-address', rid=rid, unit=unit) + if not relation_get('password', rid=rid, unit=unit): + log( + ("Skipping {} password not sent which indicates " + "unit is not ready.".format(host)), + level=DEBUG) + continue + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) + + rabbitmq_hosts = sorted(rabbitmq_hosts) + ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) + transport_hosts = rabbitmq_hosts + + if transport_hosts: + transport_url_hosts = ','.join([ + "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], + ctxt['rabbitmq_password'], + host_, + rabbitmq_port) + for host_ in transport_hosts]) + ctxt['transport_url'] = "rabbit://{}/{}".format( + transport_url_hosts, vhost) + + oslo_messaging_flags = conf.get('oslo-messaging-flags', None) + if oslo_messaging_flags: + ctxt['oslo_messaging_flags'] = config_flags_parser( + oslo_messaging_flags) + + oslo_messaging_driver = conf.get( + 'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER) + if oslo_messaging_driver: + ctxt['oslo_messaging_driver'] = oslo_messaging_driver + + notification_format = conf.get('notification-format', None) + if notification_format: + ctxt['notification_format'] = notification_format + + notification_topics = conf.get('notification-topics', None) + if notification_topics: + ctxt['notification_topics'] = notification_topics + + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) + if send_notifications_to_logs: + ctxt['send_notifications_to_logs'] = send_notifications_to_logs + + if not self.complete: + return {} + + return ctxt + + +class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" + interfaces = ['ceph'] + + def __call__(self): + if not relation_ids('ceph'): + return {} + + log('Generating template context for ceph', level=DEBUG) + mon_hosts = [] + ctxt = { + 'use_syslog': str(config('use-syslog')).lower() + } + for rid in relation_ids('ceph'): + for unit in related_units(rid): + if not ctxt.get('auth'): + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) + if not ctxt.get('key'): + ctxt['key'] = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('rbd_features'): + default_features = relation_get('rbd-features', rid=rid, unit=unit) + if default_features is not None: + ctxt['rbd_features'] = default_features + + ceph_addrs = relation_get('ceph-public-address', rid=rid, + unit=unit) + if ceph_addrs: + for addr in ceph_addrs.split(' '): + mon_hosts.append(format_ipv6_addr(addr) or addr) + else: + priv_addr = relation_get('private-address', rid=rid, + unit=unit) + mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) + + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + + if not self.context_complete(ctxt): + return {} + + ensure_packages(['ceph-common']) + return ctxt + + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + We set `key` and `auth` to None here, by default, to ensure + that the context will always evaluate to incomplete until the + Ceph relation has actually sent these details; otherwise, + there is a potential race condition between the relation + appearing and the first unit actually setting this data on the + relation. 
+ + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if 'auth' not in ctxt or 'key' not in ctxt: + return False + return super(CephContext, self).context_complete(ctxt) + + +class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. + + :side effect: mkdir is called on HAPROXY_RUN_DIR + """ + interfaces = ['cluster'] + + def __init__(self, singlenode_mode=False, + address_types=ADDRESS_TYPES): + self.address_types = address_types + self.singlenode_mode = singlenode_mode + + def __call__(self): + if not os.path.isdir(HAPROXY_RUN_DIR): + mkdir(path=HAPROXY_RUN_DIR) + if not relation_ids('cluster') and not self.singlenode_mode: + return {} + + l_unit = local_unit().replace('/', '-') + cluster_hosts = collections.OrderedDict() + + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in self.address_types: + cfg_opt = 'os-{}-network'.format(addr_type) + # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather + # than 'internal' + if addr_type == 'internal': + _addr_map_type = INTERNAL + else: + _addr_map_type = addr_type + # Network spaces aware + laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'], + config(cfg_opt)) + if laddr: + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = { + 'network': "{}/{}".format(laddr, + netmask), + 'backends': collections.OrderedDict([(l_unit, + laddr)]) + } + for rid in relation_ids('cluster'): + for unit in sorted(related_units(rid)): + # API Charms will need to set {addr_type}-address with + # get_relation_ip(addr_type) + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) add backend based on get_relation_ip - this + # will either be the only backend or the fallback if no acls + # match in the frontend + # Network spaces aware + addr = get_relation_ip('cluster') + cluster_hosts[addr] = {} + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = { + 'network': "{}/{}".format(addr, netmask), + 'backends': collections.OrderedDict([(l_unit, + addr)]) + } + for rid in relation_ids('cluster'): + for unit in sorted(related_units(rid)): + # API Charms will need to set their private-address with + # get_relation_ip('cluster') + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr + + ctxt = { + 'frontends': cluster_hosts, + 'default_backend': addr + } + + if config('haproxy-server-timeout'): + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + + if config('haproxy-client-timeout'): + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') + + if config('haproxy-queue-timeout'): + ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') + + if config('haproxy-connect-timeout'): + ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') + + if config('prefer-ipv6'): + ctxt['local_host'] = 'ip6-localhost' + ctxt['haproxy_host'] = '::' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + + ctxt['ipv6_enabled'] = not is_ipv6_disabled() + + ctxt['stat_port'] = '8888' + + db = kv() + 
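+        # NOTE(editorial): descriptive comment. kv() is charmhelpers'
+        # unit-local, sqlite-backed key/value store; the lines below
+        # generate the haproxy stats password once and persist it so the
+        # same value survives subsequent hook executions.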
+        ctxt['stat_password'] = db.get('stat-password')
+        if not ctxt['stat_password']:
+            ctxt['stat_password'] = db.set('stat-password',
+                                           pwgen(32))
+            db.flush()
+
+        for frontend in cluster_hosts:
+            if (len(cluster_hosts[frontend]['backends']) > 1 or
+                    self.singlenode_mode):
+                # Enable haproxy when we have enough peers.
+                log('Ensuring haproxy enabled in /etc/default/haproxy.',
+                    level=DEBUG)
+                with open('/etc/default/haproxy', 'w') as out:
+                    out.write('ENABLED=1\n')
+
+                return ctxt
+
+        log('HAProxy context is incomplete, this unit has no peers.',
+            level=INFO)
+        return {}
+
+
+class ImageServiceContext(OSContextGenerator):
+    interfaces = ['image-service']
+
+    def __call__(self):
+        """Obtains the glance API server from the image-service relation.
+        Useful in nova and cinder (currently).
+        """
+        log('Generating template context for image-service.', level=DEBUG)
+        rids = relation_ids('image-service')
+        if not rids:
+            return {}
+
+        for rid in rids:
+            for unit in related_units(rid):
+                api_server = relation_get('glance-api-server',
+                                          rid=rid, unit=unit)
+                if api_server:
+                    return {'glance_api_servers': api_server}
+
+        log("ImageService context is incomplete. Missing required relation "
+            "data.", level=INFO)
+        return {}
+
+
+class ApacheSSLContext(OSContextGenerator):
+    """Generates a context for an apache vhost configuration that configures
+    HTTPS reverse proxying for one or many endpoints. Generated context
+    looks something like::
+
+        {
+            'namespace': 'cinder',
+            'private_address': 'iscsi.mycinderhost.com',
+            'endpoints': [(8776, 8766), (8777, 8767)]
+        }
+
+    The endpoints list consists of tuples mapping external ports
+    to internal ports.
+    """
+    interfaces = ['https']
+
+    # charms should inherit this context and set external ports
+    # and service namespace accordingly.
+    external_ports = []
+    service_namespace = None
+    user = group = 'root'
+
+    def enable_modules(self):
+        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers']
+        check_call(cmd)
+
+    def configure_cert(self, cn=None):
+        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
+        mkdir(path=ssl_dir)
+        cert, key = get_cert(cn)
+        if cert and key:
+            if cn:
+                cert_filename = 'cert_{}'.format(cn)
+                key_filename = 'key_{}'.format(cn)
+            else:
+                cert_filename = 'cert'
+                key_filename = 'key'
+
+            write_file(path=os.path.join(ssl_dir, cert_filename),
+                       content=b64decode(cert), owner=self.user,
+                       group=self.group, perms=0o640)
+            write_file(path=os.path.join(ssl_dir, key_filename),
+                       content=b64decode(key), owner=self.user,
+                       group=self.group, perms=0o640)
+
+    def configure_ca(self):
+        ca_cert = get_ca_cert()
+        if ca_cert:
+            install_ca_cert(b64decode(ca_cert))
+
+    def canonical_names(self):
+        """Figure out which canonical names clients will access this service.
+        """
+        cns = []
+        for r_id in relation_ids('identity-service'):
+            for unit in related_units(r_id):
+                rdata = relation_get(rid=r_id, unit=unit)
+                for k in rdata:
+                    if k.startswith('ssl_key_'):
+                        cns.append(k.lstrip('ssl_key_'))
+
+        return sorted(list(set(cns)))
+
+    def get_network_addresses(self):
+        """For each network configured, return corresponding address and
+        hostname or vip (if available).
+
+        Returns a list of tuples of the form:
+
+            [(address_in_net_a, hostname_in_net_a),
+             (address_in_net_b, hostname_in_net_b),
+             ...]
+
+        or, if no hostname(s) available:
+
+            [(address_in_net_a, vip_in_net_a),
+             (address_in_net_b, vip_in_net_b),
+             ...]
+
+        or, if no vip(s) available:
+
+            [(address_in_net_a, address_in_net_a),
+             (address_in_net_b, address_in_net_b),
+             ...]
+ """ + addresses = [] + for net_type in [INTERNAL, ADMIN, PUBLIC]: + net_config = config(ADDRESS_MAP[net_type]['config']) + # NOTE(jamespage): Fallback must always be private address + # as this is used to bind services on the + # local unit. + fallback = unit_get("private-address") + if net_config: + addr = get_address_in_network(net_config, + fallback) + else: + try: + addr = network_get_primary_address( + ADDRESS_MAP[net_type]['binding'] + ) + except (NotImplementedError, NoNetworkBinding): + addr = fallback + + endpoint = resolve_address(net_type) + addresses.append((addr, endpoint)) + + return sorted(set(addresses)) + + def __call__(self): + if isinstance(self.external_ports, six.string_types): + self.external_ports = [self.external_ports] + + if not self.external_ports or not https(): + return {} + + use_keystone_ca = True + for rid in relation_ids('certificates'): + if related_units(rid): + use_keystone_ca = False + + if use_keystone_ca: + self.configure_ca() + + self.enable_modules() + + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} + + if use_keystone_ca: + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in config (currently assumed that ca + # uses ip for cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) + + addresses = self.get_network_addresses() + for address, endpoint in addresses: + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) + return ctxt + + +class NeutronContext(OSContextGenerator): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + for pkgs in self.packages: + ensure_packages(pkgs) + + def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return ovs_ctxt + + def nuage_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nuage_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'vsp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nuage_ctxt + + def nvp_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nvp_ctxt + + def n1kv_ctxt(self): + driver = 
neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + n1kv_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags + + return n1kv_ctxt + + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return calico_ctxt + + def neutron_ctxt(self): + if https(): + proto = 'https' + else: + proto = 'http' + + if is_clustered(): + host = config('vip') + else: + host = unit_get('private-address') + + ctxt = {'network_manager': self.network_manager, + 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} + return ctxt + + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + + def midonet_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + midonet_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + mido_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'midonet', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': midonet_config} + + return mido_ctxt + + def __call__(self): + if self.network_manager not in ['quantum', 'neutron']: + return {} + + if not self.plugin: + return {} + + ctxt = self.neutron_ctxt() + + if self.plugin == 'ovs': + ctxt.update(self.ovs_ctxt()) + elif self.plugin in ['nvp', 'nsx']: + ctxt.update(self.nvp_ctxt()) + elif self.plugin == 'n1kv': + ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) + elif self.plugin == 'vsp': + ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) + elif self.plugin == 'midonet': + ctxt.update(self.midonet_ctxt()) + + alchemy_flags = config('neutron-alchemy-flags') + if alchemy_flags: + flags = config_flags_parser(alchemy_flags) + ctxt['neutron_alchemy_flags'] = flags + + return ctxt + + +class NeutronPortContext(OSContextGenerator): + + def resolve_ports(self, ports): + """Resolve NICs not yet bound to bridge(s) + + If hwaddress provided then returns resolved hwaddress otherwise NIC. 
+        """
+        if not ports:
+            return None
+
+        hwaddr_to_nic = {}
+        hwaddr_to_ip = {}
+        extant_nics = list_nics()
+
+        for nic in extant_nics:
+            # Ignore virtual interfaces (bond masters will be identified from
+            # their slaves)
+            if not is_phy_iface(nic):
+                continue
+
+            _nic = get_bond_master(nic)
+            if _nic:
+                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
+                    level=DEBUG)
+                nic = _nic
+
+            hwaddr = get_nic_hwaddr(nic)
+            hwaddr_to_nic[hwaddr] = nic
+            addresses = get_ipv4_addr(nic, fatal=False)
+            addresses += get_ipv6_addr(iface=nic, fatal=False)
+            hwaddr_to_ip[hwaddr] = addresses
+
+        resolved = []
+        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
+        for entry in ports:
+            if re.match(mac_regex, entry):
+                # NIC is in known NICs and does NOT have an IP address
+                if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
+                    # If the nic is part of a bridge then don't use it
+                    if is_bridge_member(hwaddr_to_nic[entry]):
+                        continue
+
+                    # Entry is a MAC address for a valid interface that doesn't
+                    # have an IP address assigned yet.
+                    resolved.append(hwaddr_to_nic[entry])
+            elif entry in extant_nics:
+                # If the passed entry is not a MAC address and the interface
+                # exists, assume it's a valid interface, and that the user put
+                # it there on purpose (we can trust it to be the real external
+                # network).
+                resolved.append(entry)
+
+        # Ensure no duplicates
+        return list(set(resolved))
+
+
+class OSConfigFlagContext(OSContextGenerator):
+    """Provides support for user-defined config flags.
+
+    Users can define a comma-separated list of key=value pairs
+    in the charm configuration and apply them at any point in
+    any file by using a template flag.
+
+    Sometimes users might want config flags inserted within a
+    specific section so this class allows users to specify the
+    template flag name, allowing for multiple template flags
+    (sections) within the same context.
+
+    NOTE: the value of config-flags may be a comma-separated list of
+          key=value pairs and some OpenStack config files support
+          comma-separated lists as values.
+    """
+
+    def __init__(self, charm_flag='config-flags',
+                 template_flag='user_config_flags'):
+        """
+        :param charm_flag: config flags in charm configuration.
+        :param template_flag: insert point for user-defined flags in template
+                              file.
+        """
+        super(OSConfigFlagContext, self).__init__()
+        self._charm_flag = charm_flag
+        self._template_flag = template_flag
+
+    def __call__(self):
+        config_flags = config(self._charm_flag)
+        if not config_flags:
+            return {}
+
+        return {self._template_flag:
+                config_flags_parser(config_flags)}
+
+
+class LibvirtConfigFlagsContext(OSContextGenerator):
+    """
+    This context provides support for extending
+    the libvirt section through user-defined flags.
+    """
+    def __call__(self):
+        ctxt = {}
+        libvirt_flags = config('libvirt-flags')
+        if libvirt_flags:
+            ctxt['libvirt_flags'] = config_flags_parser(
+                libvirt_flags)
+        return ctxt
+
+
+class SubordinateConfigContext(OSContextGenerator):
+
+    """
+    Responsible for inspecting relations to subordinates that
+    may be exporting required config via a json blob.
+
+    The subordinate interface allows subordinates to export their
+    configuration requirements to the principal for multiple config
+    files and multiple services.
+    I.e., a subordinate that has interfaces to both glance and nova may
+    export the following yaml blob as json::
+
+        glance:
+            /etc/glance/glance-api.conf:
+                sections:
+                    DEFAULT:
+                        - [key1, value1]
+            /etc/glance/glance-registry.conf:
+                MYSECTION:
+                    - [key2, value2]
+        nova:
+            /etc/nova/nova.conf:
+                sections:
+                    DEFAULT:
+                        - [key3, value3]
+
+
+    It is then up to the principal charms to subscribe this context to
+    the service+config file it is interested in.  Configuration data will
+    be available in the template context, in glance's case, as::
+
+        ctxt = {
+            ... other context ...
+            'subordinate_configuration': {
+                'DEFAULT': {
+                    'key1': 'value1',
+                },
+                'MYSECTION': {
+                    'key2': 'value2',
+                },
+            }
+        }
+    """
+
+    def __init__(self, service, config_file, interface):
+        """
+        :param service     : Service name key to query in any subordinate
+                             data found
+        :param config_file : Service's config file to query sections
+        :param interface   : Subordinate interface to inspect
+        """
+        self.config_file = config_file
+        if isinstance(service, list):
+            self.services = service
+        else:
+            self.services = [service]
+        if isinstance(interface, list):
+            self.interfaces = interface
+        else:
+            self.interfaces = [interface]
+
+    def __call__(self):
+        ctxt = {'sections': {}}
+        rids = []
+        for interface in self.interfaces:
+            rids.extend(relation_ids(interface))
+        for rid in rids:
+            for unit in related_units(rid):
+                sub_config = relation_get('subordinate_configuration',
+                                          rid=rid, unit=unit)
+                if sub_config and sub_config != '':
+                    try:
+                        sub_config = json.loads(sub_config)
+                    except Exception:
+                        log('Could not parse JSON from '
+                            'subordinate_configuration setting from %s'
+                            % rid, level=ERROR)
+                        continue
+
+                    for service in self.services:
+                        if service not in sub_config:
+                            log('Found subordinate_configuration on %s but it '
+                                'contained nothing for %s service'
+                                % (rid, service), level=INFO)
+                            continue
+
+                        sub_config = sub_config[service]
+                        if self.config_file not in sub_config:
+                            log('Found subordinate_configuration on %s but it '
+                                'contained nothing for %s'
+                                % (rid, self.config_file), level=INFO)
+                            continue
+
+                        sub_config = sub_config[self.config_file]
+                        for k, v in six.iteritems(sub_config):
+                            if k == 'sections':
+                                for section, config_list in six.iteritems(v):
+                                    log("adding section '%s'" % (section),
+                                        level=DEBUG)
+                                    if ctxt[k].get(section):
+                                        ctxt[k][section].extend(config_list)
+                                    else:
+                                        ctxt[k][section] = config_list
+                            else:
+                                ctxt[k] = v
+        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
+        return ctxt
+
+
+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+
+        return ctxt
+
+
+class SyslogContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {'use_syslog': config('use-syslog')}
+        return ctxt
+
+
+class BindHostContext(OSContextGenerator):
+
+    def __call__(self):
+        if config('prefer-ipv6'):
+            return {'bind_host': '::'}
+        else:
+            return {'bind_host': '0.0.0.0'}
+
+
+MAX_DEFAULT_WORKERS = 4
+DEFAULT_MULTIPLIER = 2
+
+
+def _calculate_workers():
+    '''
+    Determine the number of worker processes based on the CPU
+    count of the unit containing the application.
+
+    Workers will be limited to MAX_DEFAULT_WORKERS in
+    container environments where no worker-multiplier configuration
+    option has been set.
+ + @returns int: number of worker processes to use + ''' + multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER + count = int(_num_cpus() * multiplier) + if multiplier > 0 and count == 0: + count = 1 + + if config('worker-multiplier') is None and is_container(): + # NOTE(jamespage): Limit unconfigured worker-multiplier + # to MAX_DEFAULT_WORKERS to avoid insane + # worker configuration in LXD containers + # on large servers + # Reference: https://pad.lv/1665270 + count = min(count, MAX_DEFAULT_WORKERS) + + return count + + +def _num_cpus(): + ''' + Compatibility wrapper for calculating the number of CPU's + a unit has. + + @returns: int: number of CPU cores detected + ''' + try: + return psutil.cpu_count() + except AttributeError: + return psutil.NUM_CPUS + + +class WorkerConfigContext(OSContextGenerator): + + def __call__(self): + ctxt = {"workers": _calculate_workers()} + return ctxt + + +class WSGIWorkerConfigContext(WorkerConfigContext): + + def __init__(self, name=None, script=None, admin_script=None, + public_script=None, user=None, group=None, + process_weight=1.00, + admin_process_weight=0.25, public_process_weight=0.75): + self.service_name = name + self.user = user or name + self.group = group or name + self.script = script + self.admin_script = admin_script + self.public_script = public_script + self.process_weight = process_weight + self.admin_process_weight = admin_process_weight + self.public_process_weight = public_process_weight + + def __call__(self): + total_processes = _calculate_workers() + ctxt = { + "service_name": self.service_name, + "user": self.user, + "group": self.group, + "script": self.script, + "admin_script": self.admin_script, + "public_script": self.public_script, + "processes": int(math.ceil(self.process_weight * total_processes)), + "admin_processes": int(math.ceil(self.admin_process_weight * + total_processes)), + "public_processes": int(math.ceil(self.public_process_weight * + total_processes)), + "threads": 1, + } + return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) + + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', + amqp_relation='amqp'): + """ + :param zmq_relation: Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = {'notifications': 'False'} + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = "True" + + return ctxt + + +class SysctlContext(OSContextGenerator): + """This context check if the 'sysctl' option exists on configuration + then creates a file with the loaded contents""" + def __call__(self): + sysctl_dict = config('sysctl') + if sysctl_dict: + sysctl_create(sysctl_dict, + '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) + return {'sysctl': sysctl_dict} + + +class NeutronAPIContext(OSContextGenerator): + ''' + Inspects current neutron-plugin-api relation for neutron settings. Return + defaults if it is not present. 
+ ''' + interfaces = ['neutron-plugin-api'] + + def __call__(self): + self.neutron_defaults = { + 'l2_population': { + 'rel_key': 'l2-population', + 'default': False, + }, + 'overlay_network_type': { + 'rel_key': 'overlay-network-type', + 'default': 'gre', + }, + 'neutron_security_groups': { + 'rel_key': 'neutron-security-groups', + 'default': False, + }, + 'network_device_mtu': { + 'rel_key': 'network-device-mtu', + 'default': None, + }, + 'enable_dvr': { + 'rel_key': 'enable-dvr', + 'default': False, + }, + 'enable_l3ha': { + 'rel_key': 'enable-l3ha', + 'default': False, + }, + 'dns_domain': { + 'rel_key': 'dns-domain', + 'default': None, + }, + 'polling_interval': { + 'rel_key': 'polling-interval', + 'default': 2, + }, + 'rpc_response_timeout': { + 'rel_key': 'rpc-response-timeout', + 'default': 60, + }, + 'report_interval': { + 'rel_key': 'report-interval', + 'default': 30, + }, + 'enable_qos': { + 'rel_key': 'enable-qos', + 'default': False, + }, + 'enable_nsg_logging': { + 'rel_key': 'enable-nsg-logging', + 'default': False, + }, + 'enable_nfg_logging': { + 'rel_key': 'enable-nfg-logging', + 'default': False, + }, + 'enable_port_forwarding': { + 'rel_key': 'enable-port-forwarding', + 'default': False, + }, + 'global_physnet_mtu': { + 'rel_key': 'global-physnet-mtu', + 'default': 1500, + }, + 'physical_network_mtus': { + 'rel_key': 'physical-network-mtus', + 'default': None, + }, + } + ctxt = self.get_neutron_options({}) + for rid in relation_ids('neutron-plugin-api'): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + # The l2-population key is used by the context as a way of + # checking if the api service on the other end is sending data + # in a recent format. + if 'l2-population' in rdata: + ctxt.update(self.get_neutron_options(rdata)) + + extension_drivers = [] + + if ctxt['enable_qos']: + extension_drivers.append('qos') + + if ctxt['enable_nsg_logging']: + extension_drivers.append('log') + + ctxt['extension_drivers'] = ','.join(extension_drivers) + + l3_extension_plugins = [] + + if ctxt['enable_port_forwarding']: + l3_extension_plugins.append('port_forwarding') + + ctxt['l3_extension_plugins'] = l3_extension_plugins + + return ctxt + + def get_neutron_options(self, rdata): + settings = {} + for nkey in self.neutron_defaults.keys(): + defv = self.neutron_defaults[nkey]['default'] + rkey = self.neutron_defaults[nkey]['rel_key'] + if rkey in rdata.keys(): + if type(defv) is bool: + settings[nkey] = bool_from_string(rdata[rkey]) + else: + settings[nkey] = rdata[rkey] + else: + settings[nkey] = defv + return settings + + +class ExternalPortContext(NeutronPortContext): + + def __call__(self): + ctxt = {} + ports = config('ext-port') + if ports: + ports = [p.strip() for p in ports.split()] + ports = self.resolve_ports(ports) + if ports: + ctxt = {"ext_port": ports[0]} + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + if mtu: + ctxt['ext_port_mtu'] = mtu + + return ctxt + + +class DataPortContext(NeutronPortContext): + + def __call__(self): + ports = config('data-port') + if ports: + # Map of {bridge:port/mac} + portmap = parse_data_port_mappings(ports) + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. + resolved = self.resolve_ports(ports) + # Rebuild port index using resolved and filtered ports. 
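+            # NOTE(editorial): descriptive example. With a config of
+            #   data-port="br-ex:aa:bb:cc:dd:ee:ff"
+            # portmap is {'aa:bb:cc:dd:ee:ff': 'br-ex'}; if that MAC
+            # resolves to eth1, the mapping returned below is
+            # {'eth1': 'br-ex'}.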
+ normalized = {get_nic_hwaddr(port): port for port in resolved + if port not in ports} + normalized.update({port: port for port in resolved + if port in ports}) + if resolved: + return {normalized[port]: bridge for port, bridge in + six.iteritems(portmap) if port in normalized.keys()} + + return None + + +class PhyNICMTUContext(DataPortContext): + + def __call__(self): + ctxt = {} + mappings = super(PhyNICMTUContext, self).__call__() + if mappings and mappings.keys(): + ports = sorted(mappings.keys()) + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + all_ports = set() + # If any of ports is a vlan device, its underlying device must have + # mtu applied first. + for port in ports: + for lport in glob.glob("/sys/class/net/%s/lower_*" % port): + lport = os.path.basename(lport) + all_ports.add(lport.split('_')[1]) + + all_ports = list(all_ports) + all_ports.extend(ports) + if mtu: + ctxt["devs"] = '\\n'.join(all_ports) + ctxt['mtu'] = mtu + + return ctxt + + +class NetworkServiceContext(OSContextGenerator): + + def __init__(self, rel_name='quantum-network-service'): + self.rel_name = rel_name + self.interfaces = [rel_name] + + def __call__(self): + for rid in relation_ids(self.rel_name): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + ctxt = { + 'keystone_host': rdata.get('keystone_host'), + 'service_port': rdata.get('service_port'), + 'auth_port': rdata.get('auth_port'), + 'service_tenant': rdata.get('service_tenant'), + 'service_username': rdata.get('service_username'), + 'service_password': rdata.get('service_password'), + 'quantum_host': rdata.get('quantum_host'), + 'quantum_port': rdata.get('quantum_port'), + 'quantum_url': rdata.get('quantum_url'), + 'region': rdata.get('region'), + 'service_protocol': + rdata.get('service_protocol') or 'http', + 'auth_protocol': + rdata.get('auth_protocol') or 'http', + 'api_version': + rdata.get('api_version') or '2.0', + } + if self.context_complete(ctxt): + return ctxt + return {} + + +class InternalEndpointContext(OSContextGenerator): + """Internal endpoint context. + + This context provides the endpoint type used for communication between + services e.g. between Nova and Cinder internally. Openstack uses Public + endpoints by default so this allows admins to optionally use internal + endpoints. + """ + def __call__(self): + return {'use_internal_endpoints': config('use-internal-endpoints')} + + +class VolumeAPIContext(InternalEndpointContext): + """Volume API context. + + This context provides information regarding the volume endpoint to use + when communicating between services. It determines which version of the + API is appropriate for use. + + This value will be determined in the resulting context dictionary + returned from calling the VolumeAPIContext object. Information provided + by this context is as follows: + + volume_api_version: the volume api version to use, currently + 'v2' or 'v3' + volume_catalog_info: the information to use for a cinder client + configuration that consumes API endpoints from the keystone + catalog. This is defined as the type:name:endpoint_type string. + """ + # FIXME(wolsen) This implementation is based on the provider being able + # to specify the package version to check but does not guarantee that the + # volume service api version selected is available. In practice, it is + # quite likely the volume service *is* providing the v3 volume service. + # This should be resolved when the service-discovery spec is implemented. 
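+    # NOTE(editorial): illustrative usage sketch, assuming a Pike-or-later
+    # release and public endpoints:
+    #
+    #   ctxt = VolumeAPIContext('nova-common')()
+    #   # -> {'volume_api_version': '3',
+    #   #     'volume_catalog_info': 'volumev3:cinderv3:publicURL'}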
+ def __init__(self, pkg): + """ + Creates a new VolumeAPIContext for use in determining which version + of the Volume API should be used for communication. A package codename + should be supplied for determining the currently installed OpenStack + version. + + :param pkg: the package codename to use in order to determine the + component version (e.g. nova-common). See + charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more. + """ + super(VolumeAPIContext, self).__init__() + self._ctxt = None + if not pkg: + raise ValueError('package name must be provided in order to ' + 'determine current OpenStack version.') + self.pkg = pkg + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """Determines the Volume API endpoint information. + + Determines the appropriate version of the API that should be used + as well as the catalog_info string that would be supplied. Returns + a dict containing the volume_api_version and the volume_catalog_info. + """ + rel = os_release(self.pkg) + version = '2' + if CompareOpenStackReleases(rel) >= 'pike': + version = '3' + + service_type = 'volumev{version}'.format(version=version) + service_name = 'cinderv{version}'.format(version=version) + endpoint_type = 'publicURL' + if config('use-internal-endpoints'): + endpoint_type = 'internalURL' + catalog_info = '{type}:{name}:{endpoint}'.format( + type=service_type, name=service_name, endpoint=endpoint_type) + + return { + 'volume_api_version': version, + 'volume_catalog_info': catalog_info, + } + + def __call__(self): + return self.ctxt + + +class AppArmorContext(OSContextGenerator): + """Base class for apparmor contexts.""" + + def __init__(self, profile_name=None): + self._ctxt = None + self.aa_profile = profile_name + self.aa_utils_packages = ['apparmor-utils'] + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """ + Validate aa-profile-mode settings is disable, enforce, or complain. + + :return ctxt: Dictionary of the apparmor profile or None + """ + if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: + ctxt = {'aa_profile_mode': config('aa-profile-mode'), + 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} + if self.aa_profile: + ctxt['aa_profile'] = self.aa_profile + else: + ctxt = None + return ctxt + + def __call__(self): + return self.ctxt + + def install_aa_utils(self): + """ + Install packages required for apparmor configuration. + """ + log("Installing apparmor utils.") + ensure_packages(self.aa_utils_packages) + + def manually_disable_aa_profile(self): + """ + Manually disable an apparmor profile. + + If aa-profile-mode is set to disabled (default) this is required as the + template has been written but apparmor is yet unaware of the profile + and aa-disable aa-profile fails. Without this the profile would kick + into enforce mode on the next service restart. + + """ + profile_path = '/etc/apparmor.d' + disable_path = '/etc/apparmor.d/disable' + if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): + os.symlink(os.path.join(profile_path, self.aa_profile), + os.path.join(disable_path, self.aa_profile)) + + def setup_aa_profile(self): + """ + Setup an apparmor profile. + The ctxt dictionary will contain the apparmor profile mode and + the apparmor profile name. 
+ Makes calls out to aa-disable, aa-complain, or aa-enforce to setup + the apparmor profile. + """ + self() + if not self.ctxt: + log("Not enabling apparmor Profile") + return + self.install_aa_utils() + cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])] + cmd.append(self.ctxt['aa_profile']) + log("Setting up the apparmor profile for {} in {} mode." + "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) + try: + check_call(cmd) + except CalledProcessError as e: + # If aa-profile-mode is set to disabled (default) manual + # disabling is required as the template has been written but + # apparmor is yet unaware of the profile and aa-disable aa-profile + # fails. If aa-disable learns to read profile files first this can + # be removed. + if self.ctxt['aa_profile_mode'] == 'disable': + log("Manually disabling the apparmor profile for {}." + "".format(self.ctxt['aa_profile'])) + self.manually_disable_aa_profile() + return + status_set('blocked', "Apparmor profile {} failed to be set to {}." + "".format(self.ctxt['aa_profile'], + self.ctxt['aa_profile_mode'])) + raise e + + +class MemcacheContext(OSContextGenerator): + """Memcache context + + This context provides options for configuring a local memcache client and + server for both IPv4 and IPv6 + """ + + def __init__(self, package=None): + """ + @param package: Package to examine to extrapolate OpenStack release. + Used when charms have no openstack-origin config + option (ie subordinates) + """ + self.package = package + + def __call__(self): + ctxt = {} + ctxt['use_memcache'] = enable_memcache(package=self.package) + if ctxt['use_memcache']: + # Trusty version of memcached does not support ::1 as a listen + # address so use host file entry instead + release = lsb_release()['DISTRIB_CODENAME'].lower() + if is_ipv6_disabled(): + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '127.0.0.1' + else: + ctxt['memcache_server'] = 'localhost' + ctxt['memcache_server_formatted'] = '127.0.0.1' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = '{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + else: + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + return ctxt + + +class EnsureDirContext(OSContextGenerator): + ''' + Serves as a generic context to create a directory as a side-effect. + + Useful for software that supports drop-in files (.d) in conjunction + with config option-based templates. Examples include: + * OpenStack oslo.policy drop-in files; + * systemd drop-in config files; + * other software that supports overriding defaults with .d files + + Another use-case is when a subordinate generates a configuration for + primary to render in a separate directory. + + Some software requires a user to create a target directory to be + scanned for drop-in files with a specific format. This is why this + context is needed to do that before rendering a template. + ''' + + def __init__(self, dirname, **kwargs): + '''Used merely to ensure that a given directory exists.''' + self.dirname = dirname + self.kwargs = kwargs + + def __call__(self): + mkdir(self.dirname, **self.kwargs) + return {} + + +class VersionsContext(OSContextGenerator): + """Context to return the openstack and operating system versions. 
+ + """ + def __init__(self, pkg='python-keystone'): + """Initialise context. + + :param pkg: Package to extrapolate openstack version from. + :type pkg: str + """ + self.pkg = pkg + + def __call__(self): + ostack = os_release(self.pkg) + osystem = lsb_release()['DISTRIB_CODENAME'].lower() + return { + 'openstack_release': ostack, + 'operating_system_release': osystem} + + +class LogrotateContext(OSContextGenerator): + """Common context generator for logrotate.""" + + def __init__(self, location, interval, count): + """ + :param location: Absolute path for the logrotate config file + :type location: str + :param interval: The interval for the rotations. Valid values are + 'daily', 'weekly', 'monthly', 'yearly' + :type interval: str + :param count: The logrotate count option configures the 'count' times + the log files are being rotated before being + :type count: int + """ + self.location = location + self.interval = interval + self.count = 'rotate {}'.format(count) + + def __call__(self): + ctxt = { + 'logrotate_logs_location': self.location, + 'logrotate_interval': self.interval, + 'logrotate_count': self.count, + } + return ctxt + + +class HostInfoContext(OSContextGenerator): + """Context to provide host information.""" + + def __init__(self, use_fqdn_hint_cb=None): + """Initialize HostInfoContext + + :param use_fqdn_hint_cb: Callback whose return value used to populate + `use_fqdn_hint` + :type use_fqdn_hint_cb: Callable[[], bool] + """ + # Store callback used to get hint for whether FQDN should be used + + # Depending on the workload a charm manages, the use of FQDN vs. + # shortname may be a deploy-time decision, i.e. behaviour can not + # change on charm upgrade or post-deployment configuration change. + + # The hint is passed on as a flag in the context to allow the decision + # to be made in the Jinja2 configuration template. + self.use_fqdn_hint_cb = use_fqdn_hint_cb + + def _get_canonical_name(self, name=None): + """Get the official FQDN of the host + + The implementation of ``socket.getfqdn()`` in the standard Python + library does not exhaust all methods of getting the official name + of a host ref Python issue https://bugs.python.org/issue5004 + + This function mimics the behaviour of a call to ``hostname -f`` to + get the official FQDN but returns an empty string if it is + unsuccessful. + + :param name: Shortname to get FQDN on + :type name: Optional[str] + :returns: The official FQDN for host or empty string ('') + :rtype: str + """ + name = name or socket.gethostname() + fqdn = '' + + if six.PY2: + exc = socket.error + else: + exc = OSError + + try: + addrs = socket.getaddrinfo( + name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) + except exc: + pass + else: + for addr in addrs: + if addr[3]: + if '.' in addr[3]: + fqdn = addr[3] + break + return fqdn + + def __call__(self): + name = socket.gethostname() + ctxt = { + 'host_fqdn': self._get_canonical_name(name) or name, + 'host': name, + 'use_fqdn_hint': ( + self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) + } + return ctxt + + +def validate_ovs_use_veth(*args, **kwargs): + """Validate OVS use veth setting for dhcp agents + + The ovs_use_veth setting is considered immutable as it will break existing + deployments. Historically, we set ovs_use_veth=True in dhcp_agent.ini. It + turns out this is no longer necessary. Ideally, all new deployments would + have this set to False. + + This function validates that the config value does not conflict with + previously deployed settings in dhcp_agent.ini. 
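+
+    For example (values illustrative), an existing ``ovs_use_veth = True``
+    in dhcp_agent.ini combined with a juju config of ``false`` returns
+    ('blocked', 'Mismatched existing and configured ovs-use-veth. See log.').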
+
+    See LP Bug#1831935 for details.
+
+    :returns: Status state and message
+    :rtype: Union[(None, None), (string, string)]
+    """
+    existing_ovs_use_veth = (
+        DHCPAgentContext.get_existing_ovs_use_veth())
+    config_ovs_use_veth = DHCPAgentContext.parse_ovs_use_veth()
+
+    # Check settings are set and not None
+    if existing_ovs_use_veth is not None and config_ovs_use_veth is not None:
+        # Check for mismatch between existing config ini and juju config
+        if existing_ovs_use_veth != config_ovs_use_veth:
+            # Block the unit to avoid breakage
+            msg = (
+                "The existing setting for dhcp_agent.ini ovs_use_veth, {}, "
+                "does not match the juju config setting, {}. This may lead to "
+                "VMs being unable to receive a DHCP IP. Either change the "
+                "juju config setting or dhcp agents may need to be recreated."
+                .format(existing_ovs_use_veth, config_ovs_use_veth))
+            log(msg, ERROR)
+            return (
+                "blocked",
+                "Mismatched existing and configured ovs-use-veth. See log.")
+
+    # Everything is OK
+    return None, None
+
+
+class DHCPAgentContext(OSContextGenerator):
+
+    def __call__(self):
+        """Return the DHCPAgentContext.
+
+        Return all DHCP Agent INI related configuration, including the
+        'dns_domain' setting from the neutron-plugin-api relation
+        (if one is set).
+
+        :returns: Dictionary context
+        :rtype: Dict
+        """
+
+        ctxt = {}
+        dnsmasq_flags = config('dnsmasq-flags')
+        if dnsmasq_flags:
+            ctxt['dnsmasq_flags'] = config_flags_parser(dnsmasq_flags)
+        ctxt['dns_servers'] = config('dns-servers')
+
+        neutron_api_settings = NeutronAPIContext()()
+
+        ctxt['debug'] = config('debug')
+        ctxt['instance_mtu'] = config('instance-mtu')
+        ctxt['ovs_use_veth'] = self.get_ovs_use_veth()
+
+        ctxt['enable_metadata_network'] = config('enable-metadata-network')
+        ctxt['enable_isolated_metadata'] = config('enable-isolated-metadata')
+
+        if neutron_api_settings.get('dns_domain'):
+            ctxt['dns_domain'] = neutron_api_settings.get('dns_domain')
+
+        # Override user supplied config for these plugins as these settings are
+        # mandatory
+        if config('plugin') in ['nvp', 'nsx', 'n1kv']:
+            ctxt['enable_metadata_network'] = True
+            ctxt['enable_isolated_metadata'] = True
+
+        return ctxt
+
+    @staticmethod
+    def get_existing_ovs_use_veth():
+        """Return existing ovs_use_veth setting from dhcp_agent.ini.
+
+        :returns: Boolean value of existing ovs_use_veth setting or None
+        :rtype: Optional[Bool]
+        """
+        DHCP_AGENT_INI = "/etc/neutron/dhcp_agent.ini"
+        existing_ovs_use_veth = None
+        # If there is a dhcp_agent.ini file read the current setting
+        if os.path.isfile(DHCP_AGENT_INI):
+            # config_ini does the right thing and returns None if the setting is
+            # commented.
+            existing_ovs_use_veth = (
+                config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth"))
+        # Convert to Bool if necessary
+        if isinstance(existing_ovs_use_veth, six.string_types):
+            return bool_from_string(existing_ovs_use_veth)
+        return existing_ovs_use_veth
+
+    @staticmethod
+    def parse_ovs_use_veth():
+        """Parse the ovs-use-veth config setting.
+
+        Parse the string config setting for ovs-use-veth and return a boolean
+        or None.
+
+        bool_from_string will raise a ValueError if the string is not falsy or
+        truthy.
+
+        :raises: ValueError for invalid input
+        :returns: Boolean value of ovs-use-veth or None
+        :rtype: Optional[Bool]
+        """
+        _config = config("ovs-use-veth")
+        # An unset parameter returns None. Just in case we will also check for
+        # an empty string: "". Ironically, bool("False") returns True and
+        # bool("") returns False - the very problem we are trying to avoid.
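+        # Illustration: bool_from_string('yes') -> True,
+        # bool_from_string('off') -> False; bool_from_string('') would raise
+        # ValueError, hence the early return below for unset/empty values.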
+ if _config is None or not _config: + # Return None + return + # bool_from_string handles many variations of true and false strings + # as well as upper and lowercases including: + # ['y', 'yes', 'true', 't', 'on', 'n', 'no', 'false', 'f', 'off'] + return bool_from_string(_config) + + def get_ovs_use_veth(self): + """Return correct ovs_use_veth setting for use in dhcp_agent.ini. + + Get the right value from config or existing dhcp_agent.ini file. + Existing has precedence. Attempt to default to "False" without + disrupting existing deployments. Handle existing deployments and + upgrades safely. See LP Bug#1831935 + + :returns: Value to use for ovs_use_veth setting + :rtype: Bool + """ + _existing = self.get_existing_ovs_use_veth() + if _existing is not None: + return _existing + + _config = self.parse_ovs_use_veth() + if _config is None: + # New better default + return False + else: + return _config + + +EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac']) + + +def resolve_pci_from_mapping_config(config_key): + """Resolve local PCI devices from MAC addresses in mapping config. + + Note that this function keeps record of mac->PCI address lookups + in the local unit db as the devices will disappaear from the system + once bound. + + :param config_key: Configuration option key to parse data from + :type config_key: str + :returns: PCI device address to Tuple(entity, mac) map + :rtype: collections.OrderedDict[str,Tuple[str,str]] + """ + devices = pci.PCINetDevices() + resolved_devices = collections.OrderedDict() + db = kv() + # Note that ``parse_data_port_mappings`` returns Dict regardless of input + for mac, entity in parse_data_port_mappings(config(config_key)).items(): + pcidev = devices.get_device_from_mac(mac) + if pcidev: + # NOTE: store mac->pci allocation as post binding + # it disappears from PCIDevices. + db.set(mac, pcidev.pci_address) + db.flush() + + pci_address = db.get(mac) + if pci_address: + resolved_devices[pci_address] = EntityMac(entity, mac) + + return resolved_devices + + +class DPDKDeviceContext(OSContextGenerator): + + def __init__(self, driver_key=None, bridges_key=None, bonds_key=None): + """Initialize DPDKDeviceContext. + + :param driver_key: Key to use when retrieving driver config. + :type driver_key: str + :param bridges_key: Key to use when retrieving bridge config. + :type bridges_key: str + :param bonds_key: Key to use when retrieving bonds config. + :type bonds_key: str + """ + self.driver_key = driver_key or 'dpdk-driver' + self.bridges_key = bridges_key or 'data-port' + self.bonds_key = bonds_key or 'dpdk-bond-mappings' + + def __call__(self): + """Populate context. + + :returns: context + :rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]] + """ + driver = config(self.driver_key) + if driver is None: + return {} + # Resolve PCI devices for both directly used devices (_bridges) + # and devices for use in dpdk bonds (_bonds) + pci_devices = resolve_pci_from_mapping_config(self.bridges_key) + pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) + return {'devices': pci_devices, + 'driver': driver} + + +class OVSDPDKDeviceContext(OSContextGenerator): + + def __init__(self, bridges_key=None, bonds_key=None): + """Initialize OVSDPDKDeviceContext. + + :param bridges_key: Key to use when retrieving bridge config. + :type bridges_key: str + :param bonds_key: Key to use when retrieving bonds config. 
+ :type bonds_key: str + """ + self.bridges_key = bridges_key or 'data-port' + self.bonds_key = bonds_key or 'dpdk-bond-mappings' + + @staticmethod + def _parse_cpu_list(cpulist): + """Parses a linux cpulist for a numa node + + :returns: list of cores + :rtype: List[int] + """ + cores = [] + ranges = cpulist.split(',') + for cpu_range in ranges: + if "-" in cpu_range: + cpu_min_max = cpu_range.split('-') + cores += range(int(cpu_min_max[0]), + int(cpu_min_max[1]) + 1) + else: + cores.append(int(cpu_range)) + return cores + + def _numa_node_cores(self): + """Get map of numa node -> cpu core + + :returns: map of numa node -> cpu core + :rtype: Dict[str,List[int]] + """ + nodes = {} + node_regex = '/sys/devices/system/node/node*' + for node in glob.glob(node_regex): + index = node.lstrip('/sys/devices/system/node/node') + with open(os.path.join(node, 'cpulist')) as cpulist: + nodes[index] = self._parse_cpu_list(cpulist.read().strip()) + return nodes + + def cpu_mask(self): + """Get hex formatted CPU mask + + The mask is based on using the first config:dpdk-socket-cores + cores of each NUMA node in the unit. + :returns: hex formatted CPU mask + :rtype: str + """ + num_cores = config('dpdk-socket-cores') + mask = 0 + for cores in self._numa_node_cores().values(): + for core in cores[:num_cores]: + mask = mask | 1 << core + return format(mask, '#04x') + + def socket_memory(self): + """Formatted list of socket memory configuration per NUMA node + + :returns: socket memory configuration per NUMA node + :rtype: str + """ + sm_size = config('dpdk-socket-memory') + node_regex = '/sys/devices/system/node/node*' + mem_list = [str(sm_size) for _ in glob.glob(node_regex)] + if mem_list: + return ','.join(mem_list) + else: + return str(sm_size) + + def devices(self): + """List of PCI devices for use by DPDK + + :returns: List of PCI devices for use by DPDK + :rtype: collections.OrderedDict[str,str] + """ + pci_devices = resolve_pci_from_mapping_config(self.bridges_key) + pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) + return pci_devices + + def _formatted_whitelist(self, flag): + """Flag formatted list of devices to whitelist + + :param flag: flag format to use + :type flag: str + :rtype: str + """ + whitelist = [] + for device in self.devices(): + whitelist.append(flag.format(device=device)) + return ' '.join(whitelist) + + def device_whitelist(self): + """Formatted list of devices to whitelist for dpdk + + using the old style '-w' flag + + :returns: devices to whitelist prefixed by '-w ' + :rtype: str + """ + return self._formatted_whitelist('-w {device}') + + def pci_whitelist(self): + """Formatted list of devices to whitelist for dpdk + + using the new style '--pci-whitelist' flag + + :returns: devices to whitelist prefixed by '--pci-whitelist ' + :rtype: str + """ + return self._formatted_whitelist('--pci-whitelist {device}') + + def __call__(self): + """Populate context. + + :returns: context + :rtype: Dict[str,Union[bool,str]] + """ + ctxt = {} + whitelist = self.device_whitelist() + if whitelist: + ctxt['dpdk_enabled'] = config('enable-dpdk') + ctxt['device_whitelist'] = self.device_whitelist() + ctxt['socket_memory'] = self.socket_memory() + ctxt['cpu_mask'] = self.cpu_mask() + return ctxt + + +class BridgePortInterfaceMap(object): + """Build a map of bridge ports and interaces from charm configuration. + + NOTE: the handling of this detail in the charm is pre-deprecated. 
+ + The long term goal is for network connectivity detail to be modelled in + the server provisioning layer (such as MAAS) which in turn will provide + a Netplan YAML description that will be used to drive Open vSwitch. + + Until we get to that reality the charm will need to configure this + detail based on application level configuration options. + + There is a established way of mapping interfaces to ports and bridges + in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we + will carry that forward. + + The relationship between bridge, port and interface(s). + +--------+ + | bridge | + +--------+ + | + +----------------+ + | port aka. bond | + +----------------+ + | | + +-+ +-+ + |i| |i| + |n| |n| + |t| |t| + |0| |N| + +-+ +-+ + """ + class interface_type(enum.Enum): + """Supported interface types. + + Supported interface types can be found in the ``iface_types`` column + in the ``Open_vSwitch`` table on a running system. + """ + dpdk = 'dpdk' + internal = 'internal' + system = 'system' + + def __str__(self): + """Return string representation of value. + + :returns: string representation of value. + :rtype: str + """ + return self.value + + def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, + global_mtu=None): + """Initialize map. + + :param bridges_key: Name of bridge:interface/port map config key + (default: 'data-port') + :type bridges_key: Optional[str] + :param bonds_key: Name of port-name:interface map config key + (default: 'dpdk-bond-mappings') + :type bonds_key: Optional[str] + :param enable_dpdk_key: Name of DPDK toggle config key + (default: 'enable-dpdk') + :type enable_dpdk_key: Optional[str] + :param global_mtu: Set a MTU on all interfaces at map initialization. + + The default is to have Open vSwitch get this from the underlying + interface as set up by bare metal provisioning. + + Note that you can augment the MTU on an individual interface basis + like this: + + ifdatamap = bpi.get_ifdatamap(bridge, port) + ifdatamap = { + port: { + **ifdata, + **{'mtu-request': my_individual_mtu_map[port]}, + } + for port, ifdata in ifdatamap.items() + } + :type global_mtu: Optional[int] + """ + bridges_key = bridges_key or 'data-port' + bonds_key = bonds_key or 'dpdk-bond-mappings' + enable_dpdk_key = enable_dpdk_key or 'enable-dpdk' + self._map = collections.defaultdict( + lambda: collections.defaultdict(dict)) + self._ifname_mac_map = collections.defaultdict(list) + self._mac_ifname_map = {} + self._mac_pci_address_map = {} + + # First we iterate over the list of physical interfaces visible to the + # system and update interface name to mac and mac to interface name map + for ifname in list_nics(): + if not is_phy_iface(ifname): + continue + mac = get_nic_hwaddr(ifname) + self._ifname_mac_map[ifname] = [mac] + self._mac_ifname_map[mac] = ifname + + # check if interface is part of a linux bond + _bond_name = get_bond_master(ifname) + if _bond_name and _bond_name != ifname: + log('Add linux bond "{}" to map for physical interface "{}" ' + 'with mac "{}".'.format(_bond_name, ifname, mac), + level=DEBUG) + # for bonds we want to be able to get a list of the mac + # addresses for the physical interfaces the bond is made up of. 
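+                # e.g. (addresses illustrative) a two-member bond ends up as:
+                #   _ifname_mac_map['bond0'] ==
+                #       ['00:53:00:00:00:2a', '00:53:00:00:00:2b']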
+ if self._ifname_mac_map.get(_bond_name): + self._ifname_mac_map[_bond_name].append(mac) + else: + self._ifname_mac_map[_bond_name] = [mac] + + # In light of the pre-deprecation notice in the docstring of this + # class we will expose the ability to configure OVS bonds as a + # DPDK-only feature, but generally use the data structures internally. + if config(enable_dpdk_key): + # resolve PCI address of interfaces listed in the bridges and bonds + # charm configuration options. Note that for already bound + # interfaces the helper will retrieve MAC address from the unit + # KV store as the information is no longer available in sysfs. + _pci_bridge_mac = resolve_pci_from_mapping_config( + bridges_key) + _pci_bond_mac = resolve_pci_from_mapping_config( + bonds_key) + + for pci_address, bridge_mac in _pci_bridge_mac.items(): + if bridge_mac.mac in self._mac_ifname_map: + # if we already have the interface name in our map it is + # visible to the system and therefore not bound to DPDK + continue + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[ifname] = [bridge_mac.mac] + self._mac_ifname_map[bridge_mac.mac] = ifname + self._mac_pci_address_map[bridge_mac.mac] = pci_address + + for pci_address, bond_mac in _pci_bond_mac.items(): + # for bonds we want to be able to get a list of macs from + # the bond name and also get at the interface name made up + # of the hash of the PCI address + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac) + self._mac_ifname_map[bond_mac.mac] = ifname + self._mac_pci_address_map[bond_mac.mac] = pci_address + + config_bridges = config(bridges_key) or '' + for bridge, ifname_or_mac in ( + pair.split(':', 1) + for pair in config_bridges.split()): + if ':' in ifname_or_mac: + try: + ifname = self.ifname_from_mac(ifname_or_mac) + except KeyError: + # The interface is destined for a different unit in the + # deployment. + continue + macs = [ifname_or_mac] + else: + ifname = ifname_or_mac + macs = self.macs_from_ifname(ifname_or_mac) + + portname = ifname + for mac in macs: + try: + pci_address = self.pci_address_from_mac(mac) + iftype = self.interface_type.dpdk + ifname = self.ifname_from_mac(mac) + except KeyError: + pci_address = None + iftype = self.interface_type.system + + self.add_interface( + bridge, portname, ifname, iftype, pci_address, global_mtu) + + if not macs: + # We have not mapped the interface and it is probably some sort + # of virtual interface. Our user have put it in the config with + # a purpose so let's carry out their wish. LP: #1884743 + log('Add unmapped interface from config: name "{}" bridge "{}"' + .format(ifname, bridge), + level=DEBUG) + self.add_interface( + bridge, ifname, ifname, self.interface_type.system, None, + global_mtu) + + def __getitem__(self, key): + """Provide a Dict-like interface, get value of item. + + :param key: Key to look up value from. + :type key: any + :returns: Value + :rtype: any + """ + return self._map.__getitem__(key) + + def __iter__(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.__iter__() + + def __len__(self): + """Provide a Dict-like interface, measure the length of internal map. + + :returns: Length + :rtype: int + """ + return len(self._map) + + def items(self): + """Provide a Dict-like interface, iterate over items. 
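+
+        Example traversal (instance name illustrative)::
+
+            for bridge, ports in bpi.items():
+                for port, interfaces in ports.items():
+                    ifdatamap = bpi.get_ifdatamap(bridge, port)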
+ + :returns: Key Value pairs + :rtype: Iterator[any, any] + """ + return self._map.items() + + def keys(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.keys() + + def ifname_from_mac(self, mac): + """ + :returns: Name of interface + :rtype: str + :raises: KeyError + """ + return (get_bond_master(self._mac_ifname_map[mac]) or + self._mac_ifname_map[mac]) + + def macs_from_ifname(self, ifname): + """ + :returns: List of hardware address (MAC) of interface + :rtype: List[str] + :raises: KeyError + """ + return self._ifname_mac_map[ifname] + + def pci_address_from_mac(self, mac): + """ + :param mac: Hardware address (MAC) of interface + :type mac: str + :returns: PCI address of device associated with mac + :rtype: str + :raises: KeyError + """ + return self._mac_pci_address_map[mac] + + def add_interface(self, bridge, port, ifname, iftype, + pci_address, mtu_request): + """Add an interface to the map. + + :param bridge: Name of bridge on which the bond will be added + :type bridge: str + :param port: Name of port which will represent the bond on bridge + :type port: str + :param ifname: Name of interface that will make up the bonded port + :type ifname: str + :param iftype: Type of interface + :type iftype: BridgeBondMap.interface_type + :param pci_address: PCI address of interface + :type pci_address: Optional[str] + :param mtu_request: MTU to request for interface + :type mtu_request: Optional[int] + """ + self._map[bridge][port][ifname] = { + 'type': str(iftype), + } + if pci_address: + self._map[bridge][port][ifname].update({ + 'pci-address': pci_address, + }) + if mtu_request is not None: + self._map[bridge][port][ifname].update({ + 'mtu-request': str(mtu_request) + }) + + def get_ifdatamap(self, bridge, port): + """Get structure suitable for charmhelpers.contrib.network.ovs helpers. + + :param bridge: Name of bridge on which the port will be added + :type bridge: str + :param port: Name of port which will represent one or more interfaces + :type port: str + """ + for _bridge, _ports in self.items(): + for _port, _interfaces in _ports.items(): + if _bridge == bridge and _port == port: + ifdatamap = {} + for name, data in _interfaces.items(): + ifdatamap.update({ + name: { + 'type': data['type'], + }, + }) + if data.get('mtu-request') is not None: + ifdatamap[name].update({ + 'mtu_request': data['mtu-request'], + }) + if data.get('pci-address'): + ifdatamap[name].update({ + 'options': { + 'dpdk-devargs': data['pci-address'], + }, + }) + return ifdatamap + + +class BondConfig(object): + """Container and helpers for bond configuration options. + + Data is put into a dictionary and a convenient config get interface is + provided. + """ + + DEFAULT_LACP_CONFIG = { + 'mode': 'balance-tcp', + 'lacp': 'active', + 'lacp-time': 'fast' + } + ALL_BONDS = 'ALL_BONDS' + + BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp'] + BOND_LACP = ['active', 'passive', 'off'] + BOND_LACP_TIME = ['fast', 'slow'] + + def __init__(self, config_key=None): + """Parse specified configuration option. 
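+
+        The value is expected to be a space separated list of
+        ``bond:mode:lacp:lacp-time`` entries; omitted fields fall back to
+        DEFAULT_LACP_CONFIG and an empty bond name applies the entry to all
+        bonds. For example (bond name illustrative)::
+
+            dpdk-bond0:balance-slb:off:slow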
+ + :param config_key: Configuration key to retrieve data from + (default: ``dpdk-bond-config``) + :type config_key: Optional[str] + """ + self.config_key = config_key or 'dpdk-bond-config' + + self.lacp_config = { + self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG) + } + + lacp_config = config(self.config_key) + if lacp_config: + lacp_config_map = lacp_config.split() + for entry in lacp_config_map: + bond, entry = entry.partition(':')[0:3:2] + if not bond: + bond = self.ALL_BONDS + + mode, entry = entry.partition(':')[0:3:2] + if not mode: + mode = self.DEFAULT_LACP_CONFIG['mode'] + assert mode in self.BOND_MODES, \ + "Bond mode {} is invalid".format(mode) + + lacp, entry = entry.partition(':')[0:3:2] + if not lacp: + lacp = self.DEFAULT_LACP_CONFIG['lacp'] + assert lacp in self.BOND_LACP, \ + "Bond lacp {} is invalid".format(lacp) + + lacp_time, entry = entry.partition(':')[0:3:2] + if not lacp_time: + lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time'] + assert lacp_time in self.BOND_LACP_TIME, \ + "Bond lacp-time {} is invalid".format(lacp_time) + + self.lacp_config[bond] = { + 'mode': mode, + 'lacp': lacp, + 'lacp-time': lacp_time + } + + def get_bond_config(self, bond): + """Get the LACP configuration for a bond + + :param bond: the bond name + :return: a dictionary with the configuration of the bond + :rtype: Dict[str,Dict[str,str]] + """ + return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS]) + + def get_ovs_portdata(self, bond): + """Get structure suitable for charmhelpers.contrib.network.ovs helpers. + + :param bond: the bond name + :return: a dictionary with the configuration of the bond + :rtype: Dict[str,Union[str,Dict[str,str]]] + """ + bond_config = self.get_bond_config(bond) + return { + 'bond_mode': bond_config['mode'], + 'lacp': bond_config['lacp'], + 'other_config': { + 'lacp-time': bond_config['lacp-time'], + }, + } + + +class SRIOVContext(OSContextGenerator): + """Provide context for configuring SR-IOV devices.""" + + class sriov_config_mode(enum.Enum): + """Mode in which SR-IOV is configured. + + The configuration option identified by the ``numvfs_key`` parameter + is overloaded and defines in which mode the charm should interpret + the other SR-IOV-related configuration options. + """ + auto = 'auto' + blanket = 'blanket' + explicit = 'explicit' + + def _determine_numvfs(self, device, sriov_numvfs): + """Determine number of Virtual Functions (VFs) configured for device. + + :param device: Object describing a PCI Network interface card (NIC)/ + :type device: sriov_netplan_shim.pci.PCINetDevice + :param sriov_numvfs: Number of VFs requested for blanket configuration. + :type sriov_numvfs: int + :returns: Number of VFs to configure for device + :rtype: Optional[int] + """ + + def _get_capped_numvfs(requested): + """Get a number of VFs that does not exceed individual card limits. + + Depending and make and model of NIC the number of VFs supported + vary. Requesting more VFs than a card support would be a fatal + error, cap the requested number at the total number of VFs each + individual card supports. + + :param requested: Number of VFs requested + :type requested: int + :returns: Number of VFs allowed + :rtype: int + """ + actual = min(int(requested), int(device.sriov_totalvfs)) + if actual < int(requested): + log('Requested VFs ({}) too high for device {}. 
Falling back ' + 'to value supprted by device: {}' + .format(requested, device.interface_name, + device.sriov_totalvfs), + level=WARNING) + return actual + + if self._sriov_config_mode == self.sriov_config_mode.auto: + # auto-mode + # + # If device mapping configuration is present, return information + # on cards with mapping. + # + # If no device mapping configuration is present, return information + # for all cards. + # + # The maximum number of VFs supported by card will be used. + if (self._sriov_mapped_devices and + device.interface_name not in self._sriov_mapped_devices): + log('SR-IOV configured in auto mode: No device mapping for {}' + .format(device.interface_name), + level=DEBUG) + return + return _get_capped_numvfs(device.sriov_totalvfs) + elif self._sriov_config_mode == self.sriov_config_mode.blanket: + # blanket-mode + # + # User has specified a number of VFs that should apply to all + # cards with support for VFs. + return _get_capped_numvfs(sriov_numvfs) + elif self._sriov_config_mode == self.sriov_config_mode.explicit: + # explicit-mode + # + # User has given a list of interface names and associated number of + # VFs + if device.interface_name not in self._sriov_config_devices: + log('SR-IOV configured in explicit mode: No device:numvfs ' + 'pair for device {}, skipping.' + .format(device.interface_name), + level=DEBUG) + return + return _get_capped_numvfs( + self._sriov_config_devices[device.interface_name]) + else: + raise RuntimeError('This should not be reached') + + def __init__(self, numvfs_key=None, device_mappings_key=None): + """Initialize map from PCI devices and configuration options. + + :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs') + :type numvfs_key: Optional[str] + :param device_mappings_key: Config key for device mappings + (default: 'sriov-device-mappings') + :type device_mappings_key: Optional[str] + :raises: RuntimeError + """ + numvfs_key = numvfs_key or 'sriov-numvfs' + device_mappings_key = device_mappings_key or 'sriov-device-mappings' + + devices = pci.PCINetDevices() + charm_config = config() + sriov_numvfs = charm_config.get(numvfs_key) or '' + sriov_device_mappings = charm_config.get(device_mappings_key) or '' + + # create list of devices from sriov_device_mappings config option + self._sriov_mapped_devices = [ + pair.split(':', 1)[1] + for pair in sriov_device_mappings.split() + ] + + # create map of device:numvfs from sriov_numvfs config option + self._sriov_config_devices = { + ifname: numvfs for ifname, numvfs in ( + pair.split(':', 1) for pair in sriov_numvfs.split() + if ':' in sriov_numvfs) + } + + # determine configuration mode from contents of sriov_numvfs + if sriov_numvfs == 'auto': + self._sriov_config_mode = self.sriov_config_mode.auto + elif sriov_numvfs.isdigit(): + self._sriov_config_mode = self.sriov_config_mode.blanket + elif ':' in sriov_numvfs: + self._sriov_config_mode = self.sriov_config_mode.explicit + else: + raise RuntimeError('Unable to determine mode of SR-IOV ' + 'configuration.') + + self._map = { + device.interface_name: self._determine_numvfs(device, sriov_numvfs) + for device in devices.pci_devices + if device.sriov and + self._determine_numvfs(device, sriov_numvfs) is not None + } + + def __call__(self): + """Provide SR-IOV context. + + :returns: Map interface name: min(configured, max) virtual functions. 
+ Example: + { + 'eth0': 16, + 'eth1': 32, + 'eth2': 64, + } + :rtype: Dict[str,int] + """ + return self._map diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/exceptions.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..f85ae4f4cdbb6567cbdd896338bf88fbf3c9c0ec --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/exceptions.py @@ -0,0 +1,21 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class OSContextError(Exception): + """Raised when an error occurs during context generation. + + This exception is principally used in contrib.openstack.context + """ + pass diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/files/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/files/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9df5f746fbdf5491c640a77df907b71817cbc5af --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/files/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/files/check_haproxy.sh new file mode 100755 index 0000000000000000000000000000000000000000..1df55db4816ec51d6732d68ea0a1e25e6f7b116e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/files/check_haproxy.sh @@ -0,0 +1,34 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. +# Author: Tom Haddon + +CRITICAL=0 +NOTACTIVE='' +LOGFILE=/var/log/nagios/check_haproxy.log +AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}') + +typeset -i N_INSTANCES=0 +for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) +do + N_INSTANCES=N_INSTANCES+1 + output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK') + if [ $? 
!= 0 ]; then + date >> $LOGFILE + echo $output >> $LOGFILE + /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1 + CRITICAL=1 + NOTACTIVE="${NOTACTIVE} $appserver" + fi +done + +if [ $CRITICAL = 1 ]; then + echo "CRITICAL:${NOTACTIVE}" + exit 2 +fi + +echo "OK: All haproxy instances ($N_INSTANCES) looking good" +exit 0 diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh new file mode 100755 index 0000000000000000000000000000000000000000..91ce0246e66115994c3f518b36448f70100ecfc7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh @@ -0,0 +1,30 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. +# Author: Tom Haddon + +# These should be config options at some stage +CURRQthrsh=0 +MAXQthrsh=100 + +AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}') + +HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v) + +for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}') +do + CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3) + MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4) + + if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then + echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ" + exit 2 + fi +done + +echo "OK: All haproxy queue depths looking good" +exit 0 + diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ha/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ha/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b088de84e4b288b551603816fc10eebfa7b1503 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ha/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ha/utils.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ha/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a5cbdf535d495a09a0b91f41fdda09862e34140d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ha/utils.py @@ -0,0 +1,348 @@ +# Copyright 2014-2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2016 Canonical Ltd. +# +# Authors: +# Openstack Charmers < +# + +""" +Helpers for high availability. +""" + +import hashlib +import json + +import re + +from charmhelpers.core.hookenv import ( + expected_related_units, + log, + relation_set, + charm_name, + config, + status_set, + DEBUG, +) + +from charmhelpers.core.host import ( + lsb_release +) + +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + is_ipv6, +) + +from charmhelpers.contrib.network.ip import ( + get_iface_for_address, + get_netmask_for_address, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config +) + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), +) + +VIP_GROUP_NAME = 'grp_{service}_vips' +DNSHA_GROUP_NAME = 'grp_{service}_hostnames' + + +class DNSHAException(Exception): + """Raised when an error occurs setting up DNS HA + """ + + pass + + +def update_dns_ha_resource_params(resources, resource_params, + relation_id=None, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration and + update resource dictionaries for the HA relation. + + @param resources: Pointer to dictionary of resources. + Usually instantiated in ha_joined(). + @param resource_params: Pointer to dictionary of resource parameters. + Usually instantiated in ha_joined() + @param relation_id: Relation ID of the ha relation + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + _relation_data = {'resources': {}, 'resource_params': {}} + update_hacluster_dns_ha(charm_name(), + _relation_data, + crm_ocf) + resources.update(_relation_data['resources']) + resource_params.update(_relation_data['resource_params']) + relation_set(relation_id=relation_id, groups=_relation_data['groups']) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + + :raises DNSHAException: if release is < 16.04 + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check juju goal-state if ha relation is expected, check for VIP or dns-ha + settings which indicate the unit should expect to be related to hacluster. + + @returns boolean + """ + ha_related_units = [] + try: + ha_related_units = list(expected_related_units(reltype='ha')) + except (NotImplementedError, KeyError): + pass + return len(ha_related_units) > 0 or config('vip') or config('dns-ha') + + +def generate_ha_relation_data(service, + extra_settings=None, + haproxy_enabled=True): + """ Generate relation data for ha relation + + Based on configuration options and unit interfaces, generate a json + encoded dict of relation data items for the hacluster relation, + providing configuration for DNS HA or VIP's + haproxy clone sets. 
+ + Example of supplying additional settings:: + + COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' + AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' + AGENT_CA_PARAMS = 'op monitor interval="5s"' + + ha_console_settings = { + 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, + 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, + 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, + 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}) + generate_ha_relation_data('nova', extra_settings=ha_console_settings) + + + @param service: Name of the service being configured + @param extra_settings: Dict of additional resource data + @returns dict: json encoded data for use with relation_set + """ + _relation_data = {'resources': {}, 'resource_params': {}} + + if haproxy_enabled: + _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"' + _haproxy_res = 'res_{}_haproxy'.format(service) + _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} + _relation_data['resource_params'] = { + _haproxy_res: '{} op monitor interval="5s"'.format(_meta) + } + _relation_data['init_services'] = {_haproxy_res: 'haproxy'} + _relation_data['clones'] = { + 'cl_{}_haproxy'.format(service): _haproxy_res + } + + if extra_settings: + for k, v in extra_settings.items(): + if _relation_data.get(k): + _relation_data[k].update(v) + else: + _relation_data[k] = v + + if config('dns-ha'): + update_hacluster_dns_ha(service, _relation_data) + else: + update_hacluster_vip(service, _relation_data) + + return { + 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) + for k, v in _relation_data.items() if v + } + + +def update_hacluster_dns_ha(service, relation_data, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + # Validate the charm environment for DNS HA + assert_charm_supports_dns_ha() + + settings = ['os-admin-hostname', 'os-internal-hostname', + 'os-public-hostname', 'os-access-hostname'] + + # Check which DNS settings are set and update dictionaries + hostname_group = [] + for setting in settings: + hostname = config(setting) + if hostname is None: + log('DNS HA: Hostname setting {} is None. Ignoring.' + ''.format(setting), + DEBUG) + continue + m = re.search('os-(.+?)-hostname', setting) + if m: + endpoint_type = m.group(1) + # resolve_address's ADDRESS_MAP uses 'int' not 'internal' + if endpoint_type == 'internal': + endpoint_type = 'int' + else: + msg = ('Unexpected DNS hostname setting: {}. ' + 'Cannot determine endpoint_type name' + ''.format(setting)) + status_set('blocked', msg) + raise DNSHAException(msg) + + hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) + if hostname_key in hostname_group: + log('DNS HA: Resource {}: {} already exists in ' + 'hostname group - skipping'.format(hostname_key, hostname), + DEBUG) + continue + + hostname_group.append(hostname_key) + relation_data['resources'][hostname_key] = crm_ocf + relation_data['resource_params'][hostname_key] = ( + 'params fqdn="{}" ip_address="{}"' + .format(hostname, resolve_address(endpoint_type=endpoint_type, + override=False))) + + if len(hostname_group) >= 1: + log('DNS HA: Hostname group is set with {} as members. 
' + 'Informing the ha relation'.format(' '.join(hostname_group)), + DEBUG) + relation_data['groups'] = { + DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) + } + else: + msg = 'DNS HA: Hostname group has no members.' + status_set('blocked', msg) + raise DNSHAException(msg) + + +def get_vip_settings(vip): + """Calculate which nic is on the correct network for the given vip. + + If nic or netmask discovery fail then fallback to using charm supplied + config. If fallback is used this is indicated via the fallback variable. + + @param vip: VIP to lookup nic and cidr for. + @returns (str, str, bool): eg (iface, netmask, fallback) + """ + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + fallback = False + if iface is None: + iface = config('vip_iface') + fallback = True + if netmask is None: + netmask = config('vip_cidr') + fallback = True + return iface, netmask, fallback + + +def update_hacluster_vip(service, relation_data): + """ Configure VIP resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + """ + cluster_config = get_hacluster_config() + vip_group = [] + vips_to_delete = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + + iface, netmask, fallback = get_vip_settings(vip) + + vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"' + if iface is not None: + # NOTE(jamespage): Delete old VIP resources + # Old style naming encoding iface in name + # does not work well in environments where + # interface/subnet wiring is not consistent + vip_key = 'res_{}_{}_vip'.format(service, iface) + if vip_key in vips_to_delete: + vip_key = '{}_{}'.format(vip_key, vip_params) + vips_to_delete.append(vip_key) + + vip_key = 'res_{}_{}_vip'.format( + service, + hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) + + relation_data['resources'][vip_key] = res_vip + # NOTE(jamespage): + # Use option provided vip params if these where used + # instead of auto-detected values + if fallback: + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask, + vip_monitoring=vip_monitoring)) + else: + # NOTE(jamespage): + # let heartbeat figure out which interface and + # netmask to configure, which works nicely + # when network interface naming is not + # consistent across units. 
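+                # e.g. (VIP illustrative) the resulting resource parameters:
+                #     params ip="10.0.0.100" op monitor timeout="20s"
+                #     interval="10s" depth="0"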
+ relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + vip_monitoring=vip_monitoring)) + + vip_group.append(vip_key) + + if vips_to_delete: + try: + relation_data['delete_resources'].extend(vips_to_delete) + except KeyError: + relation_data['delete_resources'] = vips_to_delete + + if len(vip_group) >= 1: + key = VIP_GROUP_NAME.format(service=service) + try: + relation_data['groups'][key] = ' '.join(vip_group) + except KeyError: + relation_data['groups'] = { + key: ' '.join(vip_group) + } diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ip.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ip.py new file mode 100644 index 0000000000000000000000000000000000000000..723aebc172e94a5b00c385d9861dd4d45c1bc753 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ip.py @@ -0,0 +1,197 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + NoNetworkBinding, + config, + unit_get, + service_name, + network_get_primary_address, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, + get_ipv6_addr, + resolve_network_cidr, +) +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' +ACCESS = 'access' + +ADDRESS_MAP = { + PUBLIC: { + 'binding': 'public', + 'config': 'os-public-network', + 'fallback': 'public-address', + 'override': 'os-public-hostname', + }, + INTERNAL: { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, + ADMIN: { + 'binding': 'admin', + 'config': 'os-admin-network', + 'fallback': 'private-address', + 'override': 'os-admin-hostname', + }, + ACCESS: { + 'binding': 'access', + 'config': 'access-network', + 'fallback': 'private-address', + 'override': 'os-access-hostname', + }, +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + """Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param endpoint_type: str endpoint type to resolve. + :param returns: str base URL for services on the current service unit. + """ + scheme = _get_scheme(configs) + + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + + return '%s://%s' % (scheme, address) + + +def _get_scheme(configs): + """Returns the scheme to use for the url (either http or https) + depending upon whether https is in the configs value. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :returns: either 'http' or 'https' depending on whether https is + configured within the configs context. 
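+
+    For example, together with ``resolve_address`` this is what makes
+    ``canonical_url`` return something like 'https://10.0.0.10' once the
+    https context is complete (address illustrative).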
+ """ + scheme = 'http' + if configs and 'https' in configs.complete_contexts(): + scheme = 'https' + return scheme + + +def _get_address_override(endpoint_type=PUBLIC): + """Returns any address overrides that the user has defined based on the + endpoint type. + + Note: this function allows for the service name to be inserted into the + address if the user specifies {service_name}.somehost.org. + + :param endpoint_type: the type of endpoint to retrieve the override + value for. + :returns: any endpoint address or hostname that the user has overridden + or None if an override is not present. + """ + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if not addr_override: + return None + else: + return addr_override.format(service_name=service_name()) + + +def resolve_address(endpoint_type=PUBLIC, override=True): + """Return unit address depending on net config. + + If unit is clustered with vip(s) and has net splits defined, return vip on + correct network. If clustered with no nets defined, return primary vip. + + If not clustered, return unit address ensuring address is on configured net + split if one is configured, or a Juju 2.0 extra-binding has been used. + + :param endpoint_type: Network endpoing type + :param override: Accept hostname overrides or not + """ + resolved_address = None + if override: + resolved_address = _get_address_override(endpoint_type) + if resolved_address: + return resolved_address + + vips = config('vip') + if vips: + vips = vips.split() + + net_type = ADDRESS_MAP[endpoint_type]['config'] + net_addr = config(net_type) + net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] + binding = ADDRESS_MAP[endpoint_type]['binding'] + clustered = is_clustered() + + if clustered and vips: + if net_addr: + for vip in vips: + if is_address_in_network(net_addr, vip): + resolved_address = vip + break + else: + # NOTE: endeavour to check vips against network space + # bindings + try: + bound_cidr = resolve_network_cidr( + network_get_primary_address(binding) + ) + for vip in vips: + if is_address_in_network(bound_cidr, vip): + resolved_address = vip + break + except (NotImplementedError, NoNetworkBinding): + # If no net-splits configured and no support for extra + # bindings/network spaces so we expect a single vip + resolved_address = vips[0] + else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr(exc_list=vips)[0] + else: + fallback_addr = unit_get(net_fallback) + + if net_addr: + resolved_address = get_address_in_network(net_addr, fallback_addr) + else: + # NOTE: only try to use extra bindings if legacy network + # configuration is not in use + try: + resolved_address = network_get_primary_address(binding) + except (NotImplementedError, NoNetworkBinding): + resolved_address = fallback_addr + + if resolved_address is None: + raise ValueError("Unable to resolve a suitable IP address based on " + "charm state and configuration. 
(net_type=%s, "
+                         "clustered=%s)" % (net_type, clustered))
+
+    return resolved_address
+
+
+def get_vip_in_network(network):
+    matching_vip = None
+    vips = config('vip')
+    if vips:
+        for vip in vips.split():
+            if is_address_in_network(network, vip):
+                matching_vip = vip
+    return matching_vip
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/keystone.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/keystone.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7e02ccd90155710901e444482b589aa264158e6
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/keystone.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+#
+# Copyright 2017 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+from charmhelpers.fetch import apt_install
+from charmhelpers.contrib.openstack.context import IdentityServiceContext
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+)
+
+
+def get_api_suffix(api_version):
+    """Return the formatted api suffix for the given version
+    @param api_version: version of the keystone endpoint
+    @returns the api suffix formatted according to the given api
+    version
+    """
+    return 'v2.0' if api_version in (2, "2", "2.0") else 'v3'
+
+
+def format_endpoint(schema, addr, port, api_version):
+    """Return a formatted keystone endpoint
+    @param schema: http or https
+    @param addr: ipv4/ipv6 host of the keystone service
+    @param port: port of the keystone service
+    @param api_version: 2 or 3
+    @returns a fully formatted keystone endpoint
+    """
+    return '{}://{}:{}/{}/'.format(schema, addr, port,
+                                   get_api_suffix(api_version))
+
+
+def get_keystone_manager(endpoint, api_version, **kwargs):
+    """Return a keystone manager for the correct API version
+
+    @param endpoint: the keystone endpoint to point client at
+    @param api_version: version of the keystone api the client should use
+    @param kwargs: token or username/tenant/password information
+    @returns keystone manager class used for interrogating keystone
+    """
+    if api_version == 2:
+        return KeystoneManager2(endpoint, **kwargs)
+    if api_version == 3:
+        return KeystoneManager3(endpoint, **kwargs)
+    raise ValueError('No manager found for api version {}'.format(api_version))
+
+
+def get_keystone_manager_from_identity_service_context():
+    """Return a keystone manager generated from an instance of
+    charmhelpers.contrib.openstack.context.IdentityServiceContext
+    @returns KeystoneManager instance
+    """
+    context = IdentityServiceContext()()
+    if not context:
+        msg = "Identity service context cannot be generated"
+        log(msg, level=ERROR)
+        raise ValueError(msg)
+
+    endpoint = format_endpoint(context['service_protocol'],
+                               context['service_host'],
+                               context['service_port'],
+                               context['api_version'])
+
+    if context['api_version'] in (2, "2.0"):
+        api_version = 2
+    else:
+        api_version = 3
+
+    return get_keystone_manager(endpoint, api_version,
+                                username=context['admin_user'],
password=context['admin_password'], + tenant_name=context['admin_tenant_name']) + + +class KeystoneManager(object): + + def resolve_service_id(self, service_name=None, service_type=None): + """Find the service_id of a given service""" + services = [s._info for s in self.api.services.list()] + + service_name = service_name.lower() + for s in services: + name = s['name'].lower() + if service_type and service_name: + if (service_name == name and service_type == s['type']): + return s['id'] + elif service_name and service_name == name: + return s['id'] + elif service_type and service_type == s['type']: + return s['id'] + return None + + def service_exists(self, service_name=None, service_type=None): + """Determine if the given service exists on the service list""" + return self.resolve_service_id(service_name, service_type) is not None + + +class KeystoneManager2(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + + self.api_version = 2 + + token = kwargs.get("token", None) + if token: + api = client.Client(endpoint=endpoint, token=token) + else: + auth = v2.Password(username=kwargs.get("username"), + password=kwargs.get("password"), + tenant_name=kwargs.get("tenant_name"), + auth_url=endpoint) + sess = session.Session(auth=auth) + api = client.Client(session=sess) + + self.api = api + + +class KeystoneManager3(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + + self.api_version = 3 + + token = kwargs.get("token", None) + if token: + auth = token_endpoint.Token(endpoint=endpoint, + token=token) + sess = session.Session(auth=auth) + else: + auth = v3.Password(auth_url=endpoint, + user_id=kwargs.get("username"), + password=kwargs.get("password"), + project_id=kwargs.get("tenant_name")) + sess = session.Session(auth=auth) + + self.api = client.Client(session=sess) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/neutron.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/neutron.py new file mode 100644 index 0000000000000000000000000000000000000000..fb5607f3e73159d90236b2d7a4051aa82119e889 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/neutron.py @@ -0,0 +1,359 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
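As an aside, the two endpoint helpers above (`get_api_suffix` and `format_endpoint`) are pure string functions, so their behaviour is easy to pin down. A minimal standalone sketch (the address and port are example values, not taken from this charm):

```python
# Mirrors get_api_suffix()/format_endpoint() from keystone.py above,
# reproduced standalone so the assertions run without charmhelpers.
def get_api_suffix(api_version):
    return 'v2.0' if api_version in (2, "2", "2.0") else 'v3'


def format_endpoint(schema, addr, port, api_version):
    return '{}://{}:{}/{}/'.format(schema, addr, port,
                                   get_api_suffix(api_version))


assert format_endpoint('https', '10.5.0.10', 5000, 3) == \
    'https://10.5.0.10:5000/v3/'
assert format_endpoint('http', '10.5.0.10', 5000, "2.0") == \
    'http://10.5.0.10:5000/v2.0/'
```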
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Various utilies for dealing with Neutron and the renaming from Quantum. + +import six +from subprocess import check_output + +from charmhelpers.core.hookenv import ( + config, + log, + ERROR, +) + +from charmhelpers.contrib.openstack.utils import ( + os_release, + CompareOpenStackReleases, +) + + +def headers_package(): + """Ensures correct linux-headers for running kernel are installed, + for building DKMS package""" + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + return 'linux-headers-%s' % kver + + +QUANTUM_CONF_DIR = '/etc/quantum' + + +def kernel_version(): + """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + kver = kver.split('.') + return (int(kver[0]), int(kver[1])) + + +def determine_dkms_package(): + """ Determine which DKMS package should be used based on kernel version """ + # NOTE: 3.13 kernels have support for GRE and VXLAN native + if kernel_version() >= (3, 13): + return [] + else: + return [headers_package(), 'openvswitch-datapath-dkms'] + + +# legacy + + +def quantum_plugins(): + return { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/' + 'ovs_quantum_plugin.ini', + 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' + 'OVSQuantumPluginV2', + 'contexts': [], + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['quantum-plugin-openvswitch-agent']], + 'server_packages': ['quantum-server', + 'quantum-plugin-openvswitch'], + 'server_services': ['quantum-server'] + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' + 'QuantumPlugin.NvpPluginV2', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['quantum-server', + 'quantum-plugin-nicira'], + 'server_services': ['quantum-server'] + } + } + + +NEUTRON_CONF_DIR = '/etc/neutron' + + +def neutron_plugins(): + release = os_release('nova-common') + plugins = { + 'ovs': { + 'config': '/etc/neutron/plugins/openvswitch/' + 'ovs_neutron_plugin.ini', + 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + 'contexts': [], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['neutron-plugin-openvswitch-agent']], + 'server_packages': ['neutron-server', + 'neutron-plugin-openvswitch'], + 'server_services': ['neutron-server'] + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
+ 'NeutronPlugin.NvpPluginV2', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-nicira'], + 'server_services': ['neutron-server'] + }, + 'nsx': { + 'config': '/etc/neutron/plugins/vmware/nsx.ini', + 'driver': 'vmware', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-vmware'], + 'server_services': ['neutron-server'] + }, + 'n1kv': { + 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', + 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'contexts': [], + 'services': [], + 'packages': [determine_dkms_package(), + ['neutron-plugin-cisco']], + 'server_packages': ['neutron-server', + 'neutron-plugin-cisco'], + 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd'], + 'packages': [determine_dkms_package(), + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd']], + 'server_packages': ['neutron-server', 'calico-control', 'etcd'], + 'server_services': ['neutron-server', 'etcd'] + }, + 'vsp': { + 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', + 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], + 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' + '.plumgrid_plugin.NeutronPluginPLUMgridV2'), + 'contexts': [], + 'services': [], + 'packages': ['plumgrid-lxc', + 'iovisor-dkms'], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] + }, + 'midonet': { + 'config': '/etc/neutron/plugins/midonet/midonet.ini', + 'driver': 'midonet.neutron.plugin.MidonetPluginV2', + 'contexts': [], + 'services': [], + 'packages': [determine_dkms_package()], + 'server_packages': ['neutron-server', + 'python-neutron-plugin-midonet'], + 'server_services': ['neutron-server'] + } + } + if CompareOpenStackReleases(release) >= 'icehouse': + # NOTE: patch in ml2 plugin for icehouse onwards + plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['ovs']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + # NOTE: patch in vmware renames nvp->nsx for icehouse onwards + plugins['nvp'] = plugins['nsx'] + if CompareOpenStackReleases(release) >= 'kilo': + plugins['midonet']['driver'] = ( + 'neutron.plugins.midonet.plugin.MidonetPluginV2') + if CompareOpenStackReleases(release) >= 'liberty': + plugins['midonet']['driver'] = ( + 'midonet.neutron.plugin_v1.MidonetPluginV2') + plugins['midonet']['server_packages'].remove( + 'python-neutron-plugin-midonet') + plugins['midonet']['server_packages'].append( + 'python-networking-midonet') + plugins['plumgrid']['driver'] = ( + 'networking_plumgrid.neutron.plugins' + '.plugin.NeutronPluginPLUMgridV2') + plugins['plumgrid']['server_packages'].remove( + 'neutron-plugin-plumgrid') + if CompareOpenStackReleases(release) >= 'mitaka': + plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') + plugins['nsx']['server_packages'].append('python-vmware-nsx') + plugins['nsx']['config'] = 
'/etc/neutron/nsx.ini' + plugins['vsp']['driver'] = ( + 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') + if CompareOpenStackReleases(release) >= 'newton': + plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['vsp']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + return plugins + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log("Network manager '%s' does not support plugins." % (manager), + level=ERROR) + raise Exception + + try: + _plugin = plugins[plugin] + except KeyError: + log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) + raise Exception + + try: + return _plugin[attr] + except KeyError: + return None + + +def network_manager(): + ''' + Deals with the renaming of Quantum to Neutron in H and any situations + that require compatability (eg, deploying H with network-manager=quantum, + upgrading from G). + ''' + release = os_release('nova-common') + manager = config('network-manager').lower() + + if manager not in ['quantum', 'neutron']: + return manager + + if release in ['essex']: + # E does not support neutron + log('Neutron networking not supported in Essex.', level=ERROR) + raise Exception + elif release in ['folsom', 'grizzly']: + # neutron is named quantum in F and G + return 'quantum' + else: + # ensure accurate naming for all releases post-H + return 'neutron' + + +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. + """ + parsed = {} + if mappings: + mappings = mappings.split() + for m in mappings: + p = m.partition(':') + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue + else: + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() + + return parsed + + +def parse_bridge_mappings(mappings): + """Parse bridge mappings. + + Mappings must be a space-delimited list of provider:bridge mappings. + + Returns dict of the form {provider:bridge}. + """ + return parse_mappings(mappings) + + +def parse_data_port_mappings(mappings, default_bridge='br-data'): + """Parse data port mappings. + + Mappings must be a space-delimited list of bridge:port. + + Returns dict of the form {port:bridge} where ports may be mac addresses or + interface names. + """ + + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be + # proposed for since it may be a mac address which will differ + # across units this allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) + if not _mappings or list(_mappings.values()) == ['']: + if not mappings: + return {} + + # For backwards-compatibility we need to support port-only provided in + # config. + _mappings = {mappings.split()[0]: default_bridge} + + ports = _mappings.keys() + if len(set(ports)) != len(ports): + raise Exception("It is not allowed to have the same port configured " + "on more than one bridge") + + return _mappings + + +def parse_vlan_range_mappings(mappings): + """Parse vlan range mappings. + + Mappings must be a space-delimited list of provider:start:end mappings. + + The start:end range is optional and may be omitted. 
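The parse_* helpers above are pure functions, so their contract is easiest to see with worked inputs. A standalone sketch (the provider, bridge, and interface names are illustrative):

```python
# Reproduces parse_mappings() from this module so the examples run
# standalone; bridge/port names below are made up.
def parse_mappings(mappings, key_rvalue=False):
    parsed = {}
    if mappings:
        for m in mappings.split():
            p = m.partition(':')
            if key_rvalue:
                if not p[1]:        # no rvalue: skip this entry
                    continue
                key_index, val_index = 2, 0
            else:
                key_index, val_index = 0, 2
            parsed[p[key_index].strip()] = p[val_index].strip()
    return parsed


# provider:bridge pairs -> {provider: bridge}
assert parse_mappings("physnet1:br-ex physnet2:br-data") == {
    "physnet1": "br-ex", "physnet2": "br-data"}

# data-port mappings are rvalue-keyed -> {port: bridge}
assert parse_mappings("br-data:eth1", key_rvalue=True) == {"eth1": "br-data"}

# provider:start:end -> {provider: (start, end)}, as done by
# parse_vlan_range_mappings()
assert {p: tuple(r.split(':'))
        for p, r in parse_mappings("physnet1:1000:2000").items()} == {
            "physnet1": ("1000", "2000")}
```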
+ + Returns dict of the form {provider: (start, end)}. + """ + _mappings = parse_mappings(mappings) + if not _mappings: + return {} + + mappings = {} + for p, r in six.iteritems(_mappings): + mappings[p] = tuple(r.split(':')) + + return mappings diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/policyd.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/policyd.py new file mode 100644 index 0000000000000000000000000000000000000000..f2bb21e9db926bd2c4de8ab3e8d10d0837af563a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/policyd.py @@ -0,0 +1,801 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import contextlib +import os +import six +import shutil +import yaml +import zipfile + +import charmhelpers +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as ch_host + +# Features provided by this module: + +""" +Policy.d helper functions +========================= + +The functions in this module are designed, as a set, to provide an easy-to-use +set of hooks for classic charms to add in /etc//policy.d/ +directory override YAML files. + +(For charms.openstack charms, a mixin class is provided for this +functionality). + +In order to "hook" this functionality into a (classic) charm, two functions are +provided: + + maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=none, + blacklist_keys=none, + template_function=none, + restart_handler=none) + + maybe_do_policyd_overrides_on_config_changed(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None + +(See the docstrings for details on the parameters) + +The functions should be called from the install and upgrade hooks in the charm. +The `maybe_do_policyd_overrides_on_config_changed` function is designed to be +called on the config-changed hook, in that it does an additional check to +ensure that an already overriden policy.d in an upgrade or install hooks isn't +repeated. + +In order the *enable* this functionality, the charm's install, config_changed, +and upgrade_charm hooks need to be modified, and a new config option (see +below) needs to be added. The README for the charm should also be updated. + +Examples from the keystone charm are: + +@hooks.hook('install.real') +@harden() +def install(): + ... + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides(os_release('keystone'), 'keystone') + + +@hooks.hook('config-changed') +@restart_on_change(restart_map(), restart_functions=restart_function_map()) +@harden() +def config_changed(): + ... 
+ # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides_on_config_changed(os_release('keystone'), + 'keystone') + +@hooks.hook('upgrade-charm') +@restart_on_change(restart_map(), stopstart=True) +@harden() +def upgrade_charm(): + ... + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides(os_release('keystone'), 'keystone') + +Status Line +=========== + +The workload status code in charm-helpers has been modified to detect if +policy.d override code has been incorporated into the charm by checking for the +new config variable (in the config.yaml). If it has been, then the workload +status line will automatically show "PO:" at the beginning of the workload +status for that unit/service if the config option is set. If the policy +override is broken, the "PO (broken):" will be shown. No changes to the charm +(apart from those already mentioned) are needed to enable this functionality. +(charms.openstack charms also get this functionality, but please see that +library for further details). +""" + +# The config.yaml for the charm should contain the following for the config +# option: + +""" + use-policyd-override: + type: boolean + default: False + description: | + If True then use the resource file named 'policyd-override' to install + override YAML files in the service's policy.d directory. The resource + file should be a ZIP file containing at least one yaml file with a .yaml + or .yml extension. If False then remove the overrides. +""" + +# The metadata.yaml for the charm should contain the following: +""" +resources: + policyd-override: + type: file + filename: policyd-override.zip + description: The policy.d overrides file +""" + +# The README for the charm should contain the following: +""" +Policy Overrides +---------------- + +This feature allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the OpenStack service supports +should be clearly and unambiguously understood before trying to override, or +add to, the default policies that the service uses. The charm also has some +policy defaults. They should also be understood before being overridden. + +> **Caution**: It is possible to break the system (for tenants and other + services) if policies are incorrectly applied to the service. + +Policy overrides are YAML files that contain rules that will add to, or +override, existing policy rules in the service. The `policy.d` directory is +a place to put the YAML override files. This charm owns the +`/etc/keystone/policy.d` directory, and as such, any manual changes to it will +be overwritten on charm upgrades. + +Overrides are provided to the charm using a Juju resource called +`policyd-override`. The resource is a ZIP file. This file, say +`overrides.zip`, is attached to the charm by: + + + juju attach-resource policyd-override=overrides.zip + +The policy override is enabled in the charm using: + + juju config use-policyd-override=true + +When `use-policyd-override` is `True` the status line of the charm will be +prefixed with `PO:` indicating that policies have been overridden. If the +installation of the policy override YAML files failed for any reason then the +status line will be prefixed with `PO (broken):`. The log file for the charm +will indicate the reason. No policy override files are installed if the `PO +(broken):` is shown. 
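For concreteness, an overrides resource of the kind described here can be assembled with nothing more than the standard library. A hedged sketch (the file name and the policy rule are placeholders; valid rule names depend on the target service):

```python
# Build a policyd-override resource zip. The YAML payload must be a flat
# str:str mapping of policy rules; the rule below is a placeholder only.
import zipfile

with zipfile.ZipFile("overrides.zip", "w") as zf:
    zf.writestr("admin-overrides.yaml",
                'identity:list_credentials: "rule:admin_required"\n')
```

The resulting `overrides.zip` is then attached and enabled with the two `juju` commands shown above.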
The status line indicates that the overrides are broken, +not that the policy for the service has failed. The policy will be the defaults +for the charm and service. + +Policy overrides on one service may affect the functionality of another +service. Therefore, it may be necessary to provide policy overrides for +multiple service charms to achieve a consistent set of policies across the +OpenStack system. The charms for the other services that may need overrides +should be checked to ensure that they support overrides before proceeding. +""" + +POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl'] +POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl'] +POLICYD_RESOURCE_NAME = "policyd-override" +POLICYD_CONFIG_NAME = "use-policyd-override" +POLICYD_SUCCESS_FILENAME = "policyd-override-success" +POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO +POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin") + + +class BadPolicyZipFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +class BadPolicyYamlFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +if six.PY2: + BadZipFile = zipfile.BadZipfile +else: + BadZipFile = zipfile.BadZipFile + + +def is_policyd_override_valid_on_this_release(openstack_release): + """Check that the charm is running on at least Ubuntu Xenial, and at + least the queens release. + + :param openstack_release: the release codename that is installed. + :type openstack_release: str + :returns: True if okay + :rtype: bool + """ + # NOTE(ajkavanagh) circular import! This is because the status message + # generation code in utils has to call into this module, but this function + # needs the CompareOpenStackReleases() function. The only way to solve + # this is either to put ALL of this module into utils, or refactor one or + # other of the CompareOpenStackReleases or status message generation code + # into a 3rd module. + import charmhelpers.contrib.openstack.utils as ch_utils + return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' + + +def maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None, + user=None, + group=None, + config_changed=False): + """If the config option is set, get the resource file and process it to + enable the policy.d overrides for the service passed. + + The param `openstack_release` is required as the policyd overrides feature + is only supported on openstack_release "queens" or later, and on ubuntu + "xenial" or later. Prior to these versions, this feature is a NOP. + + The optional template_function is a function that accepts a string and has + an opportunity to modify the loaded file prior to it being read by + yaml.safe_load(). This allows the charm to perform "templating" using + charm derived data. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The param restart_handler is an optional Callable that is called to perform + the service restart if the policy.d file is changed. 
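A minimal sketch of wiring up a restart_handler from a classic charm, assuming `service_restart` from charmhelpers.core.host and an apache2-fronted service (both are assumptions for illustration, not requirements of this module):

```python
# Sketch only: maybe_do_policyd_overrides() calls restart_handler with no
# arguments, so bind the service name in with functools.partial.
from functools import partial

from charmhelpers.core.host import service_restart
from charmhelpers.contrib.openstack.policyd import maybe_do_policyd_overrides

maybe_do_policyd_overrides(
    "queens",     # earliest release on which overrides are supported
    "keystone",   # example service owning /etc/keystone/policy.d
    restart_handler=partial(service_restart, "apache2"),
)
```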
This should normally
+    be None as oslo.policy automatically picks up changes in the policy.d
+    directory.  However, for any services where this is buggy then a
+    restart_handler can be used to force the policy.d files to be read.
+
+    If the config_changed param is True, then the handling is slightly
+    different: It will only perform the policyd overrides if the config is True
+    and the success file doesn't exist.  Otherwise, it does nothing as the
+    resource file has already been processed.
+
+    :param openstack_release: The openstack release that is installed.
+    :type openstack_release: str
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+                           yaml file's
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the string
+                              prior to being processed as a Yaml document.
+    :type template_function: Union[None, Callable[[str], str]]
+    :param restart_handler: The function to call if the service should be
+                            restarted.
+    :type restart_handler: Union[None, Callable[]]
+    :param user: The user to create/write files/directories as
+    :type user: Union[None, str]
+    :param group: the group to create/write files/directories as
+    :type group: Union[None, str]
+    :param config_changed: Set to True for config_changed hook.
+    :type config_changed: bool
+    """
+    _user = service if user is None else user
+    _group = service if group is None else group
+    if not is_policyd_override_valid_on_this_release(openstack_release):
+        return
+    hookenv.log("Running maybe_do_policyd_overrides",
+                level=POLICYD_LOG_LEVEL_DEFAULT)
+    config = hookenv.config()
+    try:
+        if not config.get(POLICYD_CONFIG_NAME, False):
+            clean_policyd_dir_for(service,
+                                  blacklist_paths,
+                                  user=_user,
+                                  group=_group)
+            if (os.path.isfile(_policy_success_file()) and
+                    restart_handler is not None and
+                    callable(restart_handler)):
+                restart_handler()
+            remove_policy_success_file()
+            return
+    except Exception as e:
+        # NOTE: log at the default policyd log level; the config option name
+        # (POLICYD_CONFIG_NAME) is not a valid log level.
+        hookenv.log("... ERROR: Exception is: {}".format(str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        import traceback
+        hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT)
+        return
+    # if the policyd overrides have been performed when doing config_changed
+    # just return
+    if config_changed and is_policy_success_file_set():
+        hookenv.log("... already setup, so skipping.",
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        return
+    # from now on it should succeed; if it doesn't then status line will show
+    # broken.
+    resource_filename = get_policy_resource_filename()
+    restart = process_policy_resource_file(
+        resource_filename, service, blacklist_paths, blacklist_keys,
+        template_function)
+    if restart and restart_handler is not None and callable(restart_handler):
+        restart_handler()
+
+
+@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead")
+def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs):
+    """This function is designed to be called from the config changed hook.
+
+    DEPRECATED: please use maybe_do_policyd_overrides() with the param
+    `config_changed` as `True`.
+
+    See maybe_do_policyd_overrides() for more details on the params.
+ """ + if 'config_changed' not in kwargs.keys(): + kwargs['config_changed'] = True + return maybe_do_policyd_overrides(*args, **kwargs) + + +def get_policy_resource_filename(): + """Function to extract the policy resource filename + + :returns: The filename of the resource, if set, otherwise, if an error + occurs, then None is returned. + :rtype: Union[str, None] + """ + try: + return hookenv.resource_get(POLICYD_RESOURCE_NAME) + except Exception: + return None + + +@contextlib.contextmanager +def open_and_filter_yaml_files(filepath, has_subdirs=False): + """Validate that the filepath provided is a zip file and contains at least + one (.yaml|.yml) file, and that the files are not duplicated when the zip + file is flattened. Note that the yaml files are not checked. This is the + first stage in validating the policy zipfile; individual yaml files are not + checked for validity or black listed keys. + + If the has_subdirs param is True, then the files are flattened to the first + directory, and the files in the root are ignored. + + An example of use is: + + with open_and_filter_yaml_files(some_path) as zfp, g: + for zipinfo in g: + # do something with zipinfo ... + + :param filepath: a filepath object that can be opened by zipfile + :type filepath: Union[AnyStr, os.PathLike[AntStr]] + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: (zfp handle, + a generator of the (name, filename, ZipInfo object) tuples) as a + tuple. + :rtype: ContextManager[(zipfile.ZipFile, + Generator[(name, str, str, zipfile.ZipInfo)])] + :raises: zipfile.BadZipFile + :raises: BadPolicyZipFile if duplicated yaml or missing + :raises: IOError if the filepath is not found + """ + with zipfile.ZipFile(filepath, 'r') as zfp: + # first pass through; check for duplicates and at least one yaml file. + names = collections.defaultdict(int) + yamlfiles = _yamlfiles(zfp, has_subdirs) + for name, _, _, _ in yamlfiles: + names[name] += 1 + # There must be at least 1 yaml file. + if len(names.keys()) == 0: + raise BadPolicyZipFile("contains no yaml files with {} extensions." + .format(", ".join(POLICYD_VALID_EXTS))) + # There must be no duplicates + duplicates = [n for n, c in names.items() if c > 1] + if duplicates: + raise BadPolicyZipFile("{} have duplicates in the zip file." + .format(", ".join(duplicates))) + # Finally, let's yield the generator + yield (zfp, yamlfiles) + + +def _yamlfiles(zipfile, has_subdirs=False): + """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) + and the infolist item from a zipfile. + + If the `has_subdirs` param is True, the the only yaml files that have a + directory component are read, and then first part of the directory + component is kept, along with the filename in the name. e.g. an entry with + a filename of: + + compute/someotherdir/override.yaml + + is returned as: + + compute/override, yaml, override.yaml, + + This is to help with the special, additional, processing that the dashboard + charm requires. + + :param zipfile: the zipfile to read zipinfo items from + :type zipfile: zipfile.ZipFile + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: generator of (name, ext, filename, info item) for each + self-identified yaml file. 
+    :rtype: List[(str, str, str, zipfile.ZipInfo)]
+    """
+    files = []
+    for infolist_item in zipfile.infolist():
+        try:
+            if infolist_item.is_dir():
+                continue
+        except AttributeError:
+            # fallback to "old" way to determine dir entry for pre-py36
+            if infolist_item.filename.endswith('/'):
+                continue
+        _dir, name_ext = os.path.split(infolist_item.filename)
+        name, ext = os.path.splitext(name_ext)
+        if has_subdirs and _dir != "":
+            name = os.path.join(_dir.split(os.path.sep)[0], name)
+        ext = ext.lower()
+        if ext and ext in POLICYD_VALID_EXTS:
+            files.append((name, ext, name_ext, infolist_item))
+    return files
+
+
+def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
+    """Read, validate and return the (first) yaml document from the stream.
+
+    The doc is read, and checked for a yaml file.  Then the top-level keys
+    are checked against the blacklist_keys provided.  If there are problems
+    then an Exception is raised.  Otherwise the yaml document is returned as
+    a Python object that can be dumped back as a yaml file on the system.
+
+    The yaml file must only consist of a str:str mapping, and if not then the
+    yaml file is rejected.
+
+    :param stream_or_doc: the file object to read the yaml from
+    :type stream_or_doc: Union[AnyStr, IO[AnyStr]]
+    :param blacklist_keys: Any keys, which if in the yaml file, should cause
+                           an error.
+    :type blacklist_keys: Union[None, List[str]]
+    :returns: the yaml file as a python document
+    :rtype: Dict[str, str]
+    :raises: yaml.YAMLError if there is a problem with the document
+    :raises: BadPolicyYamlFile if file doesn't look right or there are
+             blacklisted keys in the file.
+    """
+    blacklist_keys = blacklist_keys or []
+    # POLICYD_ALWAYS_BLACKLISTED_KEYS is a tuple of key names; extend (not
+    # append) so blacklist_keys stays a flat list of strings and the
+    # intersection check below actually matches them.
+    blacklist_keys.extend(POLICYD_ALWAYS_BLACKLISTED_KEYS)
+    doc = yaml.safe_load(stream_or_doc)
+    if not isinstance(doc, dict):
+        raise BadPolicyYamlFile("doesn't look like a policy file?")
+    keys = set(doc.keys())
+    blacklisted_keys_present = keys.intersection(blacklist_keys)
+    if blacklisted_keys_present:
+        raise BadPolicyYamlFile("blacklisted keys {} present."
+                                .format(", ".join(blacklisted_keys_present)))
+    if not all(isinstance(k, six.string_types) for k in keys):
+        raise BadPolicyYamlFile("keys in yaml aren't all strings?")
+    # check that the dictionary looks like a mapping of str to str
+    if not all(isinstance(v, six.string_types) for v in doc.values()):
+        raise BadPolicyYamlFile("values in yaml aren't all strings?")
+    return doc
+
+
+def policyd_dir_for(service):
+    """Return the policy directory for the named service.
+
+    :param service: str
+    :returns: the policy.d override directory.
+    :rtype: os.PathLike[str]
+    """
+    return os.path.join("/", "etc", service, "policy.d")
+
+
+def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None):
+    """Clean out the policyd directory except for items that should be kept.
+
+    The keep_paths, if used, should be set to the full path of the files that
+    should be kept in the policyd directory for the service.  Note that the
+    service name is passed in, and then the policyd_dir_for() function is
+    used.  This is so that a coding error doesn't result in a sudden deletion
+    of the charm (say).
+
+    :param service: the service name to use to construct the policy.d dir.
+    :type service: str
+    :param keep_paths: optional list of paths to not delete.
+ :type keep_paths: Union[None, List[str]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + """ + _user = service if user is None else user + _group = service if group is None else group + keep_paths = keep_paths or [] + path = policyd_dir_for(service) + hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) + if not os.path.exists(path): + ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) + _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir + for direntry in _scanner(path): + # see if the path should be kept. + if direntry.path in keep_paths: + continue + # we remove any directories; it's ours and there shouldn't be any + if direntry.is_dir(): + shutil.rmtree(direntry.path) + else: + os.remove(direntry.path) + + +def maybe_create_directory_for(path, user, group): + """For the filename 'path', ensure that the directory for that path exists. + + Note that if the directory already exists then the permissions are NOT + changed. + + :param path: the filename including the path to it. + :type path: str + :param user: the user to create the directory as + :param group: the group to create the directory as + """ + _dir, _ = os.path.split(path) + if not os.path.exists(_dir): + ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) + + +@contextlib.contextmanager +def _fallback_scandir(path): + """Fallback os.scandir implementation. + + provide a fallback implementation of os.scandir if this module ever gets + used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for + directory. + + :param path: the path to list the directories for + :type path: str + :returns: Generator that provides _FBDirectory objects + :rtype: ContextManager[_FBDirectory] + """ + for f in os.listdir(path): + yield _FBDirectory(f) + + +class _FBDirectory(object): + """Mock a scandir Directory object with enough to use in + clean_policyd_dir_for + """ + + def __init__(self, path): + self.path = path + + def is_dir(self): + return os.path.isdir(self.path) + + +def path_for_policy_file(service, name): + """Return the full path for a policy.d file that will be written to the + service's policy.d directory. + + It is constructed using policyd_dir_for(), the name and the ".yaml" + extension. + + For horizon, for example, it's a bit more complicated. The name param is + actually "override_service_dir/a_name", where target_service needs to be + one the allowed horizon override services. This translation and check is + done in the _yamlfiles() function. + + :param service: the service name + :type service: str + :param name: the name for the policy override + :type name: str + :returns: the full path name for the file + :rtype: os.PathLike[str] + """ + return os.path.join(policyd_dir_for(service), name + ".yaml") + + +def _policy_success_file(): + """Return the file name for a successful drop of policy.d overrides + + :returns: the path name for the file. 
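The success marker file referenced throughout is also what drives the workload-status prefix, so the contract can be demonstrated directly. A sketch that only makes sense inside a hook context, since `_policy_success_file()` relies on `hookenv.charm_dir()`:

```python
# The status prefix is derived purely from the success marker file.
from charmhelpers.contrib.openstack.policyd import (
    policyd_status_message_prefix,
    remove_policy_success_file,
    set_policy_success_file,
)

set_policy_success_file()
assert policyd_status_message_prefix() == "PO:"
remove_policy_success_file()
assert policyd_status_message_prefix() == "PO (broken):"
```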
+ :rtype: str + """ + return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME) + + +def remove_policy_success_file(): + """Remove the file that indicates successful policyd override.""" + try: + os.remove(_policy_success_file()) + except Exception: + pass + + +def set_policy_success_file(): + """Set the file that indicates successful policyd override.""" + open(_policy_success_file(), "w").close() + + +def is_policy_success_file_set(): + """Returns True if the policy success file has been set. + + This indicates that policies are overridden and working properly. + + :returns: True if the policy file is set + :rtype: bool + """ + return os.path.isfile(_policy_success_file()) + + +def policyd_status_message_prefix(): + """Return the prefix str for the status line. + + "PO:" indicating that the policy overrides are in place, or "PO (broken):" + if the policy is supposed to be working but there is no success file. + + :returns: the prefix + :rtype: str + """ + if is_policy_success_file_set(): + return "PO:" + return "PO (broken):" + + +def process_policy_resource_file(resource_file, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + preserve_topdir=False, + preprocess_filename=None, + user=None, + group=None): + """Process the resource file (which should contain at least one yaml file) + and write those files to the service's policy.d directory. + + The optional template_function is a function that accepts a python + string and has an opportunity to modify the document + prior to it being read by the yaml.safe_load() function and written to + disk. Note that this function does *not* say how the templating is done - + this is up to the charm to implement its chosen method. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The yaml filename can be modified in two ways. If the `preserve_topdir` + param is True, then files will be flattened to the top dir. This allows + for creating sets of files that can be grouped into a single level tree + structure. + + Secondly, if the `preprocess_filename` param is not None and callable() + then the name is passed to that function for preprocessing before being + converted to the end location. This is to allow munging of the filename + prior to being tested for a blacklist path. + + If any error occurs, then the policy.d directory is cleared, the error is + written to the log, and the status line will eventually show as failed. + + :param resource_file: The zipped file to open and extract yaml files form. + :type resource_file: Union[AnyStr, os.PathLike[AnyStr]] + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the yaml + document. 
+ :type template_function: Union[None, Callable[[AnyStr], AnyStr]] + :param preserve_topdir: Keep the toplevel subdir + :type preserve_topdir: bool + :param preprocess_filename: Optional function to use to process filenames + extracted from the resource file. + :type preprocess_filename: Union[None, Callable[[AnyStr]. AnyStr]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + :returns: True if the processing was successful, False if not. + :rtype: boolean + """ + hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) + blacklist_paths = blacklist_paths or [] + completed = False + _preprocess = None + if preprocess_filename is not None and callable(preprocess_filename): + _preprocess = preprocess_filename + _user = service if user is None else user + _group = service if group is None else group + try: + with open_and_filter_yaml_files( + resource_file, preserve_topdir) as (zfp, gen): + # first clear out the policy.d directory and clear success + remove_policy_success_file() + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) + for name, ext, filename, zipinfo in gen: + # See if the name should be preprocessed. + if _preprocess is not None: + name = _preprocess(name) + # construct a name for the output file. + yaml_filename = path_for_policy_file(service, name) + if yaml_filename in blacklist_paths: + raise BadPolicyZipFile("policy.d name {} is blacklisted" + .format(yaml_filename)) + with zfp.open(zipinfo) as fp: + doc = fp.read() + # if template_function is not None, then offer the document + # to the template function + if ext in POLICYD_TEMPLATE_EXTS: + if (template_function is None or not + callable(template_function)): + raise BadPolicyZipFile( + "Template {} but no template_function is " + "available".format(filename)) + doc = template_function(doc) + yaml_doc = read_and_validate_yaml(doc, blacklist_keys) + # we may have to create the directory + maybe_create_directory_for(yaml_filename, _user, _group) + ch_host.write_file(yaml_filename, + yaml.dump(yaml_doc).encode('utf-8'), + _user, + _group) + # Every thing worked, so we mark up a success. + completed = True + except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except IOError as e: + # technically this shouldn't happen; it would be a programming error as + # the filename comes from Juju and thus, should exist. + hookenv.log( + "File {} failed with IOError. 
This really shouldn't happen" + " -- error: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except Exception as e: + import traceback + hookenv.log("General Exception({}) during policyd processing" + .format(str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + hookenv.log(traceback.format_exc()) + finally: + if not completed: + hookenv.log("Processing {} failed: cleaning policy.d directory" + .format(resource_file), + level=POLICYD_LOG_LEVEL_DEFAULT) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) + else: + # touch the success filename + hookenv.log("policy.d overrides installed.", + level=POLICYD_LOG_LEVEL_DEFAULT) + set_policy_success_file() + return completed diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ssh_migrations.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ssh_migrations.py new file mode 100644 index 0000000000000000000000000000000000000000..96b9f71d42d1c81539f78b8e1c4761f81d84c304 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/ssh_migrations.py @@ -0,0 +1,412 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess + +from charmhelpers.core.hookenv import ( + ERROR, + log, + relation_get, +) +from charmhelpers.contrib.network.ip import ( + is_ipv6, + ns_query, +) +from charmhelpers.contrib.openstack.utils import ( + get_hostname, + get_host_ip, + is_ip, +) + +NOVA_SSH_DIR = '/etc/nova/compute_ssh/' + + +def ssh_directory_for_unit(application_name, user=None): + """Return the directory used to store ssh assets for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified directory path. + :rtype: str + """ + if user: + application_name = "{}_{}".format(application_name, user) + _dir = os.path.join(NOVA_SSH_DIR, application_name) + for d in [NOVA_SSH_DIR, _dir]: + if not os.path.isdir(d): + os.mkdir(d) + for f in ['authorized_keys', 'known_hosts']: + f = os.path.join(_dir, f) + if not os.path.isfile(f): + open(f, 'w').close() + return _dir + + +def known_hosts(application_name, user=None): + """Return the known hosts file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'known_hosts') + + +def authorized_keys(application_name, user=None): + """Return the authorized keys file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. 
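The per-application SSH asset layout used by the helpers above is deterministic, which a side-effect-free sketch can show (the application and user names are examples):

```python
# Mirrors the path construction in ssh_directory_for_unit() without
# creating any directories or files.
import os

NOVA_SSH_DIR = '/etc/nova/compute_ssh/'


def ssh_dir(application_name, user=None):
    if user:
        application_name = "{}_{}".format(application_name, user)
    return os.path.join(NOVA_SSH_DIR, application_name)


assert ssh_dir("nova-compute-kvm", user="nova") == \
    "/etc/nova/compute_ssh/nova-compute-kvm_nova"
assert os.path.join(ssh_dir("nova-compute-kvm"), "known_hosts") == \
    "/etc/nova/compute_ssh/nova-compute-kvm/known_hosts"
```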
+ :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'authorized_keys') + + +def ssh_known_host_key(host, application_name, user=None): + """Return the first entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Host key + :rtype: str or None + """ + cmd = [ + 'ssh-keygen', + '-f', known_hosts(application_name, user), + '-H', + '-F', + host] + try: + # The first line of output is like '# Host xx found: line 1 type RSA', + # which should be excluded. + output = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + # RC of 1 seems to be legitimate for most ssh-keygen -F calls. + if e.returncode == 1: + output = e.output + else: + raise + output = output.strip() + + if output: + # Bug #1500589 cmd has 0 rc on precise if entry not present + lines = output.split('\n') + if len(lines) >= 1: + return lines[0] + + return None + + +def remove_known_host(host, application_name, user=None): + """Remove the entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + log('Removing SSH known host entry for compute host at %s' % host) + cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host] + subprocess.check_call(cmd) + + +def is_same_key(key_1, key_2): + """Extract the key from two host entries and compare them. + + :param key_1: Host key + :type key_1: str + :param key_2: Host key + :type key_2: str + """ + # The key format get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp' + # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare + # the part start with 'ssh-rsa' followed with '= ', because the hash + # value in the beginning will change each time. + k_1 = key_1.split('= ')[1] + k_2 = key_2.split('= ')[1] + return k_1 == k_2 + + +def add_known_host(host, application_name, user=None): + """Add the given host key to the known hosts file. + + :param host: host name + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] + try: + remote_key = subprocess.check_output(cmd).strip() + except Exception as e: + log('Could not obtain SSH host key from %s' % host, level=ERROR) + raise e + + current_key = ssh_known_host_key(host, application_name, user) + if current_key and remote_key: + if is_same_key(remote_key, current_key): + log('Known host key for compute host %s up to date.' % host) + return + else: + remove_known_host(host, application_name, user) + + log('Adding SSH host key to known hosts for compute node at %s.' % host) + with open(known_hosts(application_name, user), 'a') as out: + out.write("{}\n".format(remote_key)) + + +def ssh_authorized_key_exists(public_key, application_name, user=None): + """Check if given key is in the authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. 
+ :type user: str + :returns: Whether given key is in the authorized_key file. + :rtype: boolean + """ + with open(authorized_keys(application_name, user)) as keys: + return ('%s' % public_key) in keys.read() + + +def add_authorized_key(public_key, application_name, user=None): + """Add given key to the authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + with open(authorized_keys(application_name, user), 'a') as keys: + keys.write("{}\n".format(public_key)) + + +def ssh_compute_add_host_and_key(public_key, hostname, private_address, + application_name, user=None): + """Add a compute nodes ssh details to local cache. + + Collect various hostname variations and add the corresponding host keys to + the local known hosts file. Finally, add the supplied public key to the + authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param hostname: Hostname to collect host keys from. + :type hostname: str + :param private_address:aCorresponding private address for hostname + :type private_address: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + # If remote compute node hands us a hostname, ensure we have a + # known hosts entry for its IP, hostname and FQDN. + hosts = [private_address] + + if not is_ipv6(private_address): + if hostname: + hosts.append(hostname) + + if is_ip(private_address): + hn = get_hostname(private_address) + if hn: + hosts.append(hn) + short = hn.split('.')[0] + if ns_query(short): + hosts.append(short) + else: + hosts.append(get_host_ip(private_address)) + short = private_address.split('.')[0] + if ns_query(short): + hosts.append(short) + + for host in list(set(hosts)): + add_known_host(host, application_name, user) + + if not ssh_authorized_key_exists(public_key, application_name, user): + log('Saving SSH authorized key for compute host at %s.' % + private_address) + add_authorized_key(public_key, application_name, user) + + +def ssh_compute_add(public_key, application_name, rid=None, unit=None, + user=None): + """Add a compute nodes ssh details to local cache. + + Collect various hostname variations and add the corresponding host keys to + the local known hosts file. Finally, add the supplied public key to the + authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param rid: Relation id of the relation between this charm and the app. If + none is supplied it is assumed its the relation relating to + the current hook context. + :type rid: str + :param unit: Unit to add ssh asserts for if none is supplied it is assumed + its the unit relating to the current hook context. + :type unit: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + relation_data = relation_get(rid=rid, unit=unit) + ssh_compute_add_host_and_key( + public_key, + relation_data.get('hostname'), + relation_data.get('private-address'), + application_name, + user=user) + + +def ssh_known_hosts_lines(application_name, user=None): + """Return contents of known_hosts file for given application. 
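Because hashed known_hosts entries embed a per-entry salt, `is_same_key` above compares only the key material after the `'= '` separator. A standalone illustration (both hashes and the key body are fabricated):

```python
# Two hashed known_hosts entries for the same key: the salted hashes
# differ, the key material after '= ' does not.
def is_same_key(key_1, key_2):
    return key_1.split('= ')[1] == key_2.split('= ')[1]


a = "|1|2rUumCavEXWVaVyB5uMl6m85pZo=|CpEL6l7VTY37Tfg= ssh-rsa AAAAB3Nza"
b = "|1|9sDdIuIVmmRfIuJYomW3fXzqMXU=|Ghj2koNJDSXcVyQ= ssh-rsa AAAAB3Nza"
assert is_same_key(a, b)
```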
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh asserts are for.
+    :type user: str
+    """
+    known_hosts_list = []
+    with open(known_hosts(application_name, user)) as hosts:
+        for hosts_line in hosts:
+            if hosts_line.rstrip():
+                known_hosts_list.append(hosts_line.rstrip())
+    return(known_hosts_list)
+
+
+def ssh_authorized_keys_lines(application_name, user=None):
+    """Return contents of authorized_keys file for given application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh asserts are for.
+    :type user: str
+    """
+    authorized_keys_list = []
+
+    with open(authorized_keys(application_name, user)) as keys:
+        for authkey_line in keys:
+            if authkey_line.rstrip():
+                authorized_keys_list.append(authkey_line.rstrip())
+    return(authorized_keys_list)
+
+
+def ssh_compute_remove(public_key, application_name, user=None):
+    """Remove given public key from authorized_keys file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh asserts are for.
+    :type user: str
+    """
+    if not (os.path.isfile(authorized_keys(application_name, user)) or
+            os.path.isfile(known_hosts(application_name, user))):
+        return
+
+    # NOTE: pass the supplied user through; hard-coding user=None here
+    # would read the wrong authorized_keys file for per-user assets.
+    keys = ssh_authorized_keys_lines(application_name, user=user)
+    keys = [k.strip() for k in keys]
+
+    if public_key not in keys:
+        return
+
+    # Filter out every occurrence of the key; calling list.remove() while
+    # iterating the same list can skip entries.
+    keys = [k for k in keys if k != public_key]
+
+    with open(authorized_keys(application_name, user), 'w') as _keys:
+        keys = '\n'.join(keys)
+        if not keys.endswith('\n'):
+            keys += '\n'
+        _keys.write(keys)
+
+
+def get_ssh_settings(application_name, user=None):
+    """Retrieve the known host entries and public keys for application
+
+    Retrieve the known host entries and public keys for application for all
+    units of the given application related to this application for the
+    app + user combination.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh asserts are for.
+    :type user: str
+    :returns: Public keys + host keys for all units for app + user
+              combination.
+    :rtype: dict
+    """
+    settings = {}
+    keys = {}
+    prefix = ''
+    if user:
+        prefix = '{}_'.format(user)
+
+    for i, line in enumerate(ssh_known_hosts_lines(
+            application_name=application_name, user=user)):
+        settings['{}known_hosts_{}'.format(prefix, i)] = line
+    if settings:
+        settings['{}known_hosts_max_index'.format(prefix)] = len(
+            settings.keys())
+
+    for i, line in enumerate(ssh_authorized_keys_lines(
+            application_name=application_name, user=user)):
+        keys['{}authorized_keys_{}'.format(prefix, i)] = line
+    if keys:
+        keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
+    settings.update(keys)
+    return settings
+
+
+def get_all_user_ssh_settings(application_name):
+    """Retrieve the known host entries and public keys for application
+
+    Retrieve the known host entries and public keys for application for all
+    units of the given application related to this application for root user
+    and nova user.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :returns: Public keys + host keys for all units for app + user combination.
+ :rtype: dict + """ + settings = get_ssh_settings(application_name) + settings.update(get_ssh_settings(application_name, user='nova')) + return settings diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9df5f746fbdf5491c640a77df907b71817cbc5af --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/ceph.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/ceph.conf new file mode 100644 index 0000000000000000000000000000000000000000..a11ce8ab85654a4d838e448f36fe3698b59f9531 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/ceph.conf @@ -0,0 +1,24 @@ +############################################################################### +# [ WARNING ] +# ceph configuration file maintained by Juju +# local changes may be overwritten. 
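The template files that follow are rendered by charmhelpers' own templating layer; plain Jinja2 is close enough to show the shape of the context each expects. A hedged sketch against a fragment of the ceph.conf template (all context values are examples):

```python
# Render a fragment of the ceph.conf template with an example context.
from jinja2 import Template

fragment = Template(
    "auth_supported = {{ auth }}\n"
    "mon host = {{ mon_hosts }}\n"
    "log to syslog = {{ use_syslog }}\n")
print(fragment.render(auth="cephx",
                      mon_hosts="10.5.0.1:6789 10.5.0.2:6789",
                      use_syslog="false"))
```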
+############################################################################### +[global] +{% if auth -%} +auth_supported = {{ auth }} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ mon_hosts }} +{% endif -%} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} +{% if rbd_features %} +rbd default features = {{ rbd_features }} +{% endif %} + +[client] +{% if rbd_client_cache_settings -%} +{% for key, value in rbd_client_cache_settings.items() -%} +{{ key }} = {{ value }} +{% endfor -%} +{%- endif %} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/git.upstart b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/git.upstart new file mode 100644 index 0000000000000000000000000000000000000000..4bed404bc01087c4dec6a44a56d17ed122e1d1e3 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/git.upstart @@ -0,0 +1,17 @@ +description "{{ service_description }}" +author "Juju {{ service_name }} Charm " + +start on runlevel [2345] +stop on runlevel [!2345] + +respawn + +exec start-stop-daemon --start --chuid {{ user_name }} \ + --chdir {{ start_dir }} --name {{ process_name }} \ + --exec {{ executable_name }} -- \ + {% for config_file in config_files -%} + --config-file={{ config_file }} \ + {% endfor -%} + {% if log_file -%} + --log-file={{ log_file }} + {% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/haproxy.cfg new file mode 100644 index 0000000000000000000000000000000000000000..d36af2aa86f91b2ee1249594fffd106843dd0676 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -0,0 +1,77 @@ +global + log /var/lib/haproxy/dev/log local0 + log /var/lib/haproxy/dev/log local1 notice + maxconn 20000 + user haproxy + group haproxy + spread-checks 0 + stats socket /var/run/haproxy/admin.sock mode 600 level admin + stats timeout 2m + +defaults + log global + mode tcp + option tcplog + option dontlognull + retries 3 +{%- if haproxy_queue_timeout %} + timeout queue {{ haproxy_queue_timeout }} +{%- else %} + timeout queue 9000 +{%- endif %} +{%- if haproxy_connect_timeout %} + timeout connect {{ haproxy_connect_timeout }} +{%- else %} + timeout connect 9000 +{%- endif %} +{%- if haproxy_client_timeout %} + timeout client {{ haproxy_client_timeout }} +{%- else %} + timeout client 90000 +{%- endif %} +{%- if haproxy_server_timeout %} + timeout server {{ haproxy_server_timeout }} +{%- else %} + timeout server 90000 +{%- endif %} + +listen stats + bind {{ local_host }}:{{ stat_port }} + mode http + stats enable + stats hide-version + stats realm Haproxy\ Statistics + stats uri / + stats auth admin:{{ stat_password }} + +{% if frontends -%} +{% for service, ports in service_ports.items() -%} +frontend tcp-in_{{ service }} + bind *:{{ ports[0] }} + {% if ipv6_enabled -%} + bind :::{{ ports[0] }} + {% endif -%} + {% for frontend in frontends -%} + acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} + use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} + {% endfor -%} + default_backend {{ service }}_{{ default_backend }} + +{% for frontend in frontends -%} +backend {{ service }}_{{ frontend }} + balance leastconn + {% if backend_options -%} + {% if backend_options[service] 
-%} + {% for option in backend_options[service] -%} + {% for key, value in option.items() -%} + {{ key }} {{ value }} + {% endfor -%} + {% endfor -%} + {% endif -%} + {% endif -%} + {% for unit, address in frontends[frontend]['backends'].items() -%} + server {{ unit }} {{ address }}:{{ ports[1] }} check + {% endfor %} +{% endfor -%} +{% endfor -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/logrotate b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/logrotate new file mode 100644 index 0000000000000000000000000000000000000000..b2900d09a4ec2d04152ed7ce25bdc7346c349675 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/logrotate @@ -0,0 +1,9 @@ +/var/log/{{ logrotate_logs_location }}/*.log { + {{ logrotate_interval }} + {{ logrotate_count }} + compress + delaycompress + missingok + notifempty + copytruncate +} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/memcached.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/memcached.conf new file mode 100644 index 0000000000000000000000000000000000000000..26cb037c72beccdb1a4f9269844abdbac2406369 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/memcached.conf @@ -0,0 +1,53 @@ +############################################################################### +# [ WARNING ] +# memcached configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### + +# memcached default config file +# 2003 - Jay Bonci +# This configuration file is read by the start-memcached script provided as +# part of the Debian GNU/Linux distribution. + +# Run memcached as a daemon. This command is implied, and is not needed for the +# daemon to run. See the README.Debian that comes with this package for more +# information. +-d + +# Log memcached's output to /var/log/memcached +logfile /var/log/memcached.log + +# Be verbose +# -v + +# Be even more verbose (print client commands as well) +# -vv + +# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default +# Note that the daemon will grow to this size, but does not start out holding this much +# memory +-m 64 + +# Default connection port is 11211 +-p {{ memcache_port }} + +# Run the daemon as root. The start-memcached will default to running as root if no +# -u command is present in this config file +-u memcache + +# Specify which IP address to listen on. The default is to listen on all IP addresses +# This parameter is one of the only security measures that memcached has, so make sure +# it's listening on a firewalled interface. +-l {{ memcache_server }} + +# Limit the number of simultaneous incoming connections. The daemon default is 1024 +# -c 1024 + +# Lock down all paged memory. 
Consult with the README and homepage before you do this
+# -k
+
+# Return error when memory is exhausted (rather than removing items)
+# -M
+
+# Maximize core file limit
+# -r
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend
new file mode 100644
index 0000000000000000000000000000000000000000..f614b3fa71928b615a1f058c6928d3c98bed9575
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend
@@ -0,0 +1,29 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+    ServerName {{ endpoint }}
+    SSLEngine on
+    SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
+    SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8
+    SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+    ProxyPass / http://localhost:{{ int }}/
+    ProxyPassReverse / http://localhost:{{ int }}/
+    ProxyPreserveHost on
+    RequestHeader set X-Forwarded-Proto "https"
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+    Order deny,allow
+    Allow from all
+</Proxy>
+<Location />
+    Order allow,deny
+    Allow from all
+</Location>
+{% endif -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
new file mode 100644
index 0000000000000000000000000000000000000000..f614b3fa71928b615a1f058c6928d3c98bed9575
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
@@ -0,0 +1,29 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+    ServerName {{ endpoint }}
+    SSLEngine on
+    SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
+    SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8
+    SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+    ProxyPass / http://localhost:{{ int }}/
+    ProxyPassReverse / http://localhost:{{ int }}/
+    ProxyPreserveHost on
+    RequestHeader set X-Forwarded-Proto "https"
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+    Order deny,allow
+    Allow from all
+</Proxy>
+<Location />
+    Order allow,deny
+    Allow from all
+</Location>
+{% endif -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
new file mode 100644
index 0000000000000000000000000000000000000000..5dcebe7c863728b040337616b492296f536b3ef3
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
@@ -0,0 +1,12 @@
+{% if auth_host -%}
+[keystone_authtoken]
+auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
+auth_url = {{ auth_protocol }}://{{
auth_host }}:{{ auth_port }} +auth_plugin = password +project_domain_id = default +user_domain_id = default +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy new file mode 100644 index 0000000000000000000000000000000000000000..9356b2be4e7dabe089b4d7d39793146bb92f3f48 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy @@ -0,0 +1,10 @@ +{% if auth_host -%} +[keystone_authtoken] +# Juno specific config (Bug #1557223) +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} +identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +admin_tenant_name = {{ admin_tenant_name }} +admin_user = {{ admin_user }} +admin_password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka new file mode 100644 index 0000000000000000000000000000000000000000..c281868b16a885cd01a234984974af0349a5d242 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -0,0 +1,22 @@ +{% if auth_host -%} +[keystone_authtoken] +auth_type = password +{% if api_version == "3" -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3 +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3 +project_domain_name = {{ admin_domain_name }} +user_domain_name = {{ admin_domain_name }} +{% else -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +project_domain_name = default +user_domain_name = default +{% endif -%} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% if use_memcache == true %} +memcached_servers = {{ memcache_url }} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only new file mode 100644 index 0000000000000000000000000000000000000000..d26a91fe1f00e2b12d094be727a53eddc87b829d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only @@ -0,0 +1,9 @@ +{% if auth_host -%} +[keystone_authtoken] +{% for option_name, option_value in keystone_authtoken.items() -%} +{{ option_name }} = {{ option_value }} +{% endfor -%} +{% if use_memcache == true %} +memcached_servers = {{ memcache_url }} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-cache b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-cache new file mode 100644 index 
0000000000000000000000000000000000000000..e056a32aafe32413cefb40b9414d064f2b0c8fd2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-cache @@ -0,0 +1,6 @@ +[cache] +{% if memcache_url %} +enabled = true +backend = oslo_cache.memcache_pool +memcache_servers = {{ memcache_url }} +{% endif %} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit new file mode 100644 index 0000000000000000000000000000000000000000..bed2216aba7217022ded17dec4cdb0871f513b40 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata new file mode 100644 index 0000000000000000000000000000000000000000..365f43757719b2de3c601ea6a9752dac8a8b3545 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-middleware new file mode 100644 index 0000000000000000000000000000000000000000..dd73230a42aa037582989979c1bc8132d30b9b38 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-middleware @@ -0,0 +1,5 @@ +[oslo_middleware] + +# Bug #1758675 +enable_proxy_headers_parsing = true + diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-notifications new file mode 100644 index 0000000000000000000000000000000000000000..71c7eb068eace94e8986d7a868bded236f75128c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -0,0 +1,15 @@ +{% if transport_url -%} +[oslo_messaging_notifications] +driver = {{ oslo_messaging_driver }} +transport_url = {{ transport_url }} +{% if send_notifications_to_logs %} +driver = log +{% endif %} +{% if notification_topics -%} +topics = {{ notification_topics }} +{% endif -%} +{% if notification_format -%} +[notifications] +notification_format = {{ notification_format }} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-placement b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-placement new file 
mode 100644 index 0000000000000000000000000000000000000000..97724bdb5af6e0352d0b920600d7cbbc8318fa7c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-placement @@ -0,0 +1,19 @@ +[placement] +{% if auth_host -%} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_type = password +{% if api_version == "3" -%} +project_domain_name = {{ admin_domain_name }} +user_domain_name = {{ admin_domain_name }} +{% else -%} +project_domain_name = default +user_domain_name = default +{% endif -%} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +{% endif -%} +{% if region -%} +os_region_name = {{ region }} +{% endif -%} +randomize_allocation_candidates = true diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo new file mode 100644 index 0000000000000000000000000000000000000000..b444c9c99bd179eaa0be31af462576e4ca9c45b5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo @@ -0,0 +1,22 @@ +{% if rabbitmq_host or rabbitmq_hosts -%} +[oslo_messaging_rabbit] +rabbit_userid = {{ rabbitmq_user }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +rabbit_password = {{ rabbitmq_password }} +{% if rabbitmq_hosts -%} +rabbit_hosts = {{ rabbitmq_hosts }} +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +rabbit_durable_queues = False +{% endif -%} +{% else -%} +rabbit_host = {{ rabbitmq_host }} +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +rabbit_port = {{ rabbit_ssl_port }} +{% if rabbit_ssl_ca -%} +kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} +{% endif -%} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-zeromq b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-zeromq new file mode 100644 index 0000000000000000000000000000000000000000..95f1a76ce87f9babdcba58da79c68d34c2776eea --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/section-zeromq @@ -0,0 +1,14 @@ +{% if zmq_host -%} +# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) +rpc_backend = zmq +rpc_zmq_host = {{ zmq_host }} +{% if zmq_redis_address -%} +rpc_zmq_matchmaker = redis +matchmaker_heartbeat_freq = 15 +matchmaker_heartbeat_ttl = 30 +[matchmaker_redis] +host = {{ zmq_redis_address }} +{% else -%} +rpc_zmq_matchmaker = ring +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/vendor_data.json b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/vendor_data.json new file mode 100644 index 0000000000000000000000000000000000000000..904f612a7f74490d4508920400d18d493d056477 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/vendor_data.json @@ -0,0 +1 @@ +{{ vendor_data_json }} \ No newline at end of file diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf new file mode 100644 index 
0000000000000000000000000000000000000000..23b62a385283e6b8f1a6af0fcebccac747031b26
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
@@ -0,0 +1,91 @@
+# Configuration file maintained by Juju. Local changes may be overwritten.
+
+{% if port -%}
+Listen {{ port }}
+{% endif -%}
+
+{% if admin_port -%}
+Listen {{ admin_port }}
+{% endif -%}
+
+{% if public_port -%}
+Listen {{ public_port }}
+{% endif -%}
+
+{% if port -%}
+<VirtualHost *:{{ port }}>
+    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}
+    WSGIScriptAlias / {{ script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if admin_port -%}
+<VirtualHost *:{{ admin_port }}>
+    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-admin
+    WSGIScriptAlias / {{ admin_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if public_port -%}
+<VirtualHost *:{{ public_port }}>
+    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-public
+    WSGIScriptAlias / {{ public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
new file mode 100644
index 0000000000000000000000000000000000000000..23b62a385283e6b8f1a6af0fcebccac747031b26
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
@@ -0,0 +1,91 @@
+# Configuration file maintained by Juju. Local changes may be overwritten.
+
+{% if port -%}
+Listen {{ port }}
+{% endif -%}
+
+{% if admin_port -%}
+Listen {{ admin_port }}
+{% endif -%}
+
+{% if public_port -%}
+Listen {{ public_port }}
+{% endif -%}
+
+{% if port -%}
+<VirtualHost *:{{ port }}>
+    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}
+    WSGIScriptAlias / {{ script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if admin_port -%}
+<VirtualHost *:{{ admin_port }}>
+    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-admin
+    WSGIScriptAlias / {{ admin_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if public_port -%}
+<VirtualHost *:{{ public_port }}>
+    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-public
+    WSGIScriptAlias / {{ public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templating.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templating.py
new file mode 100644
index 0000000000000000000000000000000000000000..050f8af5c9135db4fafdbf5098edcd22c7157ff8
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/templating.py
@@ -0,0 +1,379 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import six
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+    INFO,
+    TRACE
+)
+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
+
+try:
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+except ImportError:
+    apt_update(fatal=True)
+    if six.PY2:
+        apt_install('python-jinja2', fatal=True)
+    else:
+        apt_install('python3-jinja2', fatal=True)
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+
+
+class OSConfigException(Exception):
+    pass
+
+
+def get_loader(templates_dir, os_release):
+    """
+    Create a jinja2.ChoiceLoader containing template dirs up to
+    and including os_release. If a release's template directory is
+    missing under templates_dir, it is omitted from the loader.
+    templates_dir is added to the bottom of the search list as a base
+    loading dir.
+
+    A charm may also ship a templates dir with this module
+    and it will be appended to the bottom of the search list, eg::
+
+        hooks/charmhelpers/contrib/openstack/templates
+
+    :param templates_dir (str): Base template directory containing release
+        sub-directories.
+    :param os_release (str): OpenStack release codename to construct template
+        loader.
+    :returns: jinja2.ChoiceLoader constructed with a list of
+        jinja2.FileSystemLoaders, ordered in descending
+        order by OpenStack release.
+    """
+    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
+                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
+
+    if not os.path.isdir(templates_dir):
+        log('Templates directory not found @ %s.' % templates_dir,
+            level=ERROR)
+        raise OSConfigException
+
+    # the bottom contains templates_dir and possibly a common templates dir
+    # shipped with the helper.
+    loaders = [FileSystemLoader(templates_dir)]
+    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
+    if os.path.isdir(helper_templates):
+        loaders.append(FileSystemLoader(helper_templates))
+
+    for rel, tmpl_dir in tmpl_dirs:
+        if os.path.isdir(tmpl_dir):
+            loaders.insert(0, FileSystemLoader(tmpl_dir))
+        if rel == os_release:
+            break
+    # demote this log to the lowest level; we don't really need to see these
+    # logs in production even when debugging.
+    log('Creating choice loader with dirs: %s' %
+        [l.searchpath for l in loaders], level=TRACE)
+    return ChoiceLoader(loaders)
+
+
+class OSConfigTemplate(object):
+    """
+    Associates a config file template with a list of context generators.
+    Responsible for constructing a template context based on those generators.
+    """
+
+    def __init__(self, config_file, contexts, config_template=None):
+        self.config_file = config_file
+
+        if hasattr(contexts, '__call__'):
+            self.contexts = [contexts]
+        else:
+            self.contexts = contexts
+
+        self._complete_contexts = []
+
+        self.config_template = config_template
+
+    def context(self):
+        ctxt = {}
+        for context in self.contexts:
+            _ctxt = context()
+            if _ctxt:
+                ctxt.update(_ctxt)
+                # track interfaces for every complete context.
+                [self._complete_contexts.append(interface)
+                 for interface in context.interfaces
+                 if interface not in self._complete_contexts]
+        return ctxt
+
+    def complete_contexts(self):
+        '''
+        Return a list of interfaces that have satisfied contexts.
+        '''
+        if self._complete_contexts:
+            return self._complete_contexts
+        self.context()
+        return self._complete_contexts
+
+    @property
+    def is_string_template(self):
+        """:returns: True if this instance was initialised from a string
+            template rather than a template file."""
+        return self.config_template is not None
+
+
+class OSConfigRenderer(object):
+    """
+    This class provides a common templating system to be used by OpenStack
+    charms. It is intended to help charms share common code and templates,
+    and ease the burden of managing config templates across multiple OpenStack
+    releases.
+
+    Basic usage::
+
+        # import some common context generators from charmhelpers
+        from charmhelpers.contrib.openstack import context
+
+        # Create a renderer object for a specific OS release.
+        configs = OSConfigRenderer(templates_dir='/tmp/templates',
+                                   openstack_release='grizzly')
+        # register some config files with context generators.
+        configs.register(config_file='/etc/nova/nova.conf',
+                         contexts=[context.SharedDBContext(),
+                                   context.AMQPContext()])
+        configs.register(config_file='/etc/nova/api-paste.ini',
+                         contexts=[context.IdentityServiceContext()])
+        configs.register(config_file='/etc/haproxy/haproxy.conf',
+                         contexts=[context.HAProxyContext()])
+        configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
+                         contexts=[context.ExtraPolicyContext(),
+                                   context.KeystoneContext()],
+                         config_template=hookenv.config('extra-policy'))
+        # write out a single config
+        configs.write('/etc/nova/nova.conf')
+        # write out all registered configs
+        configs.write_all()
+
+    **OpenStack Releases and template loading**
+
+    When the object is instantiated, it is associated with a specific OS
+    release. This dictates how the template loader will be constructed.
+
+    The constructed loader attempts to load the template from several places
+    in the following order:
+    - from the most recent OS release-specific template dir (if one exists)
+    - the base templates_dir
+    - a template directory shipped in the charm with this helper file.
+
+    For the example above, '/tmp/templates' contains the following structure::
+
+        /tmp/templates/nova.conf
+        /tmp/templates/api-paste.ini
+        /tmp/templates/grizzly/api-paste.ini
+        /tmp/templates/havana/api-paste.ini
+
+    Since it was registered with the grizzly release, it first searches
+    the grizzly directory for nova.conf, then the templates dir.
+
+    When writing api-paste.ini, it will find the template in the grizzly
+    directory.
+
+    If the object were created with folsom, it would fall back to the
+    base templates dir for its api-paste.ini template.
+
+    This system should help manage changes in config files through
+    openstack releases, allowing charms to fall back to the most recently
+    updated config template for a given release.
+
+    The haproxy.conf, since it is not shipped in the templates dir, will
+    be loaded from the module directory's template directory, eg
+    $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
+    us to ship common templates (haproxy, apache) with the helpers.
+
+    **Context generators**
+
+    Context generators are used to generate template contexts during hook
+    execution. Doing so may require inspecting service relations, charm
+    config, etc. When registered, a config file is associated with a list
+    of generators. When a template is rendered and written, all context
+    generators are called in a chain to generate the context dictionary
+    passed to the jinja2 template. See context.py for more info.
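+
+    A minimal generator, for illustration only (the class name and values
+    here are hypothetical; the real generators live in context.py)::
+
+        class IdentityContext(object):
+            interfaces = ['identity-service']
+
+            def __call__(self):
+                # Return {} while relation data is incomplete; otherwise
+                # return the variables the templates need.
+                return {'auth_host': '10.0.0.10', 'auth_port': 35357}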
+ """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + if six.PY2: + apt_install('python-jinja2') + else: + apt_install('python3-jinja2') + + def register(self, config_file, contexts, config_template=None): + """ + Register a config file with a list of context generators to be called + during rendering. + config_template can be used to load a template from a string instead of + using template loaders and template files. + :param config_file (str): a path where a config file will be rendered + :param contexts (list): a list of context dictionaries with kv pairs + :param config_template (str): an optional template string to use + """ + self.templates[config_file] = OSConfigTemplate( + config_file=config_file, + contexts=contexts, + config_template=config_template + ) + log('Registered config file: {}'.format(config_file), + level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from {}'.format(template.filename), + level=INFO) + return template + + def _get_template_from_string(self, ostmpl): + ''' + Get a jinja2 template object from a string. + :param ostmpl: OSConfigTemplate to use as a data source. + ''' + self._get_tmpl_env() + template = self._tmpl_env.from_string(ostmpl.config_template) + log('Loaded a template from a string for {}'.format( + ostmpl.config_file), + level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: {}'.format(config_file), level=ERROR) + raise OSConfigException + + ostmpl = self.templates[config_file] + ctxt = ostmpl.context() + + if ostmpl.is_string_template: + template = self._get_template_from_string(ostmpl) + log('Rendering from a string template: ' + '{}'.format(config_file), + level=INFO) + else: + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking + # for it using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from {} by {} or {}.' + ''.format( + self.templates_dir, + os.path.basename(config_file), + _tmpl + ), + level=ERROR) + raise e + + log('Rendering from template: {}'.format(config_file), + level=INFO) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. 
+        """
+        if config_file not in self.templates:
+            log('Config not registered: %s' % config_file, level=ERROR)
+            raise OSConfigException
+
+        _out = self.render(config_file)
+        if six.PY3:
+            _out = _out.encode('UTF-8')
+
+        with open(config_file, 'wb') as out:
+            out.write(_out)
+
+        log('Wrote template %s.' % config_file, level=INFO)
+
+    def write_all(self):
+        """
+        Write out all registered config files.
+        """
+        [self.write(k) for k in six.iterkeys(self.templates)]
+
+    def set_release(self, openstack_release):
+        """
+        Resets the template environment and generates a new template loader
+        based on the new openstack release.
+        """
+        self._tmpl_env = None
+        self.openstack_release = openstack_release
+        self._get_tmpl_env()
+
+    def complete_contexts(self):
+        '''
+        Returns a list of context interfaces that yield a complete context.
+        '''
+        interfaces = []
+        [interfaces.extend(i.complete_contexts())
+         for i in six.itervalues(self.templates)]
+        return interfaces
+
+    def get_incomplete_context_data(self, interfaces):
+        '''
+        Return dictionary of relation status of interfaces and any missing
+        required context data. Example:
+        {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+         'zeromq-configuration': {'related': False}}
+        '''
+        incomplete_context_data = {}
+
+        for i in six.itervalues(self.templates):
+            for context in i.contexts:
+                for interface in interfaces:
+                    related = False
+                    if interface in context.interfaces:
+                        related = context.get_related()
+                        missing_data = context.missing_data
+                        if missing_data:
+                            incomplete_context_data[interface] = {'missing_data': missing_data}
+                    if related:
+                        if incomplete_context_data.get(interface):
+                            incomplete_context_data[interface].update({'related': True})
+                        else:
+                            incomplete_context_data[interface] = {'related': True}
+                    else:
+                        incomplete_context_data[interface] = {'related': False}
+        return incomplete_context_data
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/utils.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbf0156108537f9caae35e193c346b0e4ee237b9
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/utils.py
@@ -0,0 +1,2352 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common python helper functions used for OpenStack charms.
+from collections import OrderedDict, namedtuple +from functools import wraps + +import subprocess +import json +import os +import sys +import re +import itertools +import functools + +import six +import traceback +import uuid +import yaml + +from charmhelpers import deprecate + +from charmhelpers.contrib.network import ip + +from charmhelpers.core import unitdata + +from charmhelpers.core.hookenv import ( + WORKLOAD_STATES, + action_fail, + action_set, + config, + expected_peer_units, + expected_related_units, + log as juju_log, + charm_dir, + INFO, + ERROR, + metadata, + related_units, + relation_get, + relation_id, + relation_ids, + relation_set, + status_set, + hook_name, + application_version_set, + cached, + leader_set, + leader_get, + local_unit, +) + +from charmhelpers.core.strutils import ( + BasicStringComparator, + bool_from_string, +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr, + is_ipv6, + port_has_listener, +) + +from charmhelpers.core.host import ( + lsb_release, + mounts, + umount, + service_running, + service_pause, + service_resume, + service_stop, + service_start, + restart_on_change_helper, +) +from charmhelpers.fetch import ( + apt_cache, + import_key as fetch_import_key, + add_source as fetch_add_source, + SourceConfigError, + GPGKeyError, + get_upstream_version, + filter_missing_packages, + ubuntu_apt_pkg as apt, +) + +from charmhelpers.fetch.snap import ( + snap_install, + snap_refresh, + valid_snap_channel, +) + +from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk +from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device +from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.policyd import ( + policyd_status_message_prefix, + POLICYD_CONFIG_NAME, +) + +from charmhelpers.contrib.openstack.ha.utils import ( + expect_ha, +) + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' + 'restricted main multiverse universe') + +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', + 'queens', + 'rocky', + 'stein', + 'train', + 'ussuri', +) + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), + ('2014.2', 'juno'), + ('2015.1', 'kilo'), + ('2015.2', 'liberty'), + ('2016.1', 'mitaka'), + ('2016.2', 'newton'), + ('2017.1', 'ocata'), + ('2017.2', 'pike'), + ('2018.1', 'queens'), + ('2018.2', 'rocky'), + ('2019.1', 'stein'), + ('2019.2', 'train'), + ('2020.1', 'ussuri'), +]) + +# The ugly duckling - must list releases oldest to newest +SWIFT_CODENAMES = OrderedDict([ + 
('diablo', + ['1.4.3']), + ('essex', + ['1.4.8']), + ('folsom', + ['1.7.4']), + ('grizzly', + ['1.7.6', '1.7.7', '1.8.0']), + ('havana', + ['1.9.0', '1.9.1', '1.10.0']), + ('icehouse', + ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), + ('juno', + ['2.0.0', '2.1.0', '2.2.0']), + ('kilo', + ['2.2.1', '2.2.2']), + ('liberty', + ['2.3.0', '2.4.0', '2.5.0']), + ('mitaka', + ['2.5.0', '2.6.0', '2.7.0']), + ('newton', + ['2.8.0', '2.9.0', '2.10.0']), + ('ocata', + ['2.11.0', '2.12.0', '2.13.0']), + ('pike', + ['2.13.0', '2.15.0']), + ('queens', + ['2.16.0', '2.17.0']), + ('rocky', + ['2.18.0', '2.19.0']), + ('stein', + ['2.20.0', '2.21.0']), + ('train', + ['2.22.0', '2.23.0']), + ('ussuri', + ['2.24.0', '2.25.0']), +]) + +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12', 'liberty'), + ('13', 'mitaka'), + ('14', 'newton'), + ('15', 'ocata'), + ('16', 'pike'), + ('17', 'queens'), + ('18', 'rocky'), + ('19', 'stein'), + ('20', 'train'), + ('21', 'ussuri'), + ]), + 'neutron-common': OrderedDict([ + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), + ('14', 'stein'), + ('15', 'train'), + ('16', 'ussuri'), + ]), + 'cinder-common': OrderedDict([ + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), + ('14', 'stein'), + ('15', 'train'), + ('16', 'ussuri'), + ]), + 'keystone': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), + ('17', 'ussuri'), + ]), + 'horizon-common': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), + ('18', 'ussuri'), + ]), + 'ceilometer-common': OrderedDict([ + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), + ('12', 'stein'), + ('13', 'train'), + ('14', 'ussuri'), + ]), + 'heat-common': OrderedDict([ + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), + ('12', 'stein'), + ('13', 'train'), + ('14', 'ussuri'), + ]), + 'glance-common': OrderedDict([ + ('11', 'liberty'), + ('12', 'mitaka'), + ('13', 'newton'), + ('14', 'ocata'), + ('15', 'pike'), + ('16', 'queens'), + ('17', 'rocky'), + ('18', 'stein'), + ('19', 'train'), + ('20', 'ussuri'), + ]), + 'openstack-dashboard': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), + ('18', 'ussuri'), + ]), +} + +DEFAULT_LOOPBACK_SIZE = '5G' + +DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading' + +DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY] + + +class CompareOpenStackReleases(BasicStringComparator): + """Provide comparisons of OpenStack releases. + + Use in the form of + + if CompareOpenStackReleases(release) > 'mitaka': + # do something with mitaka + """ + _list = OPENSTACK_RELEASES + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_installed_semantic_versioned_packages(): + '''Get a list of installed packages which have OpenStack semantic versioning + + :returns List of installed packages + :rtype: [pkg1, pkg2, ...] 
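+
+    For illustration only (the package set is hypothetical): on a deployed
+    nova-compute unit this might return ['nova-common', 'neutron-common'].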
+ ''' + return filter_missing_packages(PACKAGE_CODENAMES.keys()) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src is None: + return rel + if src in ['distro', 'distro-proposed', 'proposed']: + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('-')[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if (src.startswith('deb') or + src.startswith('ppa') or + src.startswith('snap')): + for v in OPENSTACK_CODENAMES.values(): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): + '''Determine OpenStack version number from codename.''' + for k, v in six.iteritems(version_map): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_version_codename_swift(codename): + '''Determine OpenStack version number of swift from codename.''' + for k, v in six.iteritems(SWIFT_CODENAMES): + if k == codename: + return v[-1] + e = 'Could not derive swift version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_swift_codename(version): + '''Determine OpenStack codename that corresponds to swift version.''' + codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + + if len(codenames) > 1: + # If more than one release codename contains this version we determine + # the actual codename based on the highest available install source. + for codename in reversed(codenames): + releases = UBUNTU_OPENSTACK_RELEASE + release = [k for k, v in six.iteritems(releases) if codename in v] + ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if six.PY3: + ret = ret.decode('UTF-8') + if codename in ret or release[0] in ret: + return codename + elif len(codenames) == 1: + return codenames[0] + + # NOTE: fallback - attempt to match with just major.minor version + match = re.match(r'^(\d+)\.(\d+)', version) + if match: + major_minor_version = match.group(0) + for codename, versions in six.iteritems(SWIFT_CODENAMES): + for release_version in versions: + if release_version.startswith(major_minor_version): + return codename + + return None + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + + if snap_install_requested(): + cmd = ['snap', 'list', package] + try: + out = subprocess.check_output(cmd) + if six.PY3: + out = out.decode('UTF-8') + except subprocess.CalledProcessError: + return None + lines = out.split('\n') + for line in lines: + if package in line: + # Second item in list is Version + return line.split()[1] + + cache = apt_cache() + + try: + pkg = cache[package] + except Exception: + if not fatal: + return None + # the package is unknown to the current apt cache. 
+        e = 'Could not determine version of package with no installation '\
+            'candidate: %s' % package
+        error_out(e)
+
+    if not pkg.current_ver:
+        if not fatal:
+            return None
+        # package is known, but no version is currently installed.
+        e = 'Could not determine version of uninstalled package: %s' % package
+        error_out(e)
+
+    vers = apt.upstream_version(pkg.current_ver.ver_str)
+    if 'swift' in pkg.name:
+        # Fully x.y.z match for swift versions
+        match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers)
+    else:
+        # x.y match only for 20XX.X
+        # and ignore patch level for other packages
+        match = re.match(r'^(\d+)\.(\d+)', vers)
+
+    if match:
+        vers = match.group(0)
+
+    # Generate a major version number for newer semantic
+    # versions of openstack projects
+    major_vers = vers.split('.')[0]
+    # >= Liberty independent project versions
+    if (package in PACKAGE_CODENAMES and
+            major_vers in PACKAGE_CODENAMES[package]):
+        return PACKAGE_CODENAMES[package][major_vers]
+    else:
+        # < Liberty co-ordinated project versions
+        try:
+            if 'swift' in pkg.name:
+                return get_swift_codename(vers)
+            else:
+                return OPENSTACK_CODENAMES[vers]
+        except KeyError:
+            if not fatal:
+                return None
+            e = 'Could not determine OpenStack codename for version %s' % vers
+            error_out(e)
+
+
+def get_os_version_package(pkg, fatal=True):
+    '''Derive OpenStack version number from an installed package.'''
+    codename = get_os_codename_package(pkg, fatal=fatal)
+
+    if not codename:
+        return None
+
+    if 'swift' in pkg:
+        vers_map = SWIFT_CODENAMES
+        for cname, version in six.iteritems(vers_map):
+            if cname == codename:
+                return version[-1]
+    else:
+        vers_map = OPENSTACK_CODENAMES
+        for version, cname in six.iteritems(vers_map):
+            if cname == codename:
+                return version
+    # e = "Could not determine OpenStack version for package: %s" % pkg
+    # error_out(e)
+
+
+# Module local cache variable for the os_release.
+_os_rel = None
+
+
+def reset_os_release():
+    '''Unset the cached os_release version'''
+    global _os_rel
+    _os_rel = None
+
+
+def os_release(package, base=None, reset_cache=False, source_key=None):
+    """Returns OpenStack release codename from a cached global.
+
+    If reset_cache then unset the cached os_release version and return the
+    freshly determined version.
+
+    If the codename cannot be determined from either an installed package or
+    the installation source, the earliest release supported by the charm
+    should be returned.
+
+    :param package: Name of package to determine release from
+    :type package: str
+    :param base: Fallback codename if attempts to determine the codename
+                 from the package fail
+    :type base: Optional[str]
+    :param reset_cache: Reset any cached codename value
+    :type reset_cache: bool
+    :param source_key: Name of source configuration option
+                       (default: 'openstack-origin')
+    :type source_key: Optional[str]
+    :returns: OpenStack release codename
+    :rtype: str
+    """
+    source_key = source_key or 'openstack-origin'
+    if not base:
+        base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']]
+    global _os_rel
+    if reset_cache:
+        reset_os_release()
+    if _os_rel:
+        return _os_rel
+    _os_rel = (
+        get_os_codename_package(package, fatal=False) or
+        get_os_codename_install_source(config(source_key)) or
+        base)
+    return _os_rel
+
+
+@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log)
+def import_key(keyid):
+    """Import a key, either ASCII armored, or a GPG key id.
+
+    @param keyid: the key in ASCII armor format, or a GPG key id.
+    @raises SystemExit() via sys.exit() on failure.
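+
+    For illustration, a bare GPG key id such as '5EDB1B62EC4926EA'
+    (CLOUD_ARCHIVE_KEY_ID above) is accepted, as is a full ASCII-armored
+    public key block.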
+    """
+    try:
+        return fetch_import_key(keyid)
+    except GPGKeyError as e:
+        error_out("Could not import key: {}".format(str(e)))
+
+
+def get_source_and_pgp_key(source_and_key):
+    """Look for a pgp key ID or ascii-armor key in the given input.
+
+    :param source_and_key: String, "source_spec|keyid" where '|keyid' is
+        optional.
+    :returns: (source_spec, key_id OR None) as a tuple.  Returns None for
+        key_id if there was no '|' in the source_and_key string.
+    """
+    try:
+        source, key = source_and_key.split('|', 1)
+        return source, key or None
+    except ValueError:
+        return source_and_key, None
+
+
+@deprecate("use charmhelpers.fetch.add_source() instead.",
+           "2017-07", log=juju_log)
+def configure_installation_source(source_plus_key):
+    """Configure an installation source.
+
+    The functionality is provided by charmhelpers.fetch.add_source()
+    The difference between the two functions is that add_source() signature
+    requires the key to be passed directly, whereas this function passes an
+    optional key by appending '|<key>' to the end of the source specification
+    'source'.
+
+    Another difference from add_source() is that the function calls sys.exit(1)
+    if the configuration fails, whereas add_source() raises
+    SourceConfigError().  Another difference is that add_source() silently
+    fails (with a juju_log command) if there is no matching source to
+    configure, whereas this function fails with a sys.exit(1).
+
+    :param source_plus_key: String_plus_key -- see above for details.
+
+    Note that the behaviour on error is to log the error to the juju log and
+    then call sys.exit(1).
+    """
+    if source_plus_key.startswith('snap'):
+        # Do nothing for snap installs
+        return
+    # extract the key if there is one, denoted by a '|' in the rel
+    source, key = get_source_and_pgp_key(source_plus_key)
+
+    # handle the ordinary sources via add_source
+    try:
+        fetch_add_source(source, key, fail_invalid=True)
+    except SourceConfigError as se:
+        error_out(str(se))
+
+
+def config_value_changed(option):
+    """
+    Determine if config value changed since last call to this function.
+    """
+    hook_data = unitdata.HookData()
+    with hook_data():
+        db = unitdata.kv()
+        current = config(option)
+        saved = db.get(option)
+        db.set(option, current)
+        if saved is None:
+            return False
+        return current != saved
+
+
+def get_endpoint_key(service_name, relation_id, unit_name):
+    """Return the key used to refer to an ep changed notification from a unit.
+
+    :param service_name: Service name eg nova, neutron, placement etc
+    :type service_name: str
+    :param relation_id: The id of the relation the unit is on.
+    :type relation_id: str
+    :param unit_name: The name of the unit publishing the notification.
+    :type unit_name: str
+    :returns: The key used to refer to an ep changed notification from a unit
+    :rtype: str
+    """
+    return '{}-{}-{}'.format(
+        service_name,
+        relation_id.replace(':', '_'),
+        unit_name.replace('/', '_'))
+
+
+def get_endpoint_notifications(service_names, rel_name='identity-service'):
+    """Return all notifications for the given services.
+
+    :param service_names: List of service names.
+    :type service_names: List
+    :param rel_name: Name of the relation to query
+    :type rel_name: str
+    :returns: A dict containing the source of the notification and its nonce.
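+
+    For illustration only (the relation id, unit name and nonce here are
+    hypothetical), the result might look like::
+
+        {'placement-identity-service_0-keystone_0': '1bc328af'}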
+ :rtype: Dict[str, str] + """ + notifications = {} + for rid in relation_ids(rel_name): + for unit in related_units(relid=rid): + ep_changed_json = relation_get( + rid=rid, + unit=unit, + attribute='ep_changed') + if ep_changed_json: + ep_changed = json.loads(ep_changed_json) + for service in service_names: + if ep_changed.get(service): + key = get_endpoint_key(service, rid, unit) + notifications[key] = ep_changed[service] + return notifications + + +def endpoint_changed(service_name, rel_name='identity-service'): + """Whether a new notification has been recieved for an endpoint. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: Whether endpoint has changed + :rtype: bool + """ + changed = False + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + [service_name], + rel_name=rel_name) + for key, nonce in notifications.items(): + if db.get(key) != nonce: + juju_log(('New endpoint change notification found: ' + '{}={}').format(key, nonce), + 'INFO') + changed = True + break + return changed + + +def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): + """Save the enpoint triggers in db so it can be tracked if they changed. + + :param service_names: List of service name. + :type service_name: List + :param rel_name: Name of the relation to query + :type rel_name: str + """ + with unitdata.HookData()() as t: + db = t[0] + notifications = get_endpoint_notifications( + service_names, + rel_name=rel_name) + for key, nonce in notifications.items(): + db.set(key, nonce) + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) + with open(juju_rc_path, 'wt') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in six.iteritems(env_vars) if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. + """ + + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + if not cur_vers: + # The package has not been installed yet do not attempt upgrade + return False + if "swift" in package: + codename = get_os_codename_install_source(src) + avail_vers = get_os_version_codename_swift(codename) + else: + try: + avail_vers = get_os_version_install_source(src) + except Exception: + avail_vers = cur_vers + apt.init() + return apt.version_compare(avail_vers, cur_vers) >= 1 + + +def ensure_block_device(block_device): + ''' + Confirm block_device, create as loopback if necessary. + + :param block_device: str: Full path of block device to ensure. + + :returns: str: Full path of ensured block device. 
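+
+    Example (illustrative values; a bare name is prefixed with /dev/, and a
+    non-/dev/ path may carry an optional '|<size>' loopback suffix):
+
+        ensure_block_device('vdb')             # -> /dev/vdb
+        ensure_block_device('/srv/loop0|10G')  # -> 10G loopback device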
+ ''' + _none = ['None', 'none', None] + if (block_device in _none): + error_out('prepare_storage(): Missing required input: block_device=%s.' + % block_device) + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + error_out('Failed to locate valid block device at %s' % bdev) + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) + + +is_ip = ip.is_ip +ns_query = ip.ns_query +get_host_ip = ip.get_host_ip +get_hostname = ip.get_hostname + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + if config('vip'): + vips = config('vip').split() + for vip in vips: + if vip and is_ipv6(vip): + hosts.append(vip) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + for key in list(kwargs.keys()): + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Set the state of the workload status for the charm. + + This calls _determine_os_workload_status() to get the new state, message + and sets the status using status_set() + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. 
+ @returns state, message: the new workload status, user message + """ + state, message = _determine_os_workload_status( + configs, required_interfaces, charm_func, services, ports) + status_set(state, message) + + +def _determine_os_workload_status( + configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Determine the state of the workload status for the charm. + + This function returns the new workload status for the charm based + on the state of the interfaces, the paused state and whether the + services are actually running and any specified ports are open. + + This checks: + + 1. if the unit should be paused, that it is actually paused. If so the + state is 'maintenance' + message, else 'broken'. + 2. that the interfaces/relations are complete. If they are not then + it sets the state to either 'broken' or 'waiting' and an appropriate + message. + 3. If all the relation data is set, then it checks that the actual + services really are running. If not it sets the state to 'broken'. + + If everything is okay then the state returns 'active'. + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message + """ + state, message = _ows_check_if_paused(services, ports) + + if state is None: + state, message = _ows_check_generic_interfaces( + configs, required_interfaces) + + if state != 'maintenance' and charm_func: + # _ows_check_charm_func() may modify the state, message + state, message = _ows_check_charm_func( + state, message, lambda: charm_func(configs)) + + if state is None: + state, message = _ows_check_services_running(services, ports) + + if state is None: + state = 'active' + message = "Unit is ready" + juju_log(message, 'INFO') + + try: + if config(POLICYD_CONFIG_NAME): + message = "{} {}".format(policyd_status_message_prefix(), message) + except Exception: + pass + + return state, message + + +def _ows_check_if_paused(services=None, ports=None): + """Check if the unit is supposed to be paused, and if so check that the + services/ports (if passed) are actually stopped/not being listened to. + + If the unit isn't supposed to be paused, just return None, None + + If the unit is performing a series upgrade, return a message indicating + this. + + @param services: OPTIONAL services spec or list of service names. + @param ports: OPTIONAL list of port numbers. + @returns state, message or None, None + """ + if is_unit_upgrading_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "blocked" + message = ("Ready for do-release-upgrade and reboot. " + "Set complete when finished.") + return state, message + + if is_unit_paused_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "maintenance" + message = "Paused. Use 'resume' action to resume normal service." + return state, message + return None, None + + +def _ows_check_generic_interfaces(configs, required_interfaces): + """Check the complete contexts to determine the workload status. 
+ + - Checks for missing or incomplete contexts + - juju log details of missing required data. + - determines the correct workload status + - creates an appropriate message for status_set(...) + + if there are no problems then the function returns None, None + + @param configs: a templating.OSConfigRenderer() object + @params required_interfaces: {generic_interface: [specific_interface], } + @returns state, message or None, None + """ + incomplete_rel_data = incomplete_relation_data(configs, + required_interfaces) + state = None + message = None + missing_relations = set() + incomplete_relations = set() + + for generic_interface, relations_states in incomplete_rel_data.items(): + related_interface = None + missing_data = {} + # Related or not? + for interface, relation_state in relations_states.items(): + if relation_state.get('related'): + related_interface = interface + missing_data = relation_state.get('missing_data') + break + # No relation ID for the generic_interface? + if not related_interface: + juju_log("{} relation is missing and must be related for " + "functionality. ".format(generic_interface), 'WARN') + state = 'blocked' + missing_relations.add(generic_interface) + else: + # Relation ID eists but no related unit + if not missing_data: + # Edge case - relation ID exists but departings + _hook_name = hook_name() + if (('departed' in _hook_name or 'broken' in _hook_name) and + related_interface in _hook_name): + state = 'blocked' + missing_relations.add(generic_interface) + juju_log("{} relation's interface, {}, " + "relationship is departed or broken " + "and is required for functionality." + "".format(generic_interface, related_interface), + "WARN") + # Normal case relation ID exists but no related unit + # (joining) + else: + juju_log("{} relations's interface, {}, is related but has" + " no units in the relation." + "".format(generic_interface, related_interface), + "INFO") + # Related unit exists and data missing on the relation + else: + juju_log("{} relation's interface, {}, is related awaiting " + "the following data from the relationship: {}. " + "".format(generic_interface, related_interface, + ", ".join(missing_data)), "INFO") + if state != 'blocked': + state = 'waiting' + if generic_interface not in missing_relations: + incomplete_relations.add(generic_interface) + + if missing_relations: + message = "Missing relations: {}".format(", ".join(missing_relations)) + if incomplete_relations: + message += "; incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'blocked' + elif incomplete_relations: + message = "Incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'waiting' + + return state, message + + +def _ows_check_charm_func(state, message, charm_func_with_configs): + """Run a custom check function for the charm to see if it wants to + change the state. This is only run if not in 'maintenance' and + tests to see if the new state is more important that the previous + one determined by the interfaces/relations check. + + @param state: the previously determined state so far. + @param message: the user orientated message so far. + @param charm_func: a callable function that returns state, message + @returns state, message strings. 
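+
+    Example: a charm_func returning ('blocked', 'certificates missing')
+    overrides a previously determined 'waiting' state, since 'blocked'
+    ranks higher in workload_state_compare().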
+ """ + if charm_func_with_configs: + charm_state, charm_message = charm_func_with_configs() + if (charm_state != 'active' and + charm_state != 'unknown' and + charm_state is not None): + state = workload_state_compare(state, charm_state) + if message: + charm_message = charm_message.replace("Incomplete relations: ", + "") + message = "{}, {}".format(message, charm_message) + else: + message = charm_message + return state, message + + +def _ows_check_services_running(services, ports): + """Check that the services that should be running are actually running + and that any ports specified are being listened to. + + @param services: list of strings OR dictionary specifying services/ports + @param ports: list of ports + @returns state, message: strings or None, None + """ + messages = [] + state = None + if services is not None: + services = _extract_services_list_helper(services) + services_running, running = _check_running_services(services) + if not all(running): + messages.append( + "Services not running that should be: {}" + .format(", ".join(_filter_tuples(services_running, False)))) + state = 'blocked' + # also verify that the ports that should be open are open + # NB, that ServiceManager objects only OPTIONALLY have ports + map_not_open, ports_open = ( + _check_listening_on_services_ports(services)) + if not all(ports_open): + # find which service has missing ports. They are in service + # order which makes it a bit easier. + message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in map_not_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "Services with ports not open that should be: {}" + .format(message)) + state = 'blocked' + + if ports is not None: + # and we can also check ports which we don't know the service for + ports_open, ports_open_bools = _check_listening_on_ports_list(ports) + if not all(ports_open_bools): + messages.append( + "Ports which should be open, but are not: {}" + .format(", ".join([str(p) for p, v in ports_open + if not v]))) + state = 'blocked' + + if state is not None: + message = "; ".join(messages) + return state, message + + return None, None + + +def _extract_services_list_helper(services): + """Extract a OrderedDict of {service: [ports]} of the supplied services + for use by the other functions. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param services: see above + @returns OrderedDict(service: [ports], ...) + """ + if services is None: + return {} + if isinstance(services, dict): + services = services.values() + # either extract the list of services from the dictionary, or if + # it is a simple string, use that. i.e. works with mixed lists. + _s = OrderedDict() + for s in services: + if isinstance(s, dict) and 'service' in s: + _s[s['service']] = s.get('ports', []) + if isinstance(s, str): + _s[s] = [] + return _s + + +def _check_running_services(services): + """Check that the services dict provided is actually running and provide + a list of (service, boolean) tuples for each service. + + Returns both a zipped list of (service, boolean) and a list of booleans + in the same order as the services. + + @param services: OrderedDict of strings: [ports], one for each service to + check. 
+ @returns [(service, boolean), ...], : results for checks + [boolean] : just the result of the service checks + """ + services_running = [service_running(s) for s in services] + return list(zip(services, services_running)), services_running + + +def _check_listening_on_services_ports(services, test=False): + """Check that the unit is actually listening (has the port open) on the + ports that the service specifies are open. If test is True then the + function returns the services with ports that are open rather than + closed. + + Returns an OrderedDict of service: ports and a list of booleans + + @param services: OrderedDict(service: [port, ...], ...) + @param test: default=False, if False, test for closed, otherwise open. + @returns OrderedDict(service: [port-not-open, ...]...), [boolean] + """ + test = not(not(test)) # ensure test is True or False + all_ports = list(itertools.chain(*services.values())) + ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] + map_ports = OrderedDict() + matched_ports = [p for p, opened in zip(all_ports, ports_states) + if opened == test] # essentially opened xor test + for service, ports in services.items(): + set_ports = set(ports).intersection(matched_ports) + if set_ports: + map_ports[service] = set_ports + return map_ports, ports_states + + +def _check_listening_on_ports_list(ports): + """Check that the ports list given are being listened to + + Returns a list of ports being listened to and a list of the + booleans. + + @param ports: LIST or port numbers. + @returns [(port_num, boolean), ...], [boolean] + """ + ports_open = [port_has_listener('0.0.0.0', p) for p in ports] + return zip(ports, ports_open), ports_open + + +def _filter_tuples(services_states, state): + """Return a simple list from a list of tuples according to the condition + + @param services_states: LIST of (string, boolean): service and running + state. + @param state: Boolean to match the tuple against. + @returns [LIST of strings] that matched the tuple RHS. + """ + return [s for s, b in services_states if b == state] + + +def workload_state_compare(current_workload_state, workload_state): + """ Return highest priority of two states""" + hierarchy = {'unknown': -1, + 'active': 0, + 'maintenance': 1, + 'waiting': 2, + 'blocked': 3, + } + + if hierarchy.get(workload_state) is None: + workload_state = 'unknown' + if hierarchy.get(current_workload_state) is None: + current_workload_state = 'unknown' + + # Set workload_state based on hierarchy of statuses + if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): + return current_workload_state + else: + return workload_state + + +def incomplete_relation_data(configs, required_interfaces): + """Check complete contexts against required_interfaces + Return dictionary of incomplete relation data. + + configs is an OSConfigRenderer object with configs registered + + required_interfaces is a dictionary of required general interfaces + with dictionary values of possible specific interfaces. + Example: + required_interfaces = {'database': ['shared-db', 'pgsql-db']} + + The interface is said to be satisfied if anyone of the interfaces in the + list has a complete context. + + Return dictionary of incomplete or missing required contexts with relation + status of interfaces and any missing data points. 
Example:
+        {'message':
+             {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+              'zeromq-configuration': {'related': False}},
+         'identity':
+             {'identity-service': {'related': False}},
+         'database':
+             {'pgsql-db': {'related': False},
+              'shared-db': {'related': True}}}
+    """
+    complete_ctxts = configs.complete_contexts()
+    incomplete_relations = [
+        svc_type
+        for svc_type, interfaces in required_interfaces.items()
+        if not set(interfaces).intersection(complete_ctxts)]
+    return {
+        i: configs.get_incomplete_context_data(required_interfaces[i])
+        for i in incomplete_relations}
+
+
+def do_action_openstack_upgrade(package, upgrade_callback, configs):
+    """Perform action-managed OpenStack upgrade.
+
+    Upgrades packages to the configured openstack-origin version and sets
+    the corresponding action status as a result.
+
+    If the charm was installed from source we cannot upgrade it.
+    For backwards compatibility a config flag (action-managed-upgrade) must
+    be set for this code to run, otherwise a full service level upgrade will
+    fire on config-changed.
+
+    @param package: package name for determining if upgrade available
+    @param upgrade_callback: function callback to charm's upgrade function
+    @param configs: templating object derived from OSConfigRenderer class
+
+    @return: True if upgrade successful; False if upgrade failed or skipped
+    """
+    ret = False
+
+    if openstack_upgrade_available(package):
+        if config('action-managed-upgrade'):
+            juju_log('Upgrading OpenStack release')
+
+            try:
+                upgrade_callback(configs=configs)
+                action_set({'outcome': 'success, upgrade completed.'})
+                ret = True
+            except Exception:
+                action_set({'outcome': 'upgrade failed, see traceback.'})
+                action_set({'traceback': traceback.format_exc()})
+                action_fail('do_openstack_upgrade resulted in an '
+                            'unexpected error')
+        else:
+            action_set({'outcome': 'action-managed-upgrade config is '
+                                   'False, skipped upgrade.'})
+    else:
+        action_set({'outcome': 'no upgrade available.'})
+
+    return ret
+
+
+def remote_restart(rel_name, remote_service=None):
+    trigger = {
+        'restart-trigger': str(uuid.uuid4()),
+    }
+    if remote_service:
+        trigger['remote-service'] = remote_service
+    for rid in relation_ids(rel_name):
+        # This subordinate can be related to two separate services using
+        # different subordinate relations so only issue the restart if
+        # the principal is connected down the relation we think it is
+        if related_units(relid=rid):
+            relation_set(relation_id=rid,
+                         relation_settings=trigger,
+                         )
+
+
+def check_actually_paused(services=None, ports=None):
+    """Check that services listed in the services object and ports
+    are actually closed (not listened to), to verify that the unit is
+    properly paused.
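+    Used by _ows_check_if_paused() to confirm that a pause (or series
+    upgrade) actually stopped the services and closed the ports.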
+ + @param services: See _extract_services_list_helper + @returns status, : string for status (None if okay) + message : string for problem for status_set + """ + state = None + message = None + messages = [] + if services is not None: + services = _extract_services_list_helper(services) + services_running, services_states = _check_running_services(services) + if any(services_states): + # there shouldn't be any running so this is a problem + messages.append("these services running: {}" + .format(", ".join( + _filter_tuples(services_running, True)))) + state = "blocked" + ports_open, ports_open_bools = ( + _check_listening_on_services_ports(services, True)) + if any(ports_open_bools): + message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in ports_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "these service:ports are open: {}".format(message)) + state = 'blocked' + if ports is not None: + ports_open, bools = _check_listening_on_ports_list(ports) + if any(bools): + messages.append( + "these ports which should be closed, but are open: {}" + .format(", ".join([str(p) for p, v in ports_open if v]))) + state = 'blocked' + if messages: + message = ("Services should be paused but {}" + .format(", ".join(messages))) + return state, message + + +def set_unit_paused(): + """Set the unit to a paused state in the local kv() store. + This does NOT actually pause the unit + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', True) + + +def clear_unit_paused(): + """Clear the unit from a paused state in the local kv() store + This does NOT actually restart any services - it only clears the + local state. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', False) + + +def is_unit_paused_set(): + """Return the state of the kv().get('unit-paused'). + This does NOT verify that the unit really is paused. + + To help with units that don't have HookData() (testing) + if it excepts, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-paused'))) + except Exception: + return False + + +def manage_payload_services(action, services=None, charm_func=None): + """Run an action against all services. + + An optional charm_func() can be called. It should raise an Exception to + indicate that the function failed. If it was succesfull it should return + None or an optional message. + + The signature for charm_func is: + charm_func() -> message: str + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + :param action: Action to run: pause, resume, start or stop. + :type action: str + :param services: See above + :type services: See above + :param charm_func: function to run for custom charm pausing. 
+ :type charm_func: f() + :returns: Status boolean and list of messages + :rtype: (bool, []) + :raises: RuntimeError + """ + actions = { + 'pause': service_pause, + 'resume': service_resume, + 'start': service_start, + 'stop': service_stop} + action = action.lower() + if action not in actions.keys(): + raise RuntimeError( + "action: {} must be one of: {}".format(action, + ', '.join(actions.keys()))) + services = _extract_services_list_helper(services) + messages = [] + success = True + if services: + for service in services.keys(): + rc = actions[action](service) + if not rc: + success = False + messages.append("{} didn't {} cleanly.".format(service, + action)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + success = False + messages.append(str(e)) + return success, messages + + +def pause_unit(assess_status_func, services=None, ports=None, + charm_func=None): + """Pause a unit by stopping the services and setting 'unit-paused' + in the local kv() store. + + Also checks that the services have stopped and ports are no longer + being listened to. + + An optional charm_func() can be called that can either raise an + Exception or return non None, None to indicate that the unit + didn't pause cleanly. + + The signature for charm_func is: + charm_func() -> message: string + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param assess_status_func: (f() -> message: string | None) or None + @param services: OPTIONAL see above + @param ports: OPTIONAL list of port + @param charm_func: function to run for custom charm pausing. + @returns None + @raises Exception(message) on an error for action_fail(). + """ + _, messages = manage_payload_services( + 'pause', + services=services, + charm_func=charm_func) + set_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages and not is_unit_upgrading_set(): + raise Exception("Couldn't pause: {}".format("; ".join(messages))) + + +def resume_unit(assess_status_func, services=None, ports=None, + charm_func=None): + """Resume a unit by starting the services and clearning 'unit-paused' + in the local kv() store. + + Also checks that the services have started and ports are being listened to. + + An optional charm_func() can be called that can either raise an + Exception or return non None to indicate that the unit + didn't resume cleanly. + + The signature for charm_func is: + charm_func() -> message: string + + charm_func() is executed after any services are started, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param assess_status_func: (f() -> message: string | None) or None + @param services: OPTIONAL see above + @param ports: OPTIONAL list of port + @param charm_func: function to run for custom charm resuming. + @returns None + @raises Exception(message) on an error for action_fail(). 
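+
+    Example (sketch; assumes the charm defines its own assess_status()
+    helper):
+
+        resume_unit(assess_status, services=['apache2'])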
+ """ + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) + clear_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages: + raise Exception("Couldn't resume: {}".format("; ".join(messages))) + + +def make_assess_status_func(*args, **kwargs): + """Creates an assess_status_func() suitable for handing to pause_unit() + and resume_unit(). + + This uses the _determine_os_workload_status(...) function to determine + what the workload_status should be for the unit. If the unit is + not in maintenance or active states, then the message is returned to + the caller. This is so an action that doesn't result in either a + complete pause or complete resume can signal failure with an action_fail() + """ + def _assess_status_func(): + state, message = _determine_os_workload_status(*args, **kwargs) + status_set(state, message) + if state not in ['maintenance', 'active']: + return message + return None + + return _assess_status_func + + +def pausable_restart_on_change(restart_map, stopstart=False, + restart_functions=None): + """A restart_on_change decorator that checks to see if the unit is + paused. If it is paused then the decorated function doesn't fire. + + This is provided as a helper, as the @restart_on_change(...) decorator + is in core.host, yet the openstack specific helpers are in this file + (contrib.openstack.utils). Thus, this needs to be an optional feature + for openstack charms (or charms that wish to use the openstack + pause/resume type features). + + It is used as follows: + + from contrib.openstack.utils import ( + pausable_restart_on_change as restart_on_change) + + @restart_on_change(restart_map, stopstart=) + def some_hook(...): + pass + + see core.utils.restart_on_change() for more details. + + Note restart_map can be a callable, in which case, restart_map is only + evaluated at runtime. This means that it is lazy and the underlying + function won't be called if the decorated function is never called. Note, + retains backwards compatibility for passing a non-callable dictionary. + + @param f: the function to decorate + @param restart_map: (optionally callable, which then returns the + restart_map) the restart map {conf_file: [services]} + @param stopstart: DEFAULT false; whether to stop, start or just restart + @returns decorator to use a restart_on_change with pausability + """ + def wrap(f): + # py27 compatible nonlocal variable. When py3 only, replace with + # nonlocal keyword + __restart_map_cache = {'cache': None} + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + if is_unit_paused_set(): + return f(*args, **kwargs) + if __restart_map_cache['cache'] is None: + __restart_map_cache['cache'] = restart_map() \ + if callable(restart_map) else restart_map + # otherwise, normal restart_on_change functionality + return restart_on_change_helper( + (lambda: f(*args, **kwargs)), __restart_map_cache['cache'], + stopstart, restart_functions) + return wrapped_f + return wrap + + +def ordered(orderme): + """Converts the provided dictionary into a collections.OrderedDict. + + The items in the returned OrderedDict will be inserted based on the + natural sort order of the keys. Nested dictionaries will also be sorted + in order to ensure fully predictable ordering. + + :param orderme: the dict to order + :return: collections.OrderedDict + :raises: ValueError: if `orderme` isn't a dict instance. 
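+
+    Example:
+
+        ordered({'b': 1, 'a': {'d': 4, 'c': 3}})
+        # -> OrderedDict([('a', OrderedDict([('c', 3), ('d', 4)])), ('b', 1)])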
+ """ + if not isinstance(orderme, dict): + raise ValueError('argument must be a dict type') + + result = OrderedDict() + for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + if isinstance(v, dict): + result[k] = ordered(v) + else: + result[k] = v + + return result + + +def config_flags_parser(config_flags): + """Parses config flags string into dict. + + This parsing method supports a few different formats for the config + flag values to be parsed: + + 1. A string in the simple format of key=value pairs, with the possibility + of specifying multiple key value pairs within the same string. For + example, a string in the format of 'key1=value1, key2=value2' will + return a dict of: + + {'key1': 'value1', 'key2': 'value2'}. + + 2. A string in the above format, but supporting a comma-delimited list + of values for the same key. For example, a string in the format of + 'key1=value1, key2=value3,value4,value5' will return a dict of: + + {'key1': 'value1', 'key2': 'value2,value3,value4'} + + 3. A string containing a colon character (:) prior to an equal + character (=) will be treated as yaml and parsed as such. This can be + used to specify more complex key value pairs. For example, + a string in the format of 'key1: subkey1=value1, subkey2=value2' will + return a dict of: + + {'key1', 'subkey1=value1, subkey2=value2'} + + The provided config_flags string may be a list of comma-separated values + which themselves may be comma-separated list of values. + """ + # If we find a colon before an equals sign then treat it as yaml. + # Note: limit it to finding the colon first since this indicates assignment + # for inline yaml. + colon = config_flags.find(':') + equals = config_flags.find('=') + if colon > 0: + if colon < equals or equals < 0: + return ordered(yaml.safe_load(config_flags)) + + if config_flags.find('==') >= 0: + juju_log("config_flags is not in expected format (key=value)", + level=ERROR) + raise OSContextError + + # strip the following from each value. + post_strippers = ' ,' + # we strip any leading/trailing '=' or ' ' from the string then + # split on '='. + split = config_flags.strip(' =').split('=') + limit = len(split) + flags = OrderedDict() + for i in range(0, limit - 1): + current = split[i] + next = split[i + 1] + vindex = next.rfind(',') + if (i == limit - 2) or (vindex < 0): + value = next + else: + value = next[:vindex] + + if i == 0: + key = current + else: + # if this not the first entry, expect an embedded key. + index = current.rfind(',') + if index < 0: + juju_log("Invalid config value(s) at index %s" % (i), + level=ERROR) + raise OSContextError + key = current[index + 1:] + + # Add to collection. + flags[key.strip(post_strippers)] = value.rstrip(post_strippers) + + return flags + + +def os_application_version_set(package): + '''Set version of application for Juju 2.0 and later''' + application_version = get_upstream_version(package) + # NOTE(jamespage) if not able to figure out package version, fallback to + # openstack codename version detection. + if not application_version: + application_version_set(os_release(package)) + else: + application_version_set(application_version) + + +def os_application_status_set(check_function): + """Run the supplied function and set the application status accordingly. + + :param check_function: Function to run to get app states and messages. 
+ :type check_function: function + """ + state, message = check_function() + status_set(state, message, application=True) + + +def enable_memcache(source=None, release=None, package=None): + """Determine if memcache should be enabled on the local unit + + @param release: release of OpenStack currently deployed + @param package: package to derive OpenStack version deployed + @returns boolean Whether memcache should be enabled + """ + _release = None + if release: + _release = release + else: + _release = os_release(package) + if not _release: + _release = get_os_codename_install_source(source) + + return CompareOpenStackReleases(_release) >= 'mitaka' + + +def token_cache_pkgs(source=None, release=None): + """Determine additional packages needed for token caching + + @param source: source string for charm + @param release: release of OpenStack currently deployed + @returns List of package to enable token caching + """ + packages = [] + if enable_memcache(source=source, release=release): + packages.extend(['memcached', 'python-memcache']) + return packages + + +def update_json_file(filename, items): + """Updates the json `filename` with a given dict. + :param filename: path to json file (e.g. /etc/glance/policy.json) + :param items: dict of items to update + """ + if not items: + return + + with open(filename) as fd: + policy = json.load(fd) + + # Compare before and after and if nothing has changed don't write the file + # since that could cause unnecessary service restarts. + before = json.dumps(policy, indent=4, sort_keys=True) + policy.update(items) + after = json.dumps(policy, indent=4, sort_keys=True) + if before == after: + return + + with open(filename, "w") as fd: + fd.write(after) + + +@cached +def snap_install_requested(): + """ Determine if installing from snaps + + If openstack-origin is of the form snap:track/channel[/branch] + and channel is in SNAPS_CHANNELS return True. + """ + origin = config('openstack-origin') or "" + if not origin.startswith('snap:'): + return False + + _src = origin[5:] + if '/' in _src: + channel = _src.split('/')[1] + else: + # Handle snap:track with no channel + channel = 'stable' + return valid_snap_channel(channel) + + +def get_snaps_install_info_from_origin(snaps, src, mode='classic'): + """Generate a dictionary of snap install information from origin + + @param snaps: List of snaps + @param src: String of openstack-origin or source of the form + snap:track/channel + @param mode: String classic, devmode or jailmode + @returns: Dictionary of snaps with channels and modes + """ + + if not src.startswith('snap:'): + juju_log("Snap source is not a snap origin", 'WARN') + return {} + + _src = src[5:] + channel = '--channel={}'.format(_src) + + return {snap: {'channel': channel, 'mode': mode} + for snap in snaps} + + +def install_os_snaps(snaps, refresh=False): + """Install OpenStack snaps from channel and with mode + + @param snaps: Dictionary of snaps with channels and modes of the form: + {'snap_name': {'channel': 'snap_channel', + 'mode': 'snap_mode'}} + Where channel is a snapstore channel and mode is --classic, --devmode + or --jailmode. 
+ @param post_snap_install: Callback function to run after snaps have been + installed + """ + + def _ensure_flag(flag): + if flag.startswith('--'): + return flag + return '--{}'.format(flag) + + if refresh: + for snap in snaps.keys(): + snap_refresh(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) + else: + for snap in snaps.keys(): + snap_install(snap, + _ensure_flag(snaps[snap]['channel']), + _ensure_flag(snaps[snap]['mode'])) + + +def set_unit_upgrading(): + """Set the unit to a upgrading state in the local kv() store. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-upgrading', True) + + +def clear_unit_upgrading(): + """Clear the unit from a upgrading state in the local kv() store + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-upgrading', False) + + +def is_unit_upgrading_set(): + """Return the state of the kv().get('unit-upgrading'). + + To help with units that don't have HookData() (testing) + if it excepts, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-upgrading'))) + except Exception: + return False + + +def series_upgrade_prepare(pause_unit_helper=None, configs=None): + """ Run common series upgrade prepare tasks. + + :param pause_unit_helper: function: Function to pause unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + set_unit_upgrading() + if pause_unit_helper and configs: + if not is_unit_paused_set(): + pause_unit_helper(configs) + + +def series_upgrade_complete(resume_unit_helper=None, configs=None): + """ Run common series upgrade complete tasks. + + :param resume_unit_helper: function: Function to resume unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + clear_unit_paused() + clear_unit_upgrading() + if configs: + configs.write_all() + if resume_unit_helper: + resume_unit_helper(configs) + + +def is_db_initialised(): + """Check leader storage to see if database has been initialised. + + :returns: Whether DB has been initialised + :rtype: bool + """ + db_initialised = None + if leader_get('db-initialised') is None: + juju_log( + 'db-initialised key missing, assuming db is not initialised', + 'DEBUG') + db_initialised = False + else: + db_initialised = bool_from_string(leader_get('db-initialised')) + juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG') + return db_initialised + + +def set_db_initialised(): + """Add flag to leader storage to indicate database has been initialised. + """ + juju_log('Setting db-initialised to True', 'DEBUG') + leader_set({'db-initialised': True}) + + +def is_db_maintenance_mode(relid=None): + """Check relation data from notifications of db in maintenance mode. + + :returns: Whether db has notified it is in maintenance mode. 
+ :rtype: bool + """ + juju_log('Checking for maintenance notifications', 'DEBUG') + if relid: + r_ids = [relid] + else: + r_ids = relation_ids('shared-db') + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + notifications = [] + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + for key, value in settings.items(): + if value and key in DB_MAINTENANCE_KEYS: + juju_log( + 'Unit: {}, Key: {}, Value: {}'.format(unit, key, value), + 'DEBUG') + try: + notifications.append(bool_from_string(value)) + except ValueError: + juju_log( + 'Could not discern bool from {}'.format(value), + 'WARN') + pass + return True in notifications + + +@cached +def container_scoped_relations(): + """Get all the container scoped relations + + :returns: List of relation names + :rtype: List + """ + md = metadata() + relations = [] + for relation_type in ('provides', 'requires', 'peers'): + for relation in md.get(relation_type, []): + if md[relation_type][relation].get('scope') == 'container': + relations.append(relation) + return relations + + +def is_db_ready(use_current_context=False, rel_name=None): + """Check remote database is ready to be used. + + Database relations are expected to provide a list of 'allowed' units to + confirm that the database is ready for use by those units. + + If db relation has provided this information and local unit is a member, + returns True otherwise False. + + :param use_current_context: Whether to limit checks to current hook + context. + :type use_current_context: bool + :param rel_name: Name of relation to check + :type rel_name: string + :returns: Whether remote db is ready. + :rtype: bool + :raises: Exception + """ + key = 'allowed_units' + + rel_name = rel_name or 'shared-db' + this_unit = local_unit() + + if use_current_context: + if relation_id() in relation_ids(rel_name): + rids_units = [(None, None)] + else: + raise Exception("use_current_context=True but not in {} " + "rel hook contexts (currently in {})." + .format(rel_name, relation_id())) + else: + rids_units = [(r_id, u) + for r_id in relation_ids(rel_name) + for u in related_units(r_id)] + + for rid, unit in rids_units: + allowed_units = relation_get(rid=rid, unit=unit, attribute=key) + if allowed_units and this_unit in allowed_units.split(): + juju_log("This unit ({}) is in allowed unit list from {}".format( + this_unit, + unit), 'DEBUG') + return True + + juju_log("This unit was not found in any allowed unit list") + return False + + +def is_expected_scale(peer_relation_name='cluster'): + """Query juju goal-state to determine whether our peer- and dependency- + relations are at the expected scale. + + Useful for deferring per unit per relation housekeeping work until we are + ready to complete it successfully and without unnecessary repetiton. + + Always returns True if version of juju used does not support goal-state. + + :param peer_relation_name: Name of peer relation + :type rel_name: string + :returns: True or False + :rtype: bool + """ + def _get_relation_id(rel_type): + return next((rid for rid in relation_ids(reltype=rel_type)), None) + + Relation = namedtuple('Relation', 'rel_type rel_id') + peer_rid = _get_relation_id(peer_relation_name) + # Units with no peers should still have a peer relation. 
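+    # A missing peer relation id therefore means the unit cannot yet be
+    # at the expected scale.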
+ if not peer_rid: + juju_log('Not at expected scale, no peer relation found', 'DEBUG') + return False + expected_relations = [ + Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))] + if expect_ha(): + expected_relations.append( + Relation( + rel_type='ha', + rel_id=_get_relation_id('ha'))) + juju_log( + 'Checking scale of {} relations'.format( + ','.join([r.rel_type for r in expected_relations])), + 'DEBUG') + try: + if (len(related_units(relid=peer_rid)) < + len(list(expected_peer_units()))): + return False + for rel in expected_relations: + if not rel.rel_id: + juju_log( + 'Expected to find {} relation, but it is missing'.format( + rel.rel_type), + 'DEBUG') + return False + # Goal state returns every unit even for container scoped + # relations but the charm only ever has a relation with + # the local unit. + if rel.rel_type in container_scoped_relations(): + expected_count = 1 + else: + expected_count = len( + list(expected_related_units(reltype=rel.rel_type))) + if len(related_units(relid=rel.rel_id)) < expected_count: + juju_log( + ('Not at expected scale, not enough units on {} ' + 'relation'.format(rel.rel_type)), + 'DEBUG') + return False + except NotImplementedError: + return True + juju_log('All checks have passed, unit is at expected scale', 'DEBUG') + return True + + +def get_peer_key(unit_name): + """Get the peer key for this unit. + + The peer key is the key a unit uses to publish its status down the peer + relation + + :param unit_name: Name of unit + :type unit_name: string + :returns: Peer key for given unit + :rtype: string + """ + return 'unit-state-{}'.format(unit_name.replace('/', '-')) + + +UNIT_READY = 'READY' +UNIT_NOTREADY = 'NOTREADY' +UNIT_UNKNOWN = 'UNKNOWN' +UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN] + + +def inform_peers_unit_state(state, relation_name='cluster'): + """Inform peers of the state of this unit. + + :param state: State of unit to publish + :type state: string + :param relation_name: Name of relation to publish state on + :type relation_name: string + """ + if state not in UNIT_STATES: + raise ValueError( + "Setting invalid state {} for unit".format(state)) + for r_id in relation_ids(relation_name): + relation_set(relation_id=r_id, + relation_settings={ + get_peer_key(local_unit()): state}) + + +def get_peers_unit_state(relation_name='cluster'): + """Get the state of all peers. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Unit states keyed on unit name. + :rtype: dict + :raises: ValueError + """ + r_ids = relation_ids(relation_name) + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + unit_states = {} + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN) + if unit_states[unit] not in UNIT_STATES: + raise ValueError( + "Unit in unknown state {}".format(unit_states[unit])) + return unit_states + + +def are_peers_ready(relation_name='cluster'): + """Check if all peers are ready. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Whether all units are ready. + :rtype: bool + """ + unit_states = get_peers_unit_state(relation_name) + return all(v == UNIT_READY for v in unit_states.values()) + + +def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): + """Inform peers if this unit is ready. + + The check function should return a tuple (state, message). 
A state + of 'READY' indicates the unit is READY. + + :param check_unit_ready_func: Function to run to check readiness + :type check_unit_ready_func: function + :param relation_name: Name of relation to check peers on. + :type relation_name: string + """ + unit_ready, msg = check_unit_ready_func() + if unit_ready: + state = UNIT_READY + else: + state = UNIT_NOTREADY + juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG') + inform_peers_unit_state(state, relation_name) + + +def check_api_unit_ready(check_db_ready=True): + """Check if this unit is ready. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Whether unit state is ready and status message + :rtype: (bool, str) + """ + unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready) + return unit_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_unit_status(check_db_ready=True): + """Return a workload status and message for this unit. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Workload state and message + :rtype: (bool, str) + """ + unit_state = WORKLOAD_STATES.ACTIVE + msg = 'Unit is ready' + if is_db_maintenance_mode(): + unit_state = WORKLOAD_STATES.MAINTENANCE + msg = 'Database in maintenance mode.' + elif is_unit_paused_set(): + unit_state = WORKLOAD_STATES.BLOCKED + msg = 'Unit paused.' + elif check_db_ready and not is_db_ready(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Allowed_units list provided but this unit not present' + elif not is_db_initialised(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Database not initialised' + elif not is_expected_scale(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Charm and its dependencies not yet at expected scale' + juju_log(msg, 'DEBUG') + return unit_state, msg + + +def check_api_application_ready(): + """Check if this application is ready. + + :returns: Whether application state is ready and status message + :rtype: (bool, str) + """ + app_state, msg = get_api_application_status() + return app_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_application_status(): + """Return a workload status and message for this application. + + :returns: Workload state and message + :rtype: (bool, str) + """ + app_state, msg = get_api_unit_status() + if app_state == WORKLOAD_STATES.ACTIVE: + if are_peers_ready(): + return WORKLOAD_STATES.ACTIVE, 'Application Ready' + else: + return WORKLOAD_STATES.WAITING, 'Some units are not ready' + return app_state, msg diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/vaultlocker.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/vaultlocker.py new file mode 100644 index 0000000000000000000000000000000000000000..4ee6c1dba910b824467c2c34e0a3d3d0f3fd906d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/openstack/vaultlocker.py @@ -0,0 +1,179 @@ +# Copyright 2018 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import charmhelpers.contrib.openstack.alternatives as alternatives
+import charmhelpers.contrib.openstack.context as context
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as host
+import charmhelpers.core.templating as templating
+import charmhelpers.core.unitdata as unitdata
+
+VAULTLOCKER_BACKEND = 'charm-vaultlocker'
+
+
+class VaultKVContext(context.OSContextGenerator):
+    """Vault KV context for interaction with vault-kv interfaces"""
+    interfaces = ['secrets-storage']
+
+    def __init__(self, secret_backend=None):
+        super(VaultKVContext, self).__init__()
+        self.secret_backend = (
+            secret_backend or 'charm-{}'.format(hookenv.service_name())
+        )
+
+    def __call__(self):
+        try:
+            import hvac
+        except ImportError:
+            # BUG: #1862085 - if the relation is made to vault, but the
+            # 'encrypt' option is not set, then the charm errors with an
+            # import warning.  This catches that, logs a warning, and returns
+            # with an empty context.
+            hookenv.log("VaultKVContext: trying to use the hvac python module "
+                        "but it's not available.  Is the secrets-storage "
+                        "relation made, but the encrypt option not set?",
+                        level=hookenv.WARNING)
+            # return an empty context on hvac import error
+            return {}
+        ctxt = {}
+        # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323
+        db = unitdata.kv()
+        # currently known-good secret-id
+        secret_id = db.get('secret-id')
+
+        for relation_id in hookenv.relation_ids(self.interfaces[0]):
+            for unit in hookenv.related_units(relation_id):
+                data = hookenv.relation_get(unit=unit,
+                                            rid=relation_id)
+                vault_url = data.get('vault_url')
+                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
+                token = data.get('{}_token'.format(hookenv.local_unit()))
+
+                if all([vault_url, role_id, token]):
+                    token = json.loads(token)
+                    vault_url = json.loads(vault_url)
+
+                    # Tokens may change when secret_id's are being
+                    # reissued - if so use token to get new secret_id
+                    token_success = False
+                    try:
+                        secret_id = retrieve_secret_id(
+                            url=vault_url,
+                            token=token
+                        )
+                        token_success = True
+                    except hvac.exceptions.InvalidRequest:
+                        # Try next
+                        pass
+
+                    if token_success:
+                        db.set('secret-id', secret_id)
+                        db.flush()
+
+                        ctxt['vault_url'] = vault_url
+                        ctxt['role_id'] = json.loads(role_id)
+                        ctxt['secret_id'] = secret_id
+                        ctxt['secret_backend'] = self.secret_backend
+                        vault_ca = data.get('vault_ca')
+                        if vault_ca:
+                            ctxt['vault_ca'] = json.loads(vault_ca)
+
+                        self.complete = True
+                        break
+                    else:
+                        if secret_id:
+                            ctxt['vault_url'] = vault_url
+                            ctxt['role_id'] = json.loads(role_id)
+                            ctxt['secret_id'] = secret_id
+                            ctxt['secret_backend'] = self.secret_backend
+                            vault_ca = data.get('vault_ca')
+                            if vault_ca:
+                                ctxt['vault_ca'] = json.loads(vault_ca)
+
+            if self.complete:
+                break
+
+        if ctxt:
+            self.complete = True
+
+        return ctxt
+
+
+def write_vaultlocker_conf(context, priority=100):
+    """Write vaultlocker configuration to disk and install alternative
+
+    :param context: Dict of data from vault-kv relation
+    :ptype context: dict
+    :param priority: Priority of alternative configuration
+    :ptype priority: int"""
+    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
+        hookenv.service_name()
+    )
+    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
+    templating.render(source='vaultlocker.conf.j2',
+                      target=charm_vl_path,
+                      context=context, perms=0o600)
+    
alternatives.install_alternative('vaultlocker.conf', + '/etc/vaultlocker/vaultlocker.conf', + charm_vl_path, priority) + + +def vault_relation_complete(backend=None): + """Determine whether vault relation is complete + + :param backend: Name of secrets backend requested + :ptype backend: string + :returns: whether the relation to vault is complete + :rtype: bool""" + try: + import hvac + except ImportError: + return False + try: + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + except hvac.exceptions.InvalidRequest: + return False + + +# TODO: contrib a high level unwrap method to hvac that works +def retrieve_secret_id(url, token): + """Retrieve a response-wrapped secret_id from Vault + + :param url: URL to Vault Server + :ptype url: str + :param token: One shot Token to use + :ptype token: str + :returns: secret_id to use for Vault Access + :rtype: str""" + import hvac + try: + # hvac 0.10.1 changed default adapter to JSONAdapter + client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request) + except AttributeError: + # hvac < 0.6.2 doesn't have adapter but uses the same response interface + client = hvac.Client(url=url, token=token) + else: + # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate + if not isinstance(client.adapter, hvac.adapters.Request): + client.adapter = hvac.adapters.Request(base_uri=url, token=token) + response = client._post('/v1/sys/wrapping/unwrap') + if response.status_code == 200: + data = response.json() + return data['data']['secret_id'] diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/peerstorage/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/peerstorage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a8fa60c2a3080d088b8e0abae370aa355c80ec56 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/peerstorage/__init__.py @@ -0,0 +1,267 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import six + +from charmhelpers.core.hookenv import relation_id as current_relation_id +from charmhelpers.core.hookenv import ( + is_relation_made, + relation_ids, + relation_get as _relation_get, + local_unit, + relation_set as _relation_set, + leader_get as _leader_get, + leader_set, + is_leader, +) + + +""" +This helper provides functions to support use of a peer relation +for basic key/value storage, with the added benefit that all storage +can be replicated across peer units. 
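+
+Only the first peer relation id found is used for storage, and writes are
+routed through Juju leader storage (leader-set) when the running Juju
+version supports leadership.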
+
+Requirement to use:
+
+To use this, the "peer_echo()" method has to be called from the peer
+relation's relation-changed hook:
+
+@hooks.hook("cluster-relation-changed")  # Adapt this to your peer relation name
+def cluster_relation_changed():
+    peer_echo()
+
+Once this is done, you can use peer storage from anywhere:
+
+@hooks.hook("some-hook")
+def some_hook():
+    # You can store and retrieve key/values this way:
+    if is_relation_made("cluster"):  # from charmhelpers.core.hookenv
+        # There are peers available so we can work with peer storage
+        peer_store("mykey", "myvalue")
+        value = peer_retrieve("mykey")
+        print(value)
+    else:
+        print("No peers joined the relation, cannot share key/values :(")
+"""
+
+
+def leader_get(attribute=None, rid=None):
+    """Wrapper to ensure that settings are migrated from the peer relation.
+
+    This is to support upgrading an environment that does not support
+    Juju leadership election to one that does.
+
+    If a setting is not extant in the leader-get but is on the relation-get
+    peer rel, it is migrated and marked as such so that it is not re-migrated.
+    """
+    migration_key = '__leader_get_migrated_settings__'
+    if not is_leader():
+        return _leader_get(attribute=attribute)
+
+    settings_migrated = False
+    leader_settings = _leader_get(attribute=attribute)
+    previously_migrated = _leader_get(attribute=migration_key)
+
+    if previously_migrated:
+        migrated = set(json.loads(previously_migrated))
+    else:
+        migrated = set([])
+
+    try:
+        if migration_key in leader_settings:
+            del leader_settings[migration_key]
+    except TypeError:
+        pass
+
+    if attribute:
+        if attribute in migrated:
+            return leader_settings
+
+        # If attribute not present in leader db, check if this unit has set
+        # the attribute in the peer relation
+        if not leader_settings:
+            peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
+                                         rid=rid)
+            if peer_setting:
+                leader_set(settings={attribute: peer_setting})
+                leader_settings = peer_setting
+
+        if leader_settings:
+            settings_migrated = True
+            migrated.add(attribute)
+    else:
+        r_settings = _relation_get(unit=local_unit(), rid=rid)
+        if r_settings:
+            for key in set(r_settings.keys()).difference(migrated):
+                # Leader setting wins
+                if not leader_settings.get(key):
+                    leader_settings[key] = r_settings[key]
+
+                settings_migrated = True
+                migrated.add(key)
+
+        if settings_migrated:
+            leader_set(**leader_settings)
+
+    if migrated and settings_migrated:
+        migrated = json.dumps(list(migrated))
+        leader_set(settings={migration_key: migrated})
+
+    return leader_settings
+
+
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
+    """Attempt to use leader-set if supported in the current version of Juju,
+    otherwise falls back on relation-set.
+
+    Note that we only attempt to use leader-set if the provided relation_id is
+    a peer relation id or no relation id is provided (in which case we assume
+    we are within the peer relation context).
+    """
+    try:
+        if relation_id in relation_ids('cluster'):
+            return leader_set(settings=relation_settings, **kwargs)
+        else:
+            raise NotImplementedError
+    except NotImplementedError:
+        return _relation_set(relation_id=relation_id,
+                             relation_settings=relation_settings, **kwargs)
+
+
+def relation_get(attribute=None, unit=None, rid=None):
+    """Attempt to use leader-get if supported in the current version of Juju,
+    otherwise falls back on relation-get.
+
+    Note that we only attempt to use leader-get if the provided rid is a peer
+    relation id or no relation id is provided (in which case we assume we are
+    within the peer relation context).
+    """
+    try:
+        if rid in relation_ids('cluster'):
+            return leader_get(attribute, rid)
+        else:
+            raise NotImplementedError
+    except NotImplementedError:
+        return _relation_get(attribute=attribute, rid=rid, unit=unit)
+
+
+def peer_retrieve(key, relation_name='cluster'):
+    """Retrieve a named key from peer relation `relation_name`."""
+    cluster_rels = relation_ids(relation_name)
+    if len(cluster_rels) > 0:
+        cluster_rid = cluster_rels[0]
+        return relation_get(attribute=key, rid=cluster_rid,
+                            unit=local_unit())
+    else:
+        raise ValueError('Unable to detect '
+                         'peer relation {}'.format(relation_name))
+
+
+def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
+                            inc_list=None, exc_list=None):
+    """Retrieve k/v pairs given a prefix and filter using {inc,exc}_list"""
+    inc_list = inc_list if inc_list else []
+    exc_list = exc_list if exc_list else []
+    peerdb_settings = peer_retrieve('-', relation_name=relation_name)
+    matched = {}
+    if peerdb_settings is None:
+        return matched
+    for k, v in peerdb_settings.items():
+        full_prefix = prefix + delimiter
+        if k.startswith(full_prefix):
+            new_key = k.replace(full_prefix, '')
+            if new_key in exc_list:
+                continue
+            if new_key in inc_list or len(inc_list) == 0:
+                matched[new_key] = v
+    return matched
+
+
+def peer_store(key, value, relation_name='cluster'):
+    """Store the key/value pair on the named peer relation `relation_name`."""
+    cluster_rels = relation_ids(relation_name)
+    if len(cluster_rels) > 0:
+        cluster_rid = cluster_rels[0]
+        relation_set(relation_id=cluster_rid,
+                     relation_settings={key: value})
+    else:
+        raise ValueError('Unable to detect '
+                         'peer relation {}'.format(relation_name))
+
+
+def peer_echo(includes=None, force=False):
+    """Echo filtered attributes back onto the same relation for storage.
+
+    This is a requirement to use the peerstorage module - it needs to be called
+    from the peer relation's changed hook.
+
+    If Juju leader support exists this will be a noop unless force is True.
+    """
+    try:
+        is_leader()
+    except NotImplementedError:
+        pass
+    else:
+        if not force:
+            return  # NOOP if leader-election is supported
+
+    # Use original non-leader calls
+    relation_get = _relation_get
+    relation_set = _relation_set
+
+    rdata = relation_get()
+    echo_data = {}
+    if includes is None:
+        echo_data = rdata.copy()
+        for ex in ['private-address', 'public-address']:
+            if ex in echo_data:
+                echo_data.pop(ex)
+    else:
+        for attribute, value in six.iteritems(rdata):
+            for include in includes:
+                if include in attribute:
+                    echo_data[attribute] = value
+    if len(echo_data) > 0:
+        relation_set(relation_settings=echo_data)
+
+
+def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
+                       peer_store_fatal=False, relation_settings=None,
+                       delimiter='_', **kwargs):
+    """Store passed-in arguments both in argument relation and in peer storage.
+
+    It functions like doing relation_set() and peer_store() at the same time,
+    with the same data.
+
+    @param relation_id: the id of the relation to store the data on. Defaults
+                        to the current relation.
+ @param peer_store_fatal: Set to True, the function will raise an exception + should the peer storage not be available.""" + + relation_settings = relation_settings if relation_settings else {} + relation_set(relation_id=relation_id, + relation_settings=relation_settings, + **kwargs) + if is_relation_made(peer_relation_name): + for key, value in six.iteritems(dict(list(kwargs.items()) + + list(relation_settings.items()))): + key_prefix = relation_id or current_relation_id() + peer_store(key_prefix + delimiter + key, + value, + relation_name=peer_relation_name) + else: + if peer_store_fatal: + raise ValueError('Unable to detect ' + 'peer relation {}'.format(peer_relation_name)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/python.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/python.py new file mode 100644 index 0000000000000000000000000000000000000000..84cba8c4eba34fdd705f4ee39628ebd33b5175a2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/python.py @@ -0,0 +1,21 @@ +# Copyright 2014-2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +# deprecated aliases for backwards compatibility +from charmhelpers.fetch.python import debug # noqa +from charmhelpers.fetch.python import packages # noqa +from charmhelpers.fetch.python import rpdb # noqa +from charmhelpers.fetch.python import version # noqa diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/saltstack/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/saltstack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d74f4039379b608b5c076fd96c906ff5237f1f83 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/saltstack/__init__.py @@ -0,0 +1,116 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Charm Helpers saltstack - declare the state of your machines. + +This helper enables you to declare your machine state, rather than +program it procedurally (and have to test each change to your procedures). 
+Your install hook can be as simple as::
+
+    {{{
+    from charmhelpers.contrib.saltstack import (
+        install_salt_support,
+        update_machine_state,
+    )
+
+
+    def install():
+        install_salt_support()
+        update_machine_state('machine_states/dependencies.yaml')
+        update_machine_state('machine_states/installed.yaml')
+    }}}
+
+and won't need to change (nor will its tests) when you change the machine
+state.
+
+It's using a python package called salt-minion which allows various formats for
+specifying resources, such as::
+
+    {{{
+    /srv/{{ basedir }}:
+        file.directory:
+            - group: ubunet
+            - user: ubunet
+            - require:
+                - user: ubunet
+            - recurse:
+                - user
+                - group
+
+    ubunet:
+        group.present:
+            - gid: 1500
+        user.present:
+            - uid: 1500
+            - gid: 1500
+            - createhome: False
+            - require:
+                - group: ubunet
+    }}}
+
+The docs for all the different state definitions are at:
+    http://docs.saltstack.com/ref/states/all/
+
+
+TODO:
+  * Add test helpers which will ensure that machine state definitions
+    are functionally (but not necessarily logically) correct (i.e. getting
+    salt to parse all state defs).
+  * Add a link to a public bootstrap charm example / blogpost.
+  * Find a way to obviate the need to use the grains['charm_dir'] syntax
+    in templates.
+"""
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+#  Charm Helpers Developers
+import subprocess
+
+import charmhelpers.contrib.templating.contexts
+import charmhelpers.core.host
+import charmhelpers.core.hookenv
+import charmhelpers.fetch
+
+
+salt_grains_path = '/etc/salt/grains'
+
+
+def install_salt_support(from_ppa=True):
+    """Installs the salt-minion helper for machine state.
+
+    By default the salt-minion package is installed from
+    the saltstack PPA. If from_ppa is False you must ensure
+    that the salt-minion package is available in the apt cache.
+    """
+    if from_ppa:
+        subprocess.check_call([
+            '/usr/bin/add-apt-repository',
+            '--yes',
+            'ppa:saltstack/salt',
+        ])
+        subprocess.check_call(['/usr/bin/apt-get', 'update'])
+    # We install salt-common as salt-minion would run the salt-minion
+    # daemon.
+    charmhelpers.fetch.apt_install('salt-common')
+
+
+def update_machine_state(state_path):
+    """Update the machine state using the provided state declaration."""
+    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
+        salt_grains_path)
+    subprocess.check_call([
+        'salt-call',
+        '--local',
+        'state.template',
+        state_path,
+    ])
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/ssl/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/ssl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d238b529e44ad2761d86e96b4845589f8e951c4
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/ssl/__init__.py
@@ -0,0 +1,92 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
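+
+# Helpers for generating self-signed SSL certificates by shelling out to
+# the openssl command line tool; see generate_selfsigned() below.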
+
+import subprocess
+from charmhelpers.core import hookenv
+
+
+def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
+    """Generate selfsigned SSL keypair
+
+    You must provide one of the 3 optional arguments:
+    config, subject or cn
+    If more than one is provided the leftmost will be used
+
+    Arguments:
+    keyfile -- (required) full path to the keyfile to be created
+    certfile -- (required) full path to the certfile to be created
+    keysize -- (optional) SSL key length
+    config -- (optional) openssl configuration file
+    subject -- (optional) dictionary with SSL subject variables
+    cn -- (optional) certificate common name
+
+    Required keys in subject dict:
+    cn -- Common name (eg. FQDN)
+
+    Optional keys in subject dict
+    country -- Country Name (2 letter code)
+    state -- State or Province Name (full name)
+    locality -- Locality Name (eg, city)
+    organization -- Organization Name (eg, company)
+    organizational_unit -- Organizational Unit Name (eg, section)
+    email -- Email Address
+    """
+
+    cmd = []
+    if config:
+        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
+               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
+               "-keyout", keyfile,
+               "-out", certfile, "-config", config]
+    elif subject:
+        ssl_subject = ""
+        if "country" in subject:
+            ssl_subject = ssl_subject + "/C={}".format(subject["country"])
+        if "state" in subject:
+            ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
+        if "locality" in subject:
+            ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
+        if "organization" in subject:
+            ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
+        if "organizational_unit" in subject:
+            ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
+        if "cn" in subject:
+            ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
+        else:
+            hookenv.log("When using \"subject\" argument you must "
+                        "provide \"cn\" field at very least")
+            return False
+        if "email" in subject:
+            ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
+
+        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
+               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
+               "-keyout", keyfile,
+               "-out", certfile, "-subj", ssl_subject]
+    elif cn:
+        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
+               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
+               "-keyout", keyfile,
+               "-out", certfile, "-subj", "/CN={}".format(cn)]
+
+    if not cmd:
+        hookenv.log("No config, subject or cn provided, "
+                    "unable to generate self-signed SSL certificates")
+        return False
+    try:
+        subprocess.check_call(cmd)
+        return True
+    except Exception as e:
+        print("Execution of openssl command failed:\n{}".format(e))
+        return False
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/ssl/service.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/ssl/service.py
new file mode 100644
index 0000000000000000000000000000000000000000..06b534ffa3b0de97a56bac4060c39a9a4485b0c2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/ssl/service.py
@@ -0,0 +1,277 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from os.path import join as path_join
+from os.path import exists
+import subprocess
+
+from charmhelpers.core.hookenv import log, DEBUG
+
+STD_CERT = "standard"
+
+# Mysql server is fairly picky about cert creation and types,
+# so spec its creation separately for now.
+MYSQL_CERT = "mysql"
+
+
+class ServiceCA(object):
+
+    default_expiry = str(365 * 2)
+    default_ca_expiry = str(365 * 6)
+
+    def __init__(self, name, ca_dir, cert_type=STD_CERT):
+        self.name = name
+        self.ca_dir = ca_dir
+        self.cert_type = cert_type
+
+    ###############
+    # Hook Helper API
+    @staticmethod
+    def get_ca(type=STD_CERT):
+        service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
+        ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
+        ca = ServiceCA(service_name, ca_path, type)
+        ca.init()
+        return ca
+
+    @classmethod
+    def get_service_cert(cls, type=STD_CERT):
+        service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
+        # Pass the requested cert type through; previously it was silently
+        # ignored and the standard CA was always used.
+        ca = cls.get_ca(type)
+        crt, key = ca.get_or_create_cert(service_name)
+        return crt, key, ca.get_ca_bundle()
+
+    ###############
+
+    def init(self):
+        log("initializing service ca", level=DEBUG)
+        if not exists(self.ca_dir):
+            self._init_ca_dir(self.ca_dir)
+            self._init_ca()
+
+    @property
+    def ca_key(self):
+        return path_join(self.ca_dir, 'private', 'cacert.key')
+
+    @property
+    def ca_cert(self):
+        return path_join(self.ca_dir, 'cacert.pem')
+
+    @property
+    def ca_conf(self):
+        return path_join(self.ca_dir, 'ca.cnf')
+
+    @property
+    def signing_conf(self):
+        return path_join(self.ca_dir, 'signing.cnf')
+
+    def _init_ca_dir(self, ca_dir):
+        os.mkdir(ca_dir)
+        for i in ['certs', 'crl', 'newcerts', 'private']:
+            sd = path_join(ca_dir, i)
+            if not exists(sd):
+                os.mkdir(sd)
+
+        if not exists(path_join(ca_dir, 'serial')):
+            with open(path_join(ca_dir, 'serial'), 'w') as fh:
+                fh.write('02\n')
+
+        if not exists(path_join(ca_dir, 'index.txt')):
+            with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
+                fh.write('')
+
+    def _init_ca(self):
+        """Generate the root ca's cert and key.
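+
+        Writes ca.cnf and signing.cnf from the templates at the bottom of
+        this module, then invokes openssl to create a self-signed root
+        certificate valid for default_ca_expiry days.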
+ """ + if not exists(path_join(self.ca_dir, 'ca.cnf')): + with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh: + fh.write( + CA_CONF_TEMPLATE % (self.get_conf_variables())) + + if not exists(path_join(self.ca_dir, 'signing.cnf')): + with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh: + fh.write( + SIGNING_CONF_TEMPLATE % (self.get_conf_variables())) + + if exists(self.ca_cert) or exists(self.ca_key): + raise RuntimeError("Initialized called when CA already exists") + cmd = ['openssl', 'req', '-config', self.ca_conf, + '-x509', '-nodes', '-newkey', 'rsa', + '-days', self.default_ca_expiry, + '-keyout', self.ca_key, '-out', self.ca_cert, + '-outform', 'PEM'] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + log("CA Init:\n %s" % output, level=DEBUG) + + def get_conf_variables(self): + return dict( + org_name="juju", + org_unit_name="%s service" % self.name, + common_name=self.name, + ca_dir=self.ca_dir) + + def get_or_create_cert(self, common_name): + if common_name in self: + return self.get_certificate(common_name) + return self.create_certificate(common_name) + + def create_certificate(self, common_name): + if common_name in self: + return self.get_certificate(common_name) + key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name) + self._create_certificate(common_name, key_p, csr_p, crt_p) + return self.get_certificate(common_name) + + def get_certificate(self, common_name): + if common_name not in self: + raise ValueError("No certificate for %s" % common_name) + key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + with open(crt_p) as fh: + crt = fh.read() + with open(key_p) as fh: + key = fh.read() + return crt, key + + def __contains__(self, common_name): + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + return exists(crt_p) + + def _create_certificate(self, common_name, key_p, csr_p, crt_p): + template_vars = self.get_conf_variables() + template_vars['common_name'] = common_name + subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % ( + template_vars) + + log("CA Create Cert %s" % common_name, level=DEBUG) + cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048', + '-nodes', '-days', self.default_expiry, + '-keyout', key_p, '-out', csr_p, '-subj', subj] + subprocess.check_call(cmd, stderr=subprocess.PIPE) + cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p] + subprocess.check_call(cmd, stderr=subprocess.PIPE) + + log("CA Sign Cert %s" % common_name, level=DEBUG) + if self.cert_type == MYSQL_CERT: + cmd = ['openssl', 'x509', '-req', + '-in', csr_p, '-days', self.default_expiry, + '-CA', self.ca_cert, '-CAkey', self.ca_key, + '-set_serial', '01', '-out', crt_p] + else: + cmd = ['openssl', 'ca', '-config', self.signing_conf, + '-extensions', 'req_extensions', + '-days', self.default_expiry, '-notext', + '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch'] + log("running %s" % " ".join(cmd), level=DEBUG) + subprocess.check_call(cmd, stderr=subprocess.PIPE) + + def get_ca_bundle(self): + with open(self.ca_cert) as fh: + return fh.read() + + +CA_CONF_TEMPLATE = """ +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = %(ca_dir)s +policy = policy_match +database = $dir/index.txt +serial = $dir/serial +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +certificate = $dir/cacert.pem 
+private_key = $dir/private/cacert.key +RANDFILE = $dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = ca_distinguished_name + +x509_extensions = ca_extensions + +[ ca_distinguished_name ] +organizationName = %(org_name)s +organizationalUnitName = %(org_unit_name)s Certificate Authority + + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ ca_extensions ] +basicConstraints = critical,CA:true +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = cRLSign, keyCertSign +""" + + +SIGNING_CONF_TEMPLATE = """ +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = %(ca_dir)s +policy = policy_match +database = $dir/index.txt +serial = $dir/serial +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +certificate = $dir/cacert.pem +private_key = $dir/private/cacert.key +RANDFILE = $dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = req_distinguished_name + +x509_extensions = req_extensions + +[ req_distinguished_name ] +organizationName = %(org_name)s +organizationalUnitName = %(org_unit_name)s machine resources +commonName = %(common_name)s + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ req_extensions ] +basicConstraints = CA:false +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = digitalSignature, keyEncipherment, keyAgreement +extendedKeyUsage = serverAuth, clientAuth +""" diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/bcache.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/bcache.py new file mode 100644 index 0000000000000000000000000000000000000000..605991e16a4238f4ed46fbf0791048187f375c5c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/bcache.py @@ -0,0 +1,74 @@ +# Copyright 2017 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import json + +from charmhelpers.core.hookenv import log + +stats_intervals = ['stats_day', 'stats_five_minute', + 'stats_hour', 'stats_total'] + +SYSFS = '/sys' + + +class Bcache(object): + """Bcache behaviour + """ + + def __init__(self, cachepath): + self.cachepath = cachepath + + @classmethod + def fromdevice(cls, devname): + return cls('{}/block/{}/bcache'.format(SYSFS, devname)) + + def __str__(self): + return self.cachepath + + def get_stats(self, interval): + """Get cache stats + """ + intervaldir = 'stats_{}'.format(interval) + path = "{}/{}".format(self.cachepath, intervaldir) + out = dict() + for elem in os.listdir(path): + out[elem] = open('{}/{}'.format(path, elem)).read().strip() + return out + + +def get_bcache_fs(): + """Return all cache sets + """ + cachesetroot = "{}/fs/bcache".format(SYSFS) + try: + dirs = os.listdir(cachesetroot) + except OSError: + log("No bcache fs found") + return [] + cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')]) + return cacheset + + +def get_stats_action(cachespec, interval): + """Action for getting bcache statistics for a given cachespec. + Cachespec can either be a device name, eg. 'sdb', which will retrieve + cache stats for the given device, or 'global', which will retrieve stats + for all cachesets + """ + if cachespec == 'global': + caches = get_bcache_fs() + else: + caches = [Bcache.fromdevice(cachespec)] + res = dict((c.cachepath, c.get_stats(interval)) for c in caches) + return json.dumps(res, indent=4, separators=(',', ': ')) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/ceph.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 0000000000000000000000000000000000000000..814d5c72bc246c9536d3bb5975ade1c7fbe989e4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,1810 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import collections +import errno +import hashlib +import math +import six + +import os +import shutil +import json +import time +import uuid + +from subprocess import ( + check_call, + check_output, + CalledProcessError, +) +from charmhelpers.core.hookenv import ( + config, + service_name, + local_unit, + relation_get, + relation_ids, + relation_set, + related_units, + log, + DEBUG, + INFO, + WARNING, + ERROR, +) +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, + cmp_pkgrevno, +) +from charmhelpers.fetch import ( + apt_install, +) +from charmhelpers.core.unitdata import kv + +from charmhelpers.core.kernel import modprobe +from charmhelpers.contrib.openstack.utils import config_flags_parser + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} +""" + +# The number of placement groups per OSD to target for placement group +# calculations. This number is chosen as 100 due to the ceph PG Calc +# documentation recommending to choose 100 for clusters which are not +# expected to increase in the foreseeable future. Since the majority of the +# calculations are done on deployment, target the case of non-expanding +# clusters as the default. +DEFAULT_PGS_PER_OSD_TARGET = 100 +DEFAULT_POOL_WEIGHT = 10.0 +LEGACY_PG_COUNT = 200 +DEFAULT_MINIMUM_PGS = 2 +AUTOSCALER_DEFAULT_PGS = 32 + + +class OsdPostUpgradeError(Exception): + """Error class for OSD post-upgrade operations.""" + pass + + +class OSDSettingConflict(Exception): + """Error class for conflicting osd setting requests.""" + pass + + +class OSDSettingNotAllowed(Exception): + """Error class for a disallowed setting.""" + pass + + +OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed) + +OSD_SETTING_WHITELIST = [ + 'osd heartbeat grace', + 'osd heartbeat interval', +] + + +def _order_dict_by_key(rdict): + """Convert a dictionary into an OrderedDict sorted by key. + + :param rdict: Dictionary to be ordered. + :type rdict: dict + :returns: Ordered Dictionary. + :rtype: collections.OrderedDict + """ + return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0])) + + +def get_osd_settings(relation_name): + """Consolidate requested osd settings from all clients. + + Consolidate requested osd settings from all clients. Check that the + requested setting is on the whitelist and it does not conflict with + any other requested settings. 
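+
+    A setting requested at different values by two clients raises
+    OSDSettingConflict; a key outside OSD_SETTING_WHITELIST raises
+    OSDSettingNotAllowed.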
+ + :returns: Dictionary of settings + :rtype: dict + + :raises: OSDSettingNotAllowed + :raises: OSDSettingConflict + """ + rel_ids = relation_ids(relation_name) + osd_settings = {} + for relid in rel_ids: + for unit in related_units(relid): + unit_settings = relation_get('osd-settings', unit, relid) or '{}' + unit_settings = json.loads(unit_settings) + for key, value in unit_settings.items(): + if key not in OSD_SETTING_WHITELIST: + msg = 'Illegal settings "{}"'.format(key) + raise OSDSettingNotAllowed(msg) + if key in osd_settings: + if osd_settings[key] != unit_settings[key]: + msg = 'Conflicting settings for "{}"'.format(key) + raise OSDSettingConflict(msg) + else: + osd_settings[key] = value + return _order_dict_by_key(osd_settings) + + +def send_osd_settings(): + """Pass on requested OSD settings to osd units.""" + try: + settings = get_osd_settings('client') + except OSD_SETTING_EXCEPTIONS as e: + # There is a problem with the settings, not passing them on. Update + # status will notify the user. + log(e, level=ERROR) + return + data = { + 'osd-settings': json.dumps(settings, sort_keys=True)} + for relid in relation_ids('osd'): + relation_set(relation_id=relid, + relation_settings=data) + + +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. + :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format( + value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if isinstance(value, six.string_types): + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError( + "Invalid valid_range list of {} for {}. " + "List must be [min,max]".format(valid_range, value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides an error message + """ + + def __init__(self, message): + super(PoolCreationError, self).__init__(message) + + +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). + """ + + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass + + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. 
valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to remove. + :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(self.service, cache_pool) + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + elif mode == 'writeback': + pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'forward'] + if cmp_pkgrevno('ceph-common', '10.1') >= 0: + # Jewel added a mandatory flag + pool_forward_cmd.append('--yes-i-really-mean-it') + + check_call(pool_forward_cmd) + # Flush the cache and wait for it to return + check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, + device_class=None): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). + + The number of placement groups are calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any of CRUSH set rules, + the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule, rather it is left to the user to tune + in the form of 'expected-osd-count' config option. + + :param pool_size: int. pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools has not been upgraded to + include an update to indicate their relative usage of the pools. + :param device_class: str. 
class of storage to use for basis of pgs + calculation; ceph supports nvme, ssd and hdd by default based + on presence of devices of each type in the deployment. + :return: int. The number of pgs to use. + """ + + # Note: This calculation follows the approach that is provided + # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. + validator(value=pool_size, valid_type=int) + + # Ensure that percent data is set to something - even with a default + # it can be set to None, which would wreak havoc below. + if percent_data is None: + percent_data = DEFAULT_POOL_WEIGHT + + # If the expected-osd-count is specified, then use the max between + # the expected-osd-count and the actual osd_count + osd_list = get_osds(self.service, device_class) + expected = config('expected-osd-count') or 0 + + if osd_list: + if device_class: + osd_count = len(osd_list) + else: + osd_count = max(expected, len(osd_list)) + + # Log a message to provide some insight if the calculations claim + # to be off because someone is setting the expected count and + # there are more OSDs in reality. Try to make a proper guess + # based upon the cluster itself. + if not device_class and expected and osd_count != expected: + log("Found more OSDs than provided expected count. " + "Using the actual count instead", INFO) + elif expected: + # Use the expected-osd-count in older ceph versions to allow for + # a more accurate pg calculations + osd_count = expected + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return LEGACY_PG_COUNT + + percent_data /= 100.0 + target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + + # NOTE: ensure a sane minimum number of PGS otherwise we don't get any + # reasonable data distribution in minimal OSD configurations + if num_pg < DEFAULT_MINIMUM_PGS: + num_pg = DEFAULT_MINIMUM_PGS + + # The CRUSH algorithm has a slight optimization for placement groups + # with powers of 2 so find the nearest power of 2. If the nearest + # power of 2 is more than 25% below the original value, the next + # highest value is used. To do this, find the nearest power of 2 such + # that 2^n <= num_pg, check to see if its within the 25% tolerance. + exponent = math.floor(math.log(num_pg, 2)) + nearest = 2 ** exponent + if (num_pg - nearest) > (num_pg * 0.25): + # Choose the next highest power of 2 since the nearest is more + # than 25% below the original value. + return int(nearest * 2) + else: + return int(nearest) + + +class ReplicatedPool(Pool): + def __init__(self, service, name, pg_num=None, replicas=2, + percent_data=10.0, app_name=None): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + self.percent_data = percent_data + if pg_num: + # Since the number of placement groups were specified, ensure + # that there aren't too many created. 
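+            # get_pgs() with percent_data=100.0 is the ceiling a pool holding
+            # all of the cluster's data could use; the requested pg_num is
+            # capped to that value below.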
+ max_pgs = self.get_pgs(self.replicas, 100.0) + self.pg_num = min(pg_num, max_pgs) + else: + self.pg_num = self.get_pgs(self.replicas, percent_data) + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' + + def create(self): + if not pool_exists(self.service, self.name): + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + # Create it + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + + try: + check_call(cmd) + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name), level=WARNING) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e), level=WARNING) + except CalledProcessError: + raise + + +# Default jerasure erasure coded pool +class ErasurePool(Pool): + def __init__(self, service, name, erasure_code_profile="default", + percent_data=10.0, app_name=None): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + self.percent_data = percent_data + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. 
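+            # Both the k data chunks and the m coding chunks need placement,
+            # so k+m is used as the effective pool size when sizing the
+            # placement groups below.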
+            erasure_profile = get_erasure_profile(self.service,
+                                                  self.erasure_code_profile)
+
+            # Check for errors
+            if erasure_profile is None:
+                msg = ("Failed to discover erasure profile named "
+                       "{}".format(self.erasure_code_profile))
+                log(msg, level=ERROR)
+                raise PoolCreationError(msg)
+            if 'k' not in erasure_profile or 'm' not in erasure_profile:
+                # Error
+                msg = ("Unable to find k (data chunks) or m (coding chunks) "
+                       "in erasure profile {}".format(erasure_profile))
+                log(msg, level=ERROR)
+                raise PoolCreationError(msg)
+
+            k = int(erasure_profile['k'])
+            m = int(erasure_profile['m'])
+            pgs = self.get_pgs(k + m, self.percent_data)
+            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+            # Create it
+            if nautilus_or_later:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    '--pg-num-min={}'.format(
+                        min(AUTOSCALER_DEFAULT_PGS, pgs)
+                    ),
+                    self.name, str(pgs), str(pgs),
+                    'erasure', self.erasure_code_profile
+                ]
+            else:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    self.name, str(pgs), str(pgs),
+                    'erasure', self.erasure_code_profile
+                ]
+
+            try:
+                check_call(cmd)
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
+                if nautilus_or_later:
+                    # Ensure we set the expected pool ratio
+                    update_pool(client=self.service,
+                                pool=self.name,
+                                settings={'target_size_ratio': str(self.percent_data / 100.0)})
+                if 'pg_autoscaler' in enabled_manager_modules():
+                    try:
+                        enable_pg_autoscale(self.service, self.name)
+                    except CalledProcessError as e:
+                        log('Could not configure auto scaling for pool {}: {}'.format(
+                            self.name, e), level=WARNING)
+            except CalledProcessError:
+                raise
+
+
+def enabled_manager_modules():
+    """Return a list of enabled manager modules.
+
+    :rtype: List[str]
+    """
+    cmd = ['ceph', 'mgr', 'module', 'ls']
+    try:
+        modules = check_output(cmd)
+        if six.PY3:
+            modules = modules.decode('UTF-8')
+    except CalledProcessError as e:
+        log("Failed to list ceph modules: {}".format(e), WARNING)
+        return []
+    modules = json.loads(modules)
+    return modules['enabled_modules']
+
+
+def enable_pg_autoscale(service, pool_name):
+    """
+    Enable Ceph's PG autoscaler for the specified pool.
+
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types. The name of the pool to enable autoscaling on
+    :raise: CalledProcessError if the command fails
+    """
+    check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on'])
+
+
+def get_mon_map(service):
+    """
+    Returns the current monitor map.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :return: dict. :raise: ValueError if the monmap fails to parse.
+      Also raises CalledProcessError if our ceph command fails
+    """
+    try:
+        mon_status = check_output(['ceph', '--id', service,
+                                   'mon_status', '--format=json'])
+        if six.PY3:
+            mon_status = mon_status.decode('UTF-8')
+        try:
+            return json.loads(mon_status)
+        except ValueError as v:
+            log("Unable to parse mon_status json: {}. Error: {}"
+                .format(mon_status, str(v)))
+            raise
+    except CalledProcessError as e:
+        log("mon_status command failed with message: {}"
+            .format(str(e)))
+        raise
+
+
+def hash_monitor_names(service):
+    """
+    Uses the get_mon_map() function to get information about the monitor
+    cluster.
+    Hash the name of each monitor. Return a sorted list of monitor hashes
+    in an ascending order.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :rtype: list. Sorted list of monitor name hashes. Each monmap entry
+        being hashed looks like: {
+        'name': 'ip-172-31-13-165',
+        'rank': 0,
+        'addr': '172.31.13.165:6789/0'}
+    """
+    try:
+        hash_list = []
+        monitor_list = get_mon_map(service=service)
+        if monitor_list['monmap']['mons']:
+            for mon in monitor_list['monmap']['mons']:
+                hash_list.append(
+                    hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
+            return sorted(hash_list)
+        else:
+            return None
+    except (ValueError, CalledProcessError):
+        raise
+
+
+def monitor_key_delete(service, key):
+    """
+    Delete a key and value pair from the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to delete.
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'del', str(key)])
+    except CalledProcessError as e:
+        log("Monitor config-key del failed with message: {}".format(
+            e.output))
+        raise
+
+
+def monitor_key_set(service, key, value):
+    """
+    Sets a key value pair on the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to set.
+    :param value: The value to set. This will be converted to a string
+        before setting
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'put', str(key), str(value)])
+    except CalledProcessError as e:
+        log("Monitor config-key put failed with message: {}".format(
+            e.output))
+        raise
+
+
+def monitor_key_get(service, key):
+    """
+    Gets the value of an existing key in the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to search for.
+    :return: Returns the value of that key or None if not found.
+    """
+    try:
+        output = check_output(
+            ['ceph', '--id', service,
+             'config-key', 'get', str(key)]).decode('UTF-8')
+        return output
+    except CalledProcessError as e:
+        log("Monitor config-key get failed with message: {}".format(
+            e.output))
+        return None
+
+
+def monitor_key_exists(service, key):
+    """
+    Searches for the existence of a key in the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to search for
+    :return: Returns True if the key exists, False if not and raises an
+        exception if an unknown error occurs. :raise: CalledProcessError if
+        an unknown error occurs
+    """
+    try:
+        check_call(
+            ['ceph', '--id', service,
+             'config-key', 'exists', str(key)])
+        # I can return true here regardless because Ceph returns
+        # ENOENT if the key wasn't found
+        return True
+    except CalledProcessError as e:
+        if e.returncode == errno.ENOENT:
+            return False
+        else:
+            log("Unknown error from ceph config-get exists: {} {}".format(
+                e.returncode, e.output))
+            raise
+
+
+def get_erasure_profile(service, name):
+    """
+    Get an existing erasure code profile if it already exists.
+    Returns json formatted output.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name: six.string_types. Name of the profile.
+    :return: dict or None
+    """
+    try:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'erasure-code-profile', 'get',
+                            name, '--format=json'])
+        if six.PY3:
+            out = out.decode('UTF-8')
+        return json.loads(out)
+    except (CalledProcessError, OSError, ValueError):
+        return None
+
+
+def pool_set(service, pool_name, key, value):
+    """
+    Sets a value for a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param key: six.string_types
+    :param value:
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
+           str(value).lower()]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def snapshot_pool(service, pool_name, snapshot_name):
+    """
+    Snapshots a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_snapshot(service, pool_name, snapshot_name):
+    """
+    Remove a snapshot from a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
+    """
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param pool_name: Name of pool
+    :type pool_name: str
+    :param max_bytes: Maximum bytes quota to apply
+    :type max_bytes: int
+    :param max_objects: Maximum objects quota to apply
+    :type max_objects: int
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
+    if max_bytes:
+        cmd = cmd + ['max_bytes', str(max_bytes)]
+    if max_objects:
+        cmd = cmd + ['max_objects', str(max_objects)]
+    check_call(cmd)
+
+
+def remove_pool_quota(service, pool_name):
+    """
+    Remove the byte quota from a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_erasure_profile(service, profile_name):
+    """
+    Remove an erasure code profile. Please see
+    http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+    for more details
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param profile_name: six.string_types
+    :return: None.
Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', + failure_domain='host', + data_chunks=2, coding_chunks=1, + locality=None, durability_estimator=None, + device_class=None): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', + 'room', 'root', 'row']) + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :param device_class: six.string_types + :return: None. Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks) + ] + if locality is not None and durability_estimator is not None: + raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 + # failure_domain changed in luminous + if luminous_or_later: + cmd.append('crush-failure-domain=' + failure_domain) + else: + cmd.append('ruleset-failure-domain=' + failure_domain) + + # device class new in luminous + if luminous_or_later and device_class: + cmd.append('crush-device-class={}'.format(device_class)) + else: + log('Skipping device class configuration (ceph < 12.0.0)', + level=DEBUG) + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name + :param service: six.string_types. The Ceph user name to run the command under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] + check_call(cmd) + + +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. 
The Ceph user name to run the command under
+    :param name: six.string_types
+    :return: bool. True if the profile exists, otherwise False
+    """
+    validator(value=name, valid_type=six.string_types)
+    try:
+        check_call(['ceph', '--id', service,
+                    'osd', 'erasure-code-profile', 'get',
+                    name])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def get_cache_mode(service, pool_name):
+    """
+    Find the current caching mode of the pool_name given.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: six.string_types or None. The current cache mode, or None if the
+             pool was not found
+    """
+    validator(value=service, valid_type=six.string_types)
+    validator(value=pool_name, valid_type=six.string_types)
+    out = check_output(['ceph', '--id', service,
+                        'osd', 'dump', '--format=json'])
+    if six.PY3:
+        out = out.decode('UTF-8')
+    try:
+        osd_json = json.loads(out)
+        for pool in osd_json['pools']:
+            if pool['pool_name'] == pool_name:
+                return pool['cache_mode']
+        return None
+    except ValueError:
+        raise
+
+
+def pool_exists(service, name):
+    """Check to see if a RADOS pool already exists."""
+    try:
+        out = check_output(['rados', '--id', service, 'lspools'])
+        if six.PY3:
+            out = out.decode('UTF-8')
+    except CalledProcessError:
+        return False
+
+    return name in out.split()
+
+
+def get_osds(service, device_class=None):
+    """Return a list of all Ceph Object Storage Daemons currently in the
+    cluster (optionally filtered by storage device class).
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param device_class: Class of storage device for OSDs
+    :type device_class: str
+    """
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
+    if luminous_or_later and device_class:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'crush', 'class',
+                            'ls-osd', device_class,
+                            '--format=json'])
+    else:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'ls',
+                            '--format=json'])
+    if six.PY3:
+        out = out.decode('UTF-8')
+    return json.loads(out)
+
+
+def install():
+    """Basic Ceph client installation."""
+    ceph_dir = "/etc/ceph"
+    if not os.path.exists(ceph_dir):
+        os.mkdir(ceph_dir)
+
+    apt_install('ceph-common', fatal=True)
+
+
+def rbd_exists(service, pool, rbd_img):
+    """Check to see if a RADOS block device exists."""
+    try:
+        out = check_output(['rbd', 'list', '--id',
+                            service, '--pool', pool])
+        if six.PY3:
+            out = out.decode('UTF-8')
+    except CalledProcessError:
+        return False
+
+    return rbd_img in out
+
+
+def create_rbd_image(service, pool, image, sizemb):
+    """Create a new RADOS block device."""
+    cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
+           '--pool', pool]
+    check_call(cmd)
+
+
+def update_pool(client, pool, settings):
+    """Update the given pool's settings via 'ceph osd pool set'."""
+    cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
+    for k, v in six.iteritems(settings):
+        cmd.append(k)
+        cmd.append(v)
+
+    check_call(cmd)
+
+
+def set_app_name_for_pool(client, pool, name):
+    """
+    Calls `osd pool application enable` for the specified pool name
+
+    :param client: Name of the ceph client to use
+    :type client: str
+    :param pool: Pool to set app name for
+    :type pool: str
+    :param name: app name for the specified pool
+    :type name: str
+
+    :raises: CalledProcessError if ceph call fails
+    """
+    if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
+        cmd = ['ceph', '--id', client, 'osd', 'pool',
+               'application', 'enable', pool, name]
+        check_call(cmd)
+
+
+def create_pool(service, name, replicas=3, pg_num=None):
+    """Create a new RADOS pool."""
+    if pool_exists(service, name):
+        log("Ceph pool {} already exists, skipping creation".format(name),
+            level=WARNING)
+        return
+
+    if not pg_num:
+        #
Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] + check_call(cmd) + + update_pool(service, name, settings={'size': str(replicas)}) + + +def delete_pool(service, name): + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def add_key(service, key): + """ + Add a key to a keyring. + + Creates the keyring if it doesn't already exist. + + Logs and returns if the key is already in the keyring. + """ + keyring = _keyring_path(service) + if os.path.exists(keyring): + with open(keyring, 'r') as ring: + if key in ring.read(): + log('Ceph keyring exists at %s and has not changed.' % keyring, + level=DEBUG) + return + log('Updating existing keyring %s.' % keyring, level=DEBUG) + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] + check_call(cmd) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) + + +def create_keyring(service, key): + """Deprecated. Please use the more accurately named 'add_key'""" + return add_key(service, key) + + +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + """Create a file containing key.""" + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('Keyfile exists at %s.' % keyfile, level=WARNING) + return + + with open(keyfile, 'w') as fd: + fd.write(key) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" + hosts = [] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + + return hosts + + +def configure(service, key, auth, use_syslog): + """Perform basic configuration of Ceph.""" + add_key(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + """Determine whether a RADOS block device is mapped locally.""" + try: + out = check_output(['rbd', 'showmapped']) + if six.PY3: + out = out.decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def map_block_storage(service, pool, image): + """Map a RADOS block device for local use.""" + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + """Determine whether a filesytems is already mounted.""" + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + """Make a new filesystem on the specified block device.""" + count = 0 + e_noent = errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('Gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) + count += 1 + time.sleep(1) + else: + log('Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + """Migrate data in data_src_dst to blk_device and then remount.""" + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +def copy_files(src, dst, symlinks=False, ignore=None): + """Copy files from src to dst.""" + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. 
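+
+    A rough usage sketch (all names illustrative, not taken from a real
+    charm)::
+
+        ensure_ceph_storage(service='mycharm', pool='mycharm',
+                            rbd_img='data', sizemb=10240,
+                            mount_point='/srv/data',
+                            blk_device='/dev/rbd/mycharm/data',
+                            fstype='ext4', system_services=['myservice'])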
+ """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None, + relation='ceph', key=None): + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. + + @returns boolean: Flag to indicate whether a key was successfully written + to disk based on either relation data or a supplied key + """ + if not key: + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + + if not key: + return False + + add_key(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + + return True + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, api_version=1, request_id=None): + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] + + def add_op(self, op): + """Add an op if it is not already in the list. + + :param op: Operation to add. + :type op: dict + """ + if op not in self.ops: + self.ops.append(op) + + def add_op_request_access_to_group(self, name, namespace=None, + permission=None, key_name=None, + object_prefix_permissions=None): + """ + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools or + object prefixes. object_prefix_permissions should be a dictionary + keyed on the permission with the corresponding value being a list + of prefixes to apply that permission to. 
+ { + 'rwx': ['prefix1', 'prefix2'], + 'class-read': ['prefix3']} + """ + self.add_op({ + 'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, + 'name': key_name or service_name(), + 'group-permission': permission, + 'object-prefix-permissions': object_prefix_permissions}) + + def add_op_create_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, max_objects=None): + """DEPRECATED: Use ``add_op_create_replicated_pool()`` or + ``add_op_create_erasure_pool()`` instead. + """ + return self.add_op_create_replicated_pool( + name, replica_count=replica_count, pg_num=pg_num, weight=weight, + group=group, namespace=namespace, app_name=app_name, + max_bytes=max_bytes, max_objects=max_objects) + + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, + max_objects=None): + """Adds an operation to create a replicated pool. + + :param name: Name of pool to create + :type name: str + :param replica_count: Number of copies Ceph should keep of your data. + :type replica_count: int + :param pg_num: Request specific number of Placement Groups to create + for pool. + :type pg_num: int + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: float + :param group: Group to add pool to + :type group: str + :param namespace: Group namespace + :type namespace: str + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + """ + if pg_num and weight: + raise ValueError('pg_num and weight are mutually exclusive') + + self.add_op({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight, 'group': group, + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) + + def add_op_create_erasure_pool(self, name, erasure_profile=None, + weight=None, group=None, app_name=None, + max_bytes=None, max_objects=None): + """Adds an operation to create a erasure coded pool. + + :param name: Name of pool to create + :type name: str + :param erasure_profile: Name of erasure code profile to use. If not + set the ceph-mon unit handling the broker + request will set its default value. + :type erasure_profile: str + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + :type weight: float + :param group: Group to add pool to + :type group: str + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. 
+ :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + """ + self.add_op({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) + + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. + """ + self.ops = ops + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops, + 'request-id': self.request_id}) + + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in [ + 'replicas', 'name', 'op', 'pg_num', 'weight', + 'group', 'group-namespace', 'group-permission', + 'object-prefix-permissions']: + if self.ops[req_no].get(key) != other.ops[req_no].get(key): + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def request_id(self): + return self.rsp.get('request-id') + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') + + +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
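+#
+# A slightly fuller sketch of the hook above (illustrative only; it assumes
+# the charm builds an identical CephBrokerRq on every hook invocation so
+# that comparisons against the previously sent request work):
+#
+#     @hooks.hook('ceph-relation-changed')
+#     def ceph_changed():
+#         rq = CephBrokerRq()
+#         rq.add_op_create_replicated_pool(name='poolname', replica_count=3)
+#         if is_request_complete(rq):
+#             configure_storage()   # hypothetical post-completion step
+#         else:
+#             send_request_if_needed(rq)
+#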
Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + request_id=request_data['request-id']) + request.set_ops(request_data['ops']) + + return request + + +def get_request_states(request, relation='ceph'): + """Return a dict of requests per relation id with their corresponding + completion state. + + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ + complete = [] + requests = {} + for rid in relation_ids(relation): + complete = False + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_request_complete_for_rid(previous_request, rid) + else: + sent = False + complete = False + + requests[rid] = { + 'sent': sent, + 'complete': complete, + } + + return requests + + +def is_request_sent(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['sent']: + return False + + return True + + +def is_request_complete(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['complete']: + return False + + return True + + +def is_request_complete_for_rid(request, rid): + """Check if a given request has been completed on the given relation + + @param request: A CephBrokerRq object + @param rid: Relation ID + """ + broker_key = get_broker_rsp_key() + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == request.request_id: + if not rsp.exit_code: + return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. 
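+            # Legacy fallback: older ceph-mon versions published a single
+            # 'broker_rsp' key shared by all client units rather than a
+            # per-unit 'broker-rsp-<unit>' key, so check that key too.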
+            if rdata.get('broker_rsp'):
+                request_data = json.loads(rdata['broker_rsp'])
+                if request_data.get('request-id'):
+                    log('Ignoring legacy broker_rsp without unit key as remote '
+                        'service supports unit specific replies', level=DEBUG)
+                else:
+                    log('Using legacy broker_rsp as remote service does not '
+                        'support unit specific replies', level=DEBUG)
+                    rsp = CephBrokerRsp(rdata['broker_rsp'])
+                    if not rsp.exit_code:
+                        return True
+
+    return False
+
+
+def get_broker_rsp_key():
+    """Return broker response key for this unit
+
+    This is the key that ceph is going to use to pass request status
+    information back to this unit
+    """
+    return 'broker-rsp-' + local_unit().replace('/', '-')
+
+
+def send_request_if_needed(request, relation='ceph'):
+    """Send broker request if an equivalent request has not already been sent
+
+    @param request: A CephBrokerRq object
+    """
+    if is_request_sent(request, relation=relation):
+        log('Request already sent but not complete, not sending new request',
+            level=DEBUG)
+    else:
+        for rid in relation_ids(relation):
+            log('Sending request {}'.format(request.request_id), level=DEBUG)
+            relation_set(relation_id=rid, broker_req=request.request)
+
+
+def has_broker_rsp(rid=None, unit=None):
+    """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.
+
+    :param rid: The relation to check (default of None means current relation)
+    :type rid: Union[str, None]
+    :param unit: The remote unit to check (default of None means current unit)
+    :type unit: Union[str, None]
+    :returns: True if broker key exists and is set to something 'truthy'
+    :rtype: bool
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    return True if broker_rsp else False
+
+
+def is_broker_action_done(action, rid=None, unit=None):
+    """Check whether broker action has completed yet.
+
+    @param action: name of action to be performed
+    @returns True if action complete otherwise False
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    if not broker_rsp:
+        return False
+
+    rsp = CephBrokerRsp(broker_rsp)
+    unit_name = local_unit().partition('/')[2]
+    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
+    kvstore = kv()
+    val = kvstore.get(key=key)
+    if val and val == rsp.request_id:
+        return True
+
+    return False
+
+
+def mark_broker_action_done(action, rid=None, unit=None):
+    """Mark action as having been completed.
+
+    @param action: name of action to be performed
+    @returns None
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    if not broker_rsp:
+        return
+
+    rsp = CephBrokerRsp(broker_rsp)
+    unit_name = local_unit().partition('/')[2]
+    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
+    kvstore = kv()
+    kvstore.set(key=key, value=rsp.request_id)
+    kvstore.flush()
+
+
+class CephConfContext(object):
+    """Ceph config (ceph.conf) context.
+
+    Supports user-provided Ceph configuration settings. Users can provide a
+    dictionary as the value for the config-flags charm option containing
+    Ceph configuration settings keyed by their section in ceph.conf.
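+
+    For example, setting the config-flags charm option to the following
+    (illustrative) value would add one setting to the [global] section and
+    one to the [osd] section of the rendered ceph.conf::
+
+        {'global': {'debug osd': '1/5'},
+         'osd': {'osd max write size': 256}}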
+ """ + def __init__(self, permitted_sections=None): + self.permitted_sections = permitted_sections or [] + + def __call__(self): + conf = config('config-flags') + if not conf: + return {} + + conf = config_flags_parser(conf) + if not isinstance(conf, dict): + log("Provided config-flags is not a dictionary - ignoring", + level=WARNING) + return {} + + permitted = self.permitted_sections + if permitted: + diff = set(conf.keys()).difference(set(permitted)) + if diff: + log("Config-flags contains invalid keys '%s' - they will be " + "ignored" % (', '.join(diff)), level=WARNING) + + ceph_conf = {} + for key in conf: + if permitted and key not in permitted: + log("Ignoring key '%s'" % key, level=WARNING) + continue + + ceph_conf[key] = conf[key] + return ceph_conf + + +class CephOSDConfContext(CephConfContext): + """Ceph config (ceph.conf) context. + + Consolidates settings from config-flags via CephConfContext with + settings provided by the mons. The config-flag values are preserved in + conf['osd'], settings from the mons which do not clash with config-flag + settings are in conf['osd_from_client'] and finally settings which do + clash are in conf['osd_from_client_conflict']. Rather than silently drop + the conflicting settings they are provided in the context so they can be + rendered commented out to give some visability to the admin. + """ + + def __init__(self, permitted_sections=None): + super(CephOSDConfContext, self).__init__( + permitted_sections=permitted_sections) + try: + self.settings_from_mons = get_osd_settings('mon') + except OSDSettingConflict: + log( + "OSD settings from mons are inconsistent, ignoring them", + level=WARNING) + self.settings_from_mons = {} + + def filter_osd_from_mon_settings(self): + """Filter settings from client relation against config-flags. + + :returns: A tuple ( + ,config-flag values, + ,client settings which do not conflict with config-flag values, + ,client settings which confilct with config-flag values) + :rtype: (OrderedDict, OrderedDict, OrderedDict) + """ + ceph_conf = super(CephOSDConfContext, self).__call__() + conflicting_entries = {} + clear_entries = {} + for key, value in self.settings_from_mons.items(): + if key in ceph_conf.get('osd', {}): + if ceph_conf['osd'][key] != value: + conflicting_entries[key] = value + else: + clear_entries[key] = value + clear_entries = _order_dict_by_key(clear_entries) + conflicting_entries = _order_dict_by_key(conflicting_entries) + return ceph_conf, clear_entries, conflicting_entries + + def __call__(self): + """Construct OSD config context. + + Standard context with two additional special keys. + osd_from_client_conflict: client settings which confilct with + config-flag values + osd_from_client: settings which do not conflict with config-flag + values + + :returns: OSD config context dict. + :rtype: dict + """ + conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings() + conf['osd_from_client_conflict'] = osd_conflict + conf['osd_from_client'] = osd_clear + return conf diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/loopback.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/loopback.py new file mode 100644 index 0000000000000000000000000000000000000000..74bab40e43a978e3d9e1e2f9c8975368092145c0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/loopback.py @@ -0,0 +1,92 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from subprocess import ( + check_call, + check_output, +) + +import six + + +################################################## +# loopback device helpers. +################################################## +def loopback_devices(): + ''' + Parse through 'losetup -a' output to determine currently mapped + loopback devices. Output is expected to look like: + + /dev/loop0: [0807]:961814 (/tmp/my.img) + + or: + + /dev/loop0: [0807]:961814 (/tmp/my.img (deleted)) + + :returns: dict: a dict mapping {loopback_dev: backing_file} + ''' + loopbacks = {} + cmd = ['losetup', '-a'] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] + for dev, _, f in devs: + loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] + return loopbacks + + +def create_loopback(file_path): + ''' + Create a loopback device for a given backing file. + + :returns: str: Full path to new loopback device (eg, /dev/loop0) + ''' + file_path = os.path.abspath(file_path) + check_call(['losetup', '--find', file_path]) + for d, f in six.iteritems(loopback_devices()): + if f == file_path: + return d + + +def ensure_loopback_device(path, size): + ''' + Ensure a loopback device exists for a given backing file path and size. + If it a loopback device is not mapped to file, a new one will be created. + + TODO: Confirm size of found loopback device. + + :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) + ''' + for d, f in six.iteritems(loopback_devices()): + if f == path: + return d + + if not os.path.exists(path): + cmd = ['truncate', '--size', size, path] + check_call(cmd) + + return create_loopback(path) + + +def is_mapped_loopback_device(device): + """ + Checks if a given device name is an existing/mapped loopback device. + :param device: str: Full path to the device (eg, /dev/loop1). + :returns: str: Path to the backing file if is a loopback device + empty string otherwise + """ + return loopback_devices().get(device, "") diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/lvm.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/lvm.py new file mode 100644 index 0000000000000000000000000000000000000000..c8bde69263f0e917d32d0e5d70abba1409b26012 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/lvm.py @@ -0,0 +1,182 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+from subprocess import (
+    CalledProcessError,
+    check_call,
+    check_output,
+    Popen,
+    PIPE,
+)
+
+
+##################################################
+# LVM helpers.
+##################################################
+def deactivate_lvm_volume_group(block_device):
+    '''
+    Deactivate any volume group associated with an LVM physical volume.
+
+    :param block_device: str: Full path to LVM physical volume
+    '''
+    vg = list_lvm_volume_group(block_device)
+    if vg:
+        cmd = ['vgchange', '-an', vg]
+        check_call(cmd)
+
+
+def is_lvm_physical_volume(block_device):
+    '''
+    Determine whether a block device is initialized as an LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+
+    :returns: boolean: True if block device is a PV, False if not.
+    '''
+    try:
+        check_output(['pvdisplay', block_device])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def remove_lvm_physical_volume(block_device):
+    '''
+    Remove LVM PV signatures from a given block device.
+
+    :param block_device: str: Full path of block device to scrub.
+    '''
+    p = Popen(['pvremove', '-ff', block_device],
+              stdin=PIPE)
+    p.communicate(input=b'y\n')  # bytes: works under both Python 2 and 3
+
+
+def list_lvm_volume_group(block_device):
+    '''
+    List LVM volume group associated with a given block device.
+
+    Assumes block device is a valid LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+
+    :returns: str: Name of volume group associated with block device or None
+    '''
+    vg = None
+    pvd = check_output(['pvdisplay', block_device]).splitlines()
+    for lvm in pvd:
+        lvm = lvm.decode('UTF-8')
+        if lvm.strip().startswith('VG Name'):
+            vg = ' '.join(lvm.strip().split()[2:])
+    return vg
+
+
+def create_lvm_physical_volume(block_device):
+    '''
+    Initialize a block device as an LVM physical volume.
+
+    :param block_device: str: Full path of block device to initialize.
+
+    '''
+    check_call(['pvcreate', block_device])
+
+
+def create_lvm_volume_group(volume_group, block_device):
+    '''
+    Create an LVM volume group backed by a given block device.
+
+    Assumes block device has already been initialized as an LVM PV.
+
+    :param volume_group: str: Name of volume group to create.
+    :param block_device: str: Full path of PV-initialized block device.
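+
+    A typical end-to-end flow with the helpers in this module (device and
+    names illustrative only)::
+
+        create_lvm_physical_volume('/dev/vdb')
+        create_lvm_volume_group('vg0', '/dev/vdb')
+        create_logical_volume('lv0', 'vg0', size='10G')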
+ ''' + check_call(['vgcreate', volume_group, block_device]) + + +def list_logical_volumes(select_criteria=None, path_mode=False): + ''' + List logical volumes + + :param select_criteria: str: Limit list to those volumes matching this + criteria (see 'lvs -S help' for more details) + :param path_mode: bool: return logical volume name in 'vg/lv' format, this + format is required for some commands like lvextend + :returns: [str]: List of logical volumes + ''' + lv_diplay_attr = 'lv_name' + if path_mode: + # Parsing output logic relies on the column order + lv_diplay_attr = 'vg_name,' + lv_diplay_attr + cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings'] + if select_criteria: + cmd.extend(['--select', select_criteria]) + lvs = [] + for lv in check_output(cmd).decode('UTF-8').splitlines(): + if not lv: + continue + if path_mode: + lvs.append('/'.join(lv.strip().split())) + else: + lvs.append(lv.strip()) + return lvs + + +list_thin_logical_volume_pools = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^t') + +list_thin_logical_volumes = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^V') + + +def extend_logical_volume_by_device(lv_name, block_device): + ''' + Extends the size of logical volume lv_name by the amount of free space on + physical volume block_device. + + :param lv_name: str: name of logical volume to be extended (vg/lv format) + :param block_device: str: name of block_device to be allocated to lv_name + ''' + cmd = ['lvextend', lv_name, block_device] + check_call(cmd) + + +def create_logical_volume(lv_name, volume_group, size=None): + ''' + Create a new logical volume in an existing volume group + + :param lv_name: str: name of logical volume to be created. + :param volume_group: str: Name of volume group to use for the new volume. + :param size: str: Size of logical volume to create (100% if not supplied) + :raises subprocess.CalledProcessError: in the event that the lvcreate fails. + ''' + if size: + check_call([ + 'lvcreate', + '--yes', + '-L', + '{}'.format(size), + '-n', lv_name, volume_group + ]) + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + check_call([ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', lv_name, volume_group + ]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/utils.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a35617606cf52d7cffc04ac245811b770fd95e8e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,128 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import re +from stat import S_ISBLK + +from subprocess import ( + CalledProcessError, + check_call, + check_output, + call +) + + +def _luks_uuid(dev): + """ + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return check_output(cmd).decode('UTF-8').strip() + except CalledProcessError: + return None + + +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header. + """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + if not os.path.exists(path): + return False + return S_ISBLK(os.stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as pat of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. + ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b + # sometimes sgdisk exits non-zero; this is OK, dd will clean up + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') + gpt_end = int(dev_end.split()[0]) - 100 + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=1M', 'count=1']) + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + + +def is_device_mounted(device): + '''Given a device path, return True if that device is mounted, and False + if it isn't. + + :param device: str: Full path of the device to check. + :returns: boolean: True if the path represents a mounted device, False if + it doesn't. + ''' + try: + out = check_output(['lsblk', '-P', device]).decode('UTF-8') + except Exception: + return False + return bool(re.search(r'MOUNTPOINT=".+"', out)) + + +def mkfs_xfs(device, force=False, inode_size=1024): + """Format device with XFS filesystem. + + By default this should fail if the device already has a filesystem on it. 
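+
+    For example (illustrative), mkfs_xfs('/dev/sdb1', force=True,
+    inode_size=512) runs: mkfs.xfs -f -i size=512 /dev/sdb1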
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype force: boolean
+    :param inode_size: XFS inode size in bytes
+    :ptype inode_size: int"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', "size={}".format(inode_size), device]
+    check_call(cmd)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/contexts.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/contexts.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1adf94b133f41a168727a3cdd3a536633ff62b3
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/contexts.py
@@ -0,0 +1,137 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+#  Charm Helpers Developers
+"""A helper to create a yaml cache of config with namespaced relation data."""
+import os
+import yaml
+
+import six
+
+import charmhelpers.core.hookenv
+
+
+charm_dir = os.environ.get('CHARM_DIR', '')
+
+
+def dict_keys_without_hyphens(a_dict):
+    """Return a new dict with underscores instead of hyphens in keys."""
+    return dict(
+        (key.replace('-', '_'), val) for key, val in a_dict.items())
+
+
+def update_relations(context, namespace_separator=':'):
+    """Update the context with the relation data."""
+    # Add any relation data prefixed with the relation type.
+    relation_type = charmhelpers.core.hookenv.relation_type()
+    relations = []
+    context['current_relation'] = {}
+    if relation_type is not None:
+        relation_data = charmhelpers.core.hookenv.relation_get()
+        context['current_relation'] = relation_data
+        # Deprecated: the following use of relation data as keys
+        # directly in the context will be removed.
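+        # e.g. for a relation of type 'database', the relation key
+        # 'private-address' ends up in the context as
+        # 'database:private_address' (hyphens replaced for templating).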
+ relation_data = dict( + ("{relation_type}{namespace_separator}{key}".format( + relation_type=relation_type, + key=key, + namespace_separator=namespace_separator), val) + for key, val in relation_data.items()) + relation_data = dict_keys_without_hyphens(relation_data) + context.update(relation_data) + relations = charmhelpers.core.hookenv.relations_of_type(relation_type) + relations = [dict_keys_without_hyphens(rel) for rel in relations] + + context['relations_full'] = charmhelpers.core.hookenv.relations() + + # the hookenv.relations() data structure is effectively unusable in + # templates and other contexts when trying to access relation data other + # than the current relation. So provide a more useful structure that works + # with any hook. + local_unit = charmhelpers.core.hookenv.local_unit() + relations = {} + for rname, rids in context['relations_full'].items(): + relations[rname] = [] + for rid, rdata in rids.items(): + data = rdata.copy() + if local_unit in rdata: + data.pop(local_unit) + for unit_name, rel_data in data.items(): + new_data = {'__relid__': rid, '__unit__': unit_name} + new_data.update(rel_data) + relations[rname].append(new_data) + context['relations'] = relations + + +def juju_state_to_yaml(yaml_path, namespace_separator=':', + allow_hyphens_in_keys=True, mode=None): + """Update the juju config and state in a yaml file. + + This includes any current relation-get data, and the charm + directory. + + This function was created for the ansible and saltstack + support, as those libraries can use a yaml file to supply + context to templates, but it may be useful generally to + create and update an on-disk cache of all the config, including + previous relation data. + + By default, hyphens are allowed in keys as this is supported + by yaml, but for tools like ansible, hyphens are not valid [1]. + + [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name + """ + config = charmhelpers.core.hookenv.config() + + # Add the charm_dir which we will need to refer to charm + # file resources etc. + config['charm_dir'] = charm_dir + config['local_unit'] = charmhelpers.core.hookenv.local_unit() + config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip() + config['unit_public_address'] = charmhelpers.core.hookenv.unit_get( + 'public-address' + ) + + # Don't use non-standard tags for unicode which will not + # work when salt uses yaml.load_safe. 
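+    # i.e. represent u'foo' as a plain YAML string rather than as a
+    # "!!python/unicode" tagged node, which yaml.safe_load would reject.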
+ yaml.add_representer(six.text_type, + lambda dumper, value: dumper.represent_scalar( + six.u('tag:yaml.org,2002:str'), value)) + + yaml_dir = os.path.dirname(yaml_path) + if not os.path.exists(yaml_dir): + os.makedirs(yaml_dir) + + if os.path.exists(yaml_path): + with open(yaml_path, "r") as existing_vars_file: + existing_vars = yaml.load(existing_vars_file.read()) + else: + with open(yaml_path, "w+"): + pass + existing_vars = {} + + if mode is not None: + os.chmod(yaml_path, mode) + + if not allow_hyphens_in_keys: + config = dict_keys_without_hyphens(config) + existing_vars.update(config) + + update_relations(existing_vars, namespace_separator) + + with open(yaml_path, "w+") as fp: + fp.write(yaml.dump(existing_vars, default_flow_style=False)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/jinja.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/jinja.py new file mode 100644 index 0000000000000000000000000000000000000000..38d4fba0e651399064a14112398a0ef43717f5cd --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/jinja.py @@ -0,0 +1,38 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Templating using the python-jinja2 package. +""" +import six +from charmhelpers.fetch import apt_install, apt_update +try: + import jinja2 +except ImportError: + apt_update(fatal=True) + if six.PY3: + apt_install(["python3-jinja2"], fatal=True) + else: + apt_install(["python-jinja2"], fatal=True) + import jinja2 + + +DEFAULT_TEMPLATES_DIR = 'templates' + + +def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR): + templates = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_dir)) + template = templates.get_template(template_name) + return template.render(context) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/pyformat.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/pyformat.py new file mode 100644 index 0000000000000000000000000000000000000000..51a24dc42379fbee88d8952c578fc2498c852b0b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/templating/pyformat.py @@ -0,0 +1,27 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' +Templating using standard Python str.format() method. 
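+
+For example (keyword arguments take precedence over the hook execution
+context)::
+
+    render('{name} is deployed on {host}', name='myapp', host='10.0.0.1')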
+'''
+
+from charmhelpers.core import hookenv
+
+
+def render(template, extra={}, **kwargs):
+    """Return the template rendered using Python's str.format()."""
+    context = hookenv.execution_environment()
+    context.update(extra)
+    context.update(kwargs)
+    return template.format(**context)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/unison/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/unison/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..61409b14c843766aa10cb957bb1c3d83ba87c6a7
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/contrib/unison/__init__.py
@@ -0,0 +1,314 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Easy file synchronization among peer units using ssh + unison.
+#
+# For the -joined, -changed, and -departed peer relations, add a call to
+# ssh_authorized_peers() describing the peer relation and the desired
+# user + group. After all peer relations have settled, all hosts should
+# be able to connect to one another via key auth'd ssh as the specified user.
+#
+# Other hooks are then free to synchronize files and directories using
+# sync_to_peers().
+#
+# For a peer relation named 'cluster', for example:
+#
+# cluster-relation-joined:
+# ...
+# ssh_authorized_peers(peer_interface='cluster',
+#                      user='juju_ssh', group='juju_ssh',
+#                      ensure_local_user=True)
+# ...
+#
+# cluster-relation-changed:
+# ...
+# ssh_authorized_peers(peer_interface='cluster',
+#                      user='juju_ssh', group='juju_ssh',
+#                      ensure_local_user=True)
+# ...
+#
+# cluster-relation-departed:
+# ...
+# ssh_authorized_peers(peer_interface='cluster',
+#                      user='juju_ssh', group='juju_ssh',
+#                      ensure_local_user=True)
+# ...
+#
+# Hooks are now free to sync files as easily as:
+#
+# files = ['/etc/fstab', '/etc/apt.conf.d/']
+# sync_to_peers(peer_interface='cluster',
+#               user='juju_ssh', paths=[files])
+#
+# It is assumed the charm itself has set up permissions on each unit
+# such that 'juju_ssh' has read + write permissions. Also assumed
+# that the calling charm takes care of leader delegation.
+#
+# Additionally, files can be synchronized only to a specific unit:
+# sync_to_peer(slave_address, user='juju_ssh',
+#              paths=[files], verbose=False)
+
+import os
+import pwd
+
+from copy import copy
+from subprocess import check_call, check_output
+
+from charmhelpers.core.host import (
+    adduser,
+    add_user_to_group,
+    pwgen,
+    remove_password_expiry,
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    hook_name,
+    relation_ids,
+    related_units,
+    relation_set,
+    relation_get,
+    unit_private_ip,
+    INFO,
+    ERROR,
+)
+
+BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
+            '-fastcheck=true', '-group=false', '-owner=false',
+            '-prefer=newer', '-times=true']
+
+
+def get_homedir(user):
+    try:
+        user = pwd.getpwnam(user)
+        return user.pw_dir
+    except KeyError:
+        log('Could not get homedir for user %s: user exists?'
% (user), ERROR) + raise Exception + + +def create_private_key(user, priv_key_path, key_type='rsa'): + types_bits = { + 'rsa': '2048', + 'ecdsa': '521', + } + if key_type not in types_bits: + log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR) + key_type = 'rsa' + if not os.path.isfile(priv_key_path): + log('Generating new SSH key for user %s.' % user) + cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type, + '-b', types_bits[key_type], '-f', priv_key_path] + check_call(cmd) + else: + log('SSH key already exists at %s.' % priv_key_path) + check_call(['chown', user, priv_key_path]) + check_call(['chmod', '0600', priv_key_path]) + + +def create_public_key(user, priv_key_path, pub_key_path): + if not os.path.isfile(pub_key_path): + log('Generating missing ssh public key @ %s.' % pub_key_path) + cmd = ['ssh-keygen', '-y', '-f', priv_key_path] + p = check_output(cmd).strip() + with open(pub_key_path, 'wb') as out: + out.write(p) + check_call(['chown', user, pub_key_path]) + + +def get_keypair(user): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + priv_key = os.path.join(ssh_dir, 'id_rsa') + pub_key = '%s.pub' % priv_key + + if not os.path.isdir(ssh_dir): + os.mkdir(ssh_dir) + check_call(['chown', '-R', user, ssh_dir]) + + create_private_key(user, priv_key) + create_public_key(user, priv_key, pub_key) + + with open(priv_key, 'r') as p: + _priv = p.read().strip() + + with open(pub_key, 'r') as p: + _pub = p.read().strip() + + return (_priv, _pub) + + +def write_authorized_keys(user, keys): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + auth_keys = os.path.join(ssh_dir, 'authorized_keys') + log('Syncing authorized_keys @ %s.' % auth_keys) + with open(auth_keys, 'w') as out: + for k in keys: + out.write('%s\n' % k) + + +def write_known_hosts(user, hosts): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + known_hosts = os.path.join(ssh_dir, 'known_hosts') + khosts = [] + for host in hosts: + cmd = ['ssh-keyscan', host] + remote_key = check_output(cmd, universal_newlines=True).strip() + khosts.append(remote_key) + log('Syncing known_hosts @ %s.' % known_hosts) + with open(known_hosts, 'w') as out: + for host in khosts: + out.write('%s\n' % host) + + +def ensure_user(user, group=None): + adduser(user, pwgen()) + if group: + add_user_to_group(user, group) + # Remove password expiry (Bug #1686085) + remove_password_expiry(user) + + +def ssh_authorized_peers(peer_interface, user, group=None, + ensure_local_user=False): + """ + Main setup function, should be called from both peer -changed and -joined + hooks with the same parameters. + """ + if ensure_local_user: + ensure_user(user, group) + priv_key, pub_key = get_keypair(user) + hook = hook_name() + if hook == '%s-relation-joined' % peer_interface: + relation_set(ssh_pub_key=pub_key) + elif hook == '%s-relation-changed' % peer_interface or \ + hook == '%s-relation-departed' % peer_interface: + hosts = [] + keys = [] + + for r_id in relation_ids(peer_interface): + for unit in related_units(r_id): + ssh_pub_key = relation_get('ssh_pub_key', + rid=r_id, + unit=unit) + priv_addr = relation_get('private-address', + rid=r_id, + unit=unit) + if ssh_pub_key: + keys.append(ssh_pub_key) + hosts.append(priv_addr) + else: + log('ssh_authorized_peers(): ssh_pub_key ' + 'missing for unit %s, skipping.' 
% unit) + write_authorized_keys(user, keys) + write_known_hosts(user, hosts) + authed_hosts = ':'.join(hosts) + relation_set(ssh_authorized_hosts=authed_hosts) + + +def _run_as_user(user, gid=None): + try: + user = pwd.getpwnam(user) + except KeyError: + log('Invalid user: %s' % user) + raise Exception + uid = user.pw_uid + gid = gid or user.pw_gid + os.environ['HOME'] = user.pw_dir + + def _inner(): + os.setgid(gid) + os.setuid(uid) + return _inner + + +def run_as_user(user, cmd, gid=None): + return check_output(cmd, preexec_fn=_run_as_user(user, gid), cwd='/') + + +def collect_authed_hosts(peer_interface): + '''Iterate through the units on peer interface to find all that + have the calling host in its authorized hosts list''' + hosts = [] + for r_id in (relation_ids(peer_interface) or []): + for unit in related_units(r_id): + private_addr = relation_get('private-address', + rid=r_id, unit=unit) + authed_hosts = relation_get('ssh_authorized_hosts', + rid=r_id, unit=unit) + + if not authed_hosts: + log('Peer %s has not authorized *any* hosts yet, skipping.' % + (unit), level=INFO) + continue + + if unit_private_ip() in authed_hosts.split(':'): + hosts.append(private_addr) + else: + log('Peer %s has not authorized *this* host yet, skipping.' % + (unit), level=INFO) + return hosts + + +def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None, + fatal=False): + """Sync path to an specific peer host + + Propagates exception if operation fails and fatal=True. + """ + cmd = cmd or copy(BASE_CMD) + if not verbose: + cmd.append('-silent') + + # removing trailing slash from directory paths, unison + # doesn't like these. + if path.endswith('/'): + path = path[:(len(path) - 1)] + + cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)] + + try: + log('Syncing local path %s to %s@%s:%s' % (path, user, host, path)) + run_as_user(user, cmd, gid) + except Exception: + log('Error syncing remote files') + if fatal: + raise + + +def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None, + fatal=False): + """Sync paths to an specific peer host + + Propagates exception if any operation fails and fatal=True. + """ + if paths: + for p in paths: + sync_path_to_host(p, host, user, verbose, cmd, gid, fatal) + + +def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None, + gid=None, fatal=False): + """Sync all hosts to an specific path + + The type of group is integer, it allows user has permissions to + operate a directory have a different group id with the user id. + + Propagates exception if any operation fails and fatal=True. + """ + if paths: + for host in collect_authed_hosts(peer_interface): + sync_to_peer(host, user, paths, verbose, cmd, gid, fatal) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/coordinator.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/coordinator.py new file mode 100644 index 0000000000000000000000000000000000000000..59bee3e5b320f6bffa0d89eef7a20888cddb0521 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/coordinator.py @@ -0,0 +1,606 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' +The coordinator module allows you to use Juju's leadership feature to +coordinate operations between units of a service. + +Behavior is defined in subclasses of coordinator.BaseCoordinator. +One implementation is provided (coordinator.Serial), which allows an +operation to be run on a single unit at a time, on a first come, first +served basis. You can trivially define more complex behavior by +subclassing BaseCoordinator or Serial. + +:author: Stuart Bishop + + +Services Framework Usage +======================== + +Ensure a peers relation is defined in metadata.yaml. Instantiate a +BaseCoordinator subclass before invoking ServiceManager.manage(). +Ensure that ServiceManager.manage() is wired up to the leader-elected, +leader-settings-changed, peers relation-changed and peers +relation-departed hooks in addition to any other hooks you need, or your +service will deadlock. + +Ensure calls to acquire() are guarded, so that locks are only requested +when they are really needed (and thus hooks only triggered when necessary). +Failing to do this and calling acquire() unconditionally will put your unit +into a hook loop. Calls to granted() do not need to be guarded. + +For example:: + + from charmhelpers.core import hookenv, services + from charmhelpers import coordinator + + def maybe_restart(servicename): + serial = coordinator.Serial() + if needs_restart(): + serial.acquire('restart') + if serial.granted('restart'): + hookenv.service_restart(servicename) + + services = [dict(service='servicename', + data_ready=[maybe_restart])] + + if __name__ == '__main__': + _ = coordinator.Serial() # Must instantiate before manager.manage() + manager = services.ServiceManager(services) + manager.manage() + + +You can implement a similar pattern using a decorator. If the lock has +not been granted, an attempt to acquire() it will be made if the guard +function returns True. If the lock has been granted, the decorated function +is run as normal:: + + from charmhelpers.core import hookenv, services + from charmhelpers import coordinator + + serial = coordinator.Serial() # Global, instatiated on module import. + + def needs_restart(): + [ ... Introspect state. Return True if restart is needed ... ] + + @serial.require('restart', needs_restart) + def maybe_restart(servicename): + hookenv.service_restart(servicename) + + services = [dict(service='servicename', + data_ready=[maybe_restart])] + + if __name__ == '__main__': + manager = services.ServiceManager(services) + manager.manage() + + +Traditional Usage +================= + +Ensure a peers relation is defined in metadata.yaml. + +If you are using charmhelpers.core.hookenv.Hooks, ensure that a +BaseCoordinator subclass is instantiated before calling Hooks.execute. + +If you are not using charmhelpers.core.hookenv.Hooks, ensure +that a BaseCoordinator subclass is instantiated and its handle() +method called at the start of all your hooks. 
+
+For example::
+
+    import sys
+    from charmhelpers.core import hookenv
+    from charmhelpers import coordinator
+
+    hooks = hookenv.Hooks()
+
+    def maybe_restart():
+        serial = coordinator.Serial()
+        if serial.granted('restart'):
+            hookenv.service_restart('myservice')
+
+    @hooks.hook
+    def config_changed():
+        update_config()
+        serial = coordinator.Serial()
+        if needs_restart():
+            serial.acquire('restart')
+        maybe_restart()
+
+    # Cluster hooks must be wired up.
+    @hooks.hook('cluster-relation-changed', 'cluster-relation-departed')
+    def cluster_relation_changed():
+        maybe_restart()
+
+    # Leader hooks must be wired up.
+    @hooks.hook('leader-elected', 'leader-settings-changed')
+    def leader_settings_changed():
+        maybe_restart()
+
+    [ ... repeat for *all* other hooks you are using ... ]
+
+    if __name__ == '__main__':
+        _ = coordinator.Serial()  # Must instantiate before execute()
+        hooks.execute(sys.argv)
+
+
+You can also use the require decorator. If the lock has not been granted,
+an attempt to acquire() it will be made if the guard function returns True.
+If the lock has been granted, the decorated function is run as normal::
+
+    import sys
+    from charmhelpers.core import hookenv
+    from charmhelpers import coordinator
+
+    hooks = hookenv.Hooks()
+    serial = coordinator.Serial()  # Must instantiate before execute()
+
+    @serial.require('restart', needs_restart)
+    def maybe_restart():
+        hookenv.service_restart('myservice')
+
+    @hooks.hook('install', 'config-changed', 'upgrade-charm',
+                # Peers and leader hooks must be wired up.
+                'cluster-relation-changed', 'cluster-relation-departed',
+                'leader-elected', 'leader-settings-changed')
+    def default_hook():
+        [...]
+        maybe_restart()
+
+    if __name__ == '__main__':
+        hooks.execute(sys.argv)
+
+
+Details
+=======
+
+A simple API is provided similar to traditional locking APIs. A lock
+may be requested using the acquire() method, and the granted() method
+may be used to check if a lock previously requested by acquire() has
+been granted. It doesn't matter how many times acquire() is called in a
+hook.
+
+Locks are released at the end of the hook they are acquired in. This may
+be the current hook if the unit is the leader and the lock is free. It is
+more likely a future hook (probably leader-settings-changed, possibly
+the peers relation-changed or departed hook, potentially any hook).
+
+Whenever a charm needs to perform a coordinated action it will acquire()
+the lock and perform the action immediately if acquisition is
+successful. It will also need to perform the same action in every other
+hook if the lock has been granted.
+
+
+Grubby Details
+--------------
+
+Why do you need to be able to perform the same action in every hook?
+If the unit is the leader, then it may be able to grant its own lock
+and perform the action immediately in the source hook. If the unit is
+the leader and cannot immediately grant the lock, then its only
+guaranteed chance of acquiring the lock is in the peers relation-joined,
+relation-changed or peers relation-departed hooks when another unit has
+released it (the only channel to communicate to the leader is the peers
+relation). If the unit is not the leader, then it is unlikely the lock
+is granted in the source hook (a previous hook must have also made the
+request for this to happen). A non-leader is notified about the lock via
+leader settings. These changes may be visible in any hook, even before
+the leader-settings-changed hook has been invoked.
+Or the requesting unit may be promoted to leader after making a
+request, in which case the lock may be granted in leader-elected or in
+a future peers relation-changed or relation-departed hook.
+
+This could be simpler if leader-settings-changed was invoked on the
+leader. We could then never grant locks except in
+leader-settings-changed hooks giving one place for the operation to be
+performed. Unfortunately this is not the case with Juju 1.23 leadership.
+
+But of course, this doesn't really matter to most people as most people
+seem to prefer the Services Framework or similar reset-the-world
+approaches, rather than the twisty maze of attempting to deduce what
+should be done based on what hook happens to be running (which always
+seems to evolve into reset-the-world anyway when the charm grows beyond
+the trivial).
+
+I chose not to implement a callback model, where a callback was passed
+to acquire to be executed when the lock is granted, because the callback
+may become invalid between making the request and the lock being granted
+due to an upgrade-charm being run in the interim. And it would create
+restrictions, such as no lambdas and callbacks defined at the top level
+of a module, etc. Still, we could implement it on top of what is here,
+e.g. by adding a defer decorator that stores a pickle of itself to disk
+and have BaseCoordinator unpickle and execute them when the locks are
+granted.
+'''
+from datetime import datetime
+from functools import wraps
+import json
+import os.path
+
+from six import with_metaclass
+
+from charmhelpers.core import hookenv
+
+
+# We make BaseCoordinator and subclasses singletons, so that if we
+# need to spill to local storage then only a single instance does so,
+# rather than having multiple instances stomp over each other.
+class Singleton(type):
+    _instances = {}
+
+    def __call__(cls, *args, **kwargs):
+        if cls not in cls._instances:
+            cls._instances[cls] = super(Singleton, cls).__call__(*args,
+                                                                 **kwargs)
+        return cls._instances[cls]
+
+
+class BaseCoordinator(with_metaclass(Singleton, object)):
+    relid = None  # Peer relation-id, set by __init__
+    relname = None
+
+    grants = None  # self.grants[unit][lock] == timestamp
+    requests = None  # self.requests[unit][lock] == timestamp
+
+    def __init__(self, relation_key='coordinator', peer_relation_name=None):
+        '''Instantiate a Coordinator.
+
+        Data is stored on the peers relation and in leadership storage
+        under the provided relation_key.
+
+        The peers relation is identified by peer_relation_name, and defaults
+        to the first one found in metadata.yaml.
+        '''
+        # Most initialization is deferred, since invoking hook tools from
+        # the constructor makes testing hard.
+        self.key = relation_key
+        self.relname = peer_relation_name
+        hookenv.atstart(self.initialize)
+
+        # Ensure that handle() is called, without placing that burden on
+        # the charm author. They still need to do this manually if they
+        # are not using a hook framework.
+        hookenv.atstart(self.handle)
+
+    def initialize(self):
+        if self.requests is not None:
+            return  # Already initialized.
+
+        assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'
+
+        if self.relname is None:
+            self.relname = _implicit_peer_relation_name()
+
+        relids = hookenv.relation_ids(self.relname)
+        if relids:
+            self.relid = sorted(relids)[0]
+
+        # Load our state, from leadership, the peer relationship, and maybe
+        # local state as a fallback. Populates self.requests and self.grants.
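+        # Illustrative shape of the loaded state (unit names hypothetical):
+        #   self.requests == {'myapp/0': {'restart': '2015-01-01 09:00:00.000000Z'}}
+        #   self.grants   == {'myapp/0': {'restart': '2015-01-01 09:00:00.000000Z'}}
+        # A lock is held when the granted timestamp matches the requested one.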
+ self._load_state() + self._emit_state() + + # Save our state if the hook completes successfully. + hookenv.atexit(self._save_state) + + # Schedule release of granted locks for the end of the hook. + # This needs to be the last of our atexit callbacks to ensure + # it will be run first when the hook is complete, because there + # is no point mutating our state after it has been saved. + hookenv.atexit(self._release_granted) + + def acquire(self, lock): + '''Acquire the named lock, non-blocking. + + The lock may be granted immediately, or in a future hook. + + Returns True if the lock has been granted. The lock will be + automatically released at the end of the hook in which it is + granted. + + Do not mindlessly call this method, as it triggers a cascade of + hooks. For example, if you call acquire() every time in your + peers relation-changed hook you will end up with an infinite loop + of hooks. It should almost always be guarded by some condition. + ''' + unit = hookenv.local_unit() + ts = self.requests[unit].get(lock) + if not ts: + # If there is no outstanding request on the peers relation, + # create one. + self.requests.setdefault(lock, {}) + self.requests[unit][lock] = _timestamp() + self.msg('Requested {}'.format(lock)) + + # If the leader has granted the lock, yay. + if self.granted(lock): + self.msg('Acquired {}'.format(lock)) + return True + + # If the unit making the request also happens to be the + # leader, it must handle the request now. Even though the + # request has been stored on the peers relation, the peers + # relation-changed hook will not be triggered. + if hookenv.is_leader(): + return self.grant(lock, unit) + + return False # Can't acquire lock, yet. Maybe next hook. + + def granted(self, lock): + '''Return True if a previously requested lock has been granted''' + unit = hookenv.local_unit() + ts = self.requests[unit].get(lock) + if ts and self.grants.get(unit, {}).get(lock) == ts: + return True + return False + + def requested(self, lock): + '''Return True if we are in the queue for the lock''' + return lock in self.requests[hookenv.local_unit()] + + def request_timestamp(self, lock): + '''Return the timestamp of our outstanding request for lock, or None. + + Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute. + ''' + ts = self.requests[hookenv.local_unit()].get(lock, None) + if ts is not None: + return datetime.strptime(ts, _timestamp_format) + + def handle(self): + if not hookenv.is_leader(): + return # Only the leader can grant requests. + + self.msg('Leader handling coordinator requests') + + # Clear our grants that have been released. + for unit in self.grants.keys(): + for lock, grant_ts in list(self.grants[unit].items()): + req_ts = self.requests.get(unit, {}).get(lock) + if req_ts != grant_ts: + # The request timestamp does not match the granted + # timestamp. Several hooks on 'unit' may have run + # before the leader got a chance to make a decision, + # and 'unit' may have released its lock and attempted + # to reacquire it. This will change the timestamp, + # and we correctly revoke the old grant putting it + # to the end of the queue. + ts = datetime.strptime(self.grants[unit][lock], + _timestamp_format) + del self.grants[unit][lock] + self.released(unit, lock, ts) + + # Grant locks + for unit in self.requests.keys(): + for lock in self.requests[unit]: + self.grant(lock, unit) + + def grant(self, lock, unit): + '''Maybe grant the lock to a unit. 
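+
+        For illustration only, a hypothetical subclass policy that allows
+        two concurrent holders (the lookup mechanism is described below)::
+
+            class TwoAtATime(Serial):
+                def grant_restart(self, lock, unit, granted, queue):
+                    return len(granted) < 2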
+ + The decision to grant the lock or not is made for $lock + by a corresponding method grant_$lock, which you may define + in a subclass. If no such method is defined, the default_grant + method is used. See Serial.default_grant() for details. + ''' + if not hookenv.is_leader(): + return False # Not the leader, so we cannot grant. + + # Set of units already granted the lock. + granted = set() + for u in self.grants: + if lock in self.grants[u]: + granted.add(u) + if unit in granted: + return True # Already granted. + + # Ordered list of units waiting for the lock. + reqs = set() + for u in self.requests: + if u in granted: + continue # In the granted set. Not wanted in the req list. + for _lock, ts in self.requests[u].items(): + if _lock == lock: + reqs.add((ts, u)) + queue = [t[1] for t in sorted(reqs)] + if unit not in queue: + return False # Unit has not requested the lock. + + # Locate custom logic, or fallback to the default. + grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant) + + if grant_func(lock, unit, granted, queue): + # Grant the lock. + self.msg('Leader grants {} to {}'.format(lock, unit)) + self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock] + return True + + return False + + def released(self, unit, lock, timestamp): + '''Called on the leader when it has released a lock. + + By default, does nothing but log messages. Override if you + need to perform additional housekeeping when a lock is released, + for example recording timestamps. + ''' + interval = _utcnow() - timestamp + self.msg('Leader released {} from {}, held {}'.format(lock, unit, + interval)) + + def require(self, lock, guard_func, *guard_args, **guard_kw): + """Decorate a function to be run only when a lock is acquired. + + The lock is requested if the guard function returns True. + + The decorated function is called if the lock has been granted. + """ + def decorator(f): + @wraps(f) + def wrapper(*args, **kw): + if self.granted(lock): + self.msg('Granted {}'.format(lock)) + return f(*args, **kw) + if guard_func(*guard_args, **guard_kw) and self.acquire(lock): + return f(*args, **kw) + return None + return wrapper + return decorator + + def msg(self, msg): + '''Emit a message. Override to customize log spam.''' + hookenv.log('coordinator.{} {}'.format(self._name(), msg), + level=hookenv.INFO) + + def _name(self): + return self.__class__.__name__ + + def _load_state(self): + self.msg('Loading state') + + # All responses must be stored in the leadership settings. + # The leader cannot use local state, as a different unit may + # be leader next time. Which is fine, as the leadership + # settings are always available. + self.grants = json.loads(hookenv.leader_get(self.key) or '{}') + + local_unit = hookenv.local_unit() + + # All requests must be stored on the peers relation. This is + # the only channel units have to communicate with the leader. + # Even the leader needs to store its requests here, as a + # different unit may be leader by the time the request can be + # granted. + if self.relid is None: + # The peers relation is not available. Maybe we are early in + # the units's lifecycle. Maybe this unit is standalone. + # Fallback to using local state. + self.msg('No peer relation. Loading local state') + self.requests = {local_unit: self._load_local_state()} + else: + self.requests = self._load_peer_state() + if local_unit not in self.requests: + # The peers relation has just been joined. Update any state + # loaded from our peers with our local state. 
+ self.msg('New peer relation. Merging local state') + self.requests[local_unit] = self._load_local_state() + + def _emit_state(self): + # Emit this units lock status. + for lock in sorted(self.requests[hookenv.local_unit()].keys()): + if self.granted(lock): + self.msg('Granted {}'.format(lock)) + else: + self.msg('Waiting on {}'.format(lock)) + + def _save_state(self): + self.msg('Publishing state') + if hookenv.is_leader(): + # sort_keys to ensure stability. + raw = json.dumps(self.grants, sort_keys=True) + hookenv.leader_set({self.key: raw}) + + local_unit = hookenv.local_unit() + + if self.relid is None: + # No peers relation yet. Fallback to local state. + self.msg('No peer relation. Saving local state') + self._save_local_state(self.requests[local_unit]) + else: + # sort_keys to ensure stability. + raw = json.dumps(self.requests[local_unit], sort_keys=True) + hookenv.relation_set(self.relid, relation_settings={self.key: raw}) + + def _load_peer_state(self): + requests = {} + units = set(hookenv.related_units(self.relid)) + units.add(hookenv.local_unit()) + for unit in units: + raw = hookenv.relation_get(self.key, unit, self.relid) + if raw: + requests[unit] = json.loads(raw) + return requests + + def _local_state_filename(self): + # Include the class name. We allow multiple BaseCoordinator + # subclasses to be instantiated, and they are singletons, so + # this avoids conflicts (unless someone creates and uses two + # BaseCoordinator subclasses with the same class name, so don't + # do that). + return '.charmhelpers.coordinator.{}'.format(self._name()) + + def _load_local_state(self): + fn = self._local_state_filename() + if os.path.exists(fn): + with open(fn, 'r') as f: + return json.load(f) + return {} + + def _save_local_state(self, state): + fn = self._local_state_filename() + with open(fn, 'w') as f: + json.dump(state, f) + + def _release_granted(self): + # At the end of every hook, release all locks granted to + # this unit. If a hook neglects to make use of what it + # requested, it will just have to make the request again. + # Implicit release is the only way this will work, as + # if the unit is standalone there may be no future triggers + # called to do a manual release. + unit = hookenv.local_unit() + for lock in list(self.requests[unit].keys()): + if self.granted(lock): + self.msg('Released local {} lock'.format(lock)) + del self.requests[unit][lock] + + +class Serial(BaseCoordinator): + def default_grant(self, lock, unit, granted, queue): + '''Default logic to grant a lock to a unit. Unless overridden, + only one unit may hold the lock and it will be granted to the + earliest queued request. + + To define custom logic for $lock, create a subclass and + define a grant_$lock method. + + `unit` is the unit name making the request. + + `granted` is the set of units already granted the lock. It will + never include `unit`. It may be empty. + + `queue` is the list of units waiting for the lock, ordered by time + of request. It will always include `unit`, but `unit` is not + necessarily first. + + Returns True if the lock should be granted to `unit`. + ''' + return unit == queue[0] and not granted + + +def _implicit_peer_relation_name(): + md = hookenv.metadata() + assert 'peers' in md, 'No peer relations in metadata.yaml' + return sorted(md['peers'].keys())[0] + + +# A human readable, sortable UTC timestamp format. +_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ' + + +def _utcnow(): # pragma: no cover + # This wrapper exists as mocking datetime methods is problematic. 
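+    # (Tests can patch coordinator._utcnow itself; datetime.datetime is a
+    # C type whose methods cannot be monkey-patched directly.)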
+ return datetime.utcnow() + + +def _timestamp(): + return _utcnow().strftime(_timestamp_format) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/decorators.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..6ad41ee4121f4c0816935f8b16cd84f972aff22b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/decorators.py @@ -0,0 +1,55 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. + """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/files.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/files.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd82b75709c13da0d534bf4962822984a3c1867 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/files.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/fstab.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/fstab.py new file mode 100644 index 0000000000000000000000000000000000000000..d9fa9152c765c538adad3fd9bc45a46018c89b72 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/fstab.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import os + +__author__ = 'Jorge Niedbalski R. 
' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/hookenv.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/hookenv.py new file mode 100644 index 0000000000000000000000000000000000000000..db7ce7282b4c96c8a33abf309a340377216922ec --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/hookenv.py @@ -0,0 +1,1613 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
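+
+# A minimal usage sketch (illustrative, not part of the library). These
+# helpers shell out to the Juju hook tools, so inside a hook one can write:
+#
+#     from charmhelpers.core import hookenv
+#
+#     hookenv.log('starting', level=hookenv.INFO)    # wraps juju-log
+#     cfg = hookenv.config()                         # wraps config-get --all
+#     hookenv.status_set('active', 'unit is ready')  # wraps status-set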
+ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from enum import Enum +from functools import wraps +from collections import namedtuple +import glob +import os +import json +import yaml +import re +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +from charmhelpers import deprecate + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +TRACE = "TRACE" +MARKER = object() +SH_MAX_ARG = 131071 + + +RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' + 'This may not be compatible with software you are ' + 'running in your shell.') + + +class WORKLOAD_STATES(Enum): + ACTIVE = 'active' + BLOCKED = 'blocked' + MAINTENANCE = 'maintenance' + WAITING = 'waiting' + + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +def function_log(message): + """Write a function progress message""" + command = ['function-log'] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing function-log should not cause failures in unit tests + # Send function_log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + message = "function-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. 
+ try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. + self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ + return local_unit().split('/')[0] + + +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. + """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. 
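+    # A subordinate charm's metadata.yaml carries 'subordinate: true'; the
+    # first co-located unit whose metadata lacks that flag is treated as
+    # the principal.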
+ for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + if not md: + continue + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? + >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path) and os.stat(self.path).st_size: + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Found but was unable to parse previous config data, ' + 'ignoring which will report all values as changed - {}' + .format(str(e)), level=ERROR) + return + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. 
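+
+        For example (hypothetical key, mirroring the class docstring)::
+
+            config = hookenv.config()
+            config['mykey'] = 'myval'
+            config.save()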
+ + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. + + """ + with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) + json.dump(self, f) + + def _implicit_save(self): + if self.implicit_save: + self.save() + + +_cache_config = None + + +def config(scope=None): + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. + :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] + try: + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) + if scope is not None: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. + if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
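+        # Dump the settings as YAML to a named temp file, hand the file name
+        # to relation-set, then remove the file once the call returns.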
+ with tempfile.NamedTemporaryFile(delete=False) as settings_file: + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) + subprocess.check_call( + relation_cmd_line + ["--file", settings_file.name]) + os.remove(settings_file.name) + else: + for key, value in settings.items(): + if value is None: + relation_cmd_line.append('{}='.format(key)) + else: + relation_cmd_line.append('{}={}'.format(key, value)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + +@cached +def relation_ids(reltype=None): + """A list of relation_ids""" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] + return [] + + +@cached +def related_units(relid=None): + """A list of related units""" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] + + +def expected_peer_units(): + """Get a generator for units we expect to join peer relation based on + goal-state. + + The local unit is excluded from the result to make it easy to gauge + completion of all peers joining the relation with existing hook tools. + + Example usage: + log('peer {} of {} joined peer relation' + .format(len(related_units()), + len(list(expected_peer_units())))) + + This function will raise NotImplementedError if used with juju versions + without goal-state support. + + :returns: iterator + :rtype: types.GeneratorType + :raises: NotImplementedError + """ + if not has_juju_version("2.4.0"): + # goal-state first appeared in 2.4.0. + raise NotImplementedError("goal-state") + _goal_state = goal_state() + return (key for key in _goal_state['units'] + if '/' in key and key != local_unit()) + + +def expected_related_units(reltype=None): + """Get a generator for units we expect to join relation based on + goal-state. + + Note that you can not use this function for the peer relation, take a look + at expected_peer_units() for that. + + This function will raise KeyError if you request information for a + relation type for which juju goal-state does not have information. It will + raise NotImplementedError if used with juju versions without goal-state + support. + + Example usage: + log('participant {} of {} joined relation {}' + .format(len(related_units()), + len(list(expected_related_units())), + relation_type())) + + :param reltype: Relation type to list data for, default is to list data for + the realtion type we are currently executing a hook for. + :type reltype: str + :returns: iterator + :rtype: types.GeneratorType + :raises: KeyError, NotImplementedError + """ + if not has_juju_version("2.4.4"): + # goal-state existed in 2.4.0, but did not list individual units to + # join a relation in 2.4.1 through 2.4.3. 
(LP: #1794739) + raise NotImplementedError("goal-state relation unit count") + reltype = reltype or relation_type() + _goal_state = goal_state() + return (key for key in _goal_state['relations'][reltype] if '/' in key) + + +@cached +def relation_for_unit(unit=None, rid=None): + """Get the json represenation of a unit's relation""" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + """Get relations of a specific relation ID""" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + """Get relations of a specific type""" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object. + + """ + basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) + unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: + return yaml.safe_load(md) + + +@cached +def relation_types(): + """Get a list of relation types supported by this charm""" + rel_types = [] + md = metadata() + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + return rel_types + + +@cached +def peer_relation_id(): + '''Get the peers relation id if a peers relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + +@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. 
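+
+    Example, assuming metadata.yaml declares a 'website' relation under
+    'provides' with interface 'http'::
+
+        relation_to_role_and_interface('website')  # ('provides', 'http')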
+ """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. 
+ if not icmp: + raise + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _port_op('open-port', port, protocol) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _port_op('close-port', port, protocol) + + +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + +@cached +def unit_get(attribute): + """Get the unit ID for the remote unit""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +@cached +def storage_get(attribute=None, storage_id=None): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=None): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. + + Example:: + + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + pass # your code here + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + pass # your code here + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, config_save=None): + super(Hooks, self).__init__() + self._hooks = {} + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. 
+ if config_save is not None: + config().implicit_save = config_save + + def register(self, name, function): + """Register a hook""" + self._hooks[name] = function + + def execute(self, args): + """Execute a registered hook based on args[0]""" + _run_atstart() + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + """Decorator, registering them as hooks""" + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +class NoNetworkBinding(Exception): + pass + + +def charm_dir(): + """Return the root directory of the current charm""" + d = os.environ.get('JUJU_CHARM_DIR') + if d is not None: + return d + return os.environ.get('CHARM_DIR') + + +def cmd_exists(cmd): + """Return True if the specified cmd exists in the path""" + return any( + os.access(os.path.join(path, cmd), os.X_OK) + for path in os.environ["PATH"].split(os.pathsep) + ) + + +@cached +@deprecate("moved to function_get()", log=log) +def action_get(key=None): + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_get`. + + Gets the value of an action parameter, or all key/value param pairs. + """ + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +@cached +def function_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['function-get'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-get'] + + if key is not None: + cmd.append(key) + cmd.append('--format=json') + function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return function_data + + +@deprecate("moved to function_set()", log=log) +def action_set(values): + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_set`. + + Sets the values to be returned after the action finishes. + """ + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def function_set(values): + """Sets the values to be returned after the function finishes""" + cmd = ['function-set'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-set'] + + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@deprecate("moved to function_fail()", log=log) +def action_fail(message): + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_fail`. + + Sets the action status to failed and sets the error message. + + The results set by action_set are preserved. + """ + subprocess.check_call(['action-fail', message]) + + +def function_fail(message): + """Sets the function status to failed and sets the error message. + + The results set by function_set are preserved.""" + cmd = ['function-fail'] + # Fallback for older charms. 
+ if not cmd_exists('function-fail'): + cmd = ['action-fail'] + cmd.append(message) + + subprocess.check_call(cmd) + + +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def function_name(): + """Get the name of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_NAME') or action_name() + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def function_id(): + """Get the ID of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_ID') or action_uuid() + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + +def function_tag(): + """Get the tag for the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() + + +def status_set(workload_state, message, application=False): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message instead. + + workload_state -- valid juju workload state. str or WORKLOAD_STATES + message -- status update message + application -- Whether this is an application state set + """ + bad_state_msg = '{!r} is not a valid workload state' + + if isinstance(workload_state, str): + try: + # Convert string to enum. + workload_state = WORKLOAD_STATES[workload_state.upper()] + except KeyError: + raise ValueError(bad_state_msg.format(workload_state)) + + if workload_state not in WORKLOAD_STATES: + raise ValueError(bad_state_msg.format(workload_state)) + + cmd = ['status-set'] + if application: + cmd.append('--application') + cmd.extend([workload_state.value, message]) + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state.value, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" + + """ + cmd = ['status-get', "--format=json", "--include-data"] + try: + raw_status = subprocess.check_output(cmd) + except OSError as e: + if e.errno == errno.ENOENT: + return ('unknown', "") + else: + raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + @wraps(f) + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. 
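+    For example::
+
+        application_version_set('9.5.3')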
""" + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +@cached +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.items(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. The and provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The and provided must match a payload that has been previously + registered with juju using payload-register. The must be one of the + follow: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def resource_get(name): + """used to fetch the resource path of the given name. + + must match a name of defined resource in metadata.yaml + + returns either a path or False if resource not available + """ + if not name: + return False + + cmd = ['resource-get', name] + try: + return subprocess.check_output(cmd).decode('UTF-8') + except subprocess.CalledProcessError: + return False + + +@cached +def juju_version(): + """Full version string (eg. 
+    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+    return subprocess.check_output([jujud, 'version'],
+                                   universal_newlines=True).strip()
+
+
+def has_juju_version(minimum_version):
+    """Return True if the Juju version is at least the provided version"""
+    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+    '''Schedule a callback to run before the main hook.
+
+    Callbacks are run in the order they were added.
+
+    This is useful for modules and classes to perform initialization
+    and inject behavior. In particular:
+
+        - Run common code before all of your hooks, such as logging
+          the hook name or interesting relation data.
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Deprecated since Juju 2.3; use network_get()
+
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation or extra-binding
+    :return: string. The primary IP address for the named binding
+    :raise: NotImplementedError if run on Juju < 2.0
+    '''
+    cmd = ['network-get', '--primary-address', binding]
+    try:
+        response = subprocess.check_output(
+            cmd,
+            stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    except CalledProcessError as e:
+        if 'no network config found for binding' in e.output.decode('UTF-8'):
+            raise NoNetworkBinding("No network binding for {}"
+                                   .format(binding))
+        else:
+            raise
+    return response
+
+
+def network_get(endpoint, relation_id=None):
+    """
+    Retrieve the network details for a relation endpoint
+
+    :param endpoint: string. The name of a relation endpoint
+    :param relation_id: int. The ID of the relation for the current context.
+    :return: dict. The loaded YAML output of the network-get query.
+    :raise: NotImplementedError if request not supported by the Juju version.
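+
+    Example, with an illustrative 'website' endpoint (the keys present in
+    the result depend on the Juju version)::
+
+        details = network_get('website')
+        addresses = details.get('bind-addresses', [])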
+ """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + return yaml.safe_load(response) + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. + + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. + + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. 
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
+
+
+def unit_doomed(unit=None):
+    """Determines if the unit is being removed from the model
+
+    Requires Juju 2.4.1.
+
+    :param unit: string unit name, defaults to local_unit
+    :side effect: calls goal_state
+    :side effect: calls local_unit
+    :side effect: calls has_juju_version
+    :return: True if the unit is being removed, already gone, or never existed
+    """
+    if not has_juju_version("2.4.1"):
+        # We cannot risk blindly returning False for 'we don't know',
+        # because that could cause data loss; if call sites don't
+        # need an accurate answer, they likely don't need this helper
+        # at all.
+        # goal-state existed in 2.4.0, but did not handle removals
+        # correctly until 2.4.1.
+        raise NotImplementedError("is_doomed")
+    if unit is None:
+        unit = local_unit()
+    gs = goal_state()
+    units = gs.get('units', {})
+    if unit not in units:
+        return True
+    # I don't think 'dead' units ever show up in the goal-state, but
+    # check anyway in addition to 'dying'.
+    return units[unit]['status'] in ('dying', 'dead')
+
+
+def env_proxy_settings(selected_settings=None):
+    """Get proxy settings from process environment variables.
+
+    Get charm proxy settings from environment variables that correspond to
+    juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2,
+    see lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
+    application that reacts to proxy settings passed as environment variables.
+    Some applications support lowercase or uppercase notation (e.g. curl),
+    some support only lowercase (e.g. wget), and there are also subjectively
+    rare cases of only uppercase notation support. no_proxy CIDR and wildcard
+    support also varies between runtimes and applications as there is no
+    enforced standard.
+
+    Some applications may connect to multiple destinations and expose config
+    options that would affect only proxy settings for a specific destination;
+    these should be handled in charms in an application-specific manner.
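+
+    A minimal usage sketch::
+
+        # Only render HTTP(S) proxy variables, e.g. for curl or wget.
+        proxies = env_proxy_settings(['http', 'https'])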
+
+    :param selected_settings: format only a subset of possible settings
+    :type selected_settings: list
+    :rtype: Optional[dict[str, str]]
+    """
+    SUPPORTED_SETTINGS = {
+        'http': 'HTTP_PROXY',
+        'https': 'HTTPS_PROXY',
+        'no_proxy': 'NO_PROXY',
+        'ftp': 'FTP_PROXY'
+    }
+    if selected_settings is None:
+        selected_settings = SUPPORTED_SETTINGS
+
+    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
+                     if k in selected_settings]
+    proxy_settings = {}
+    for var in selected_vars:
+        var_val = os.getenv(var)
+        if var_val:
+            proxy_settings[var] = var_val
+            proxy_settings[var.lower()] = var_val
+        # Now handle juju-prefixed environment variables. The legacy vs new
+        # environment variable usage is mutually exclusive.
+        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+        if charm_var_val:
+            proxy_settings[var] = charm_var_val
+            proxy_settings[var.lower()] = charm_var_val
+    if 'no_proxy' in proxy_settings:
+        if _contains_range(proxy_settings['no_proxy']):
+            log(RANGE_WARNING, level=WARNING)
+    return proxy_settings if proxy_settings else None
+
+
+def _contains_range(addresses):
+    """Check for cidr or wildcard domain in a string.
+
+    Given a string comprising a comma separated list of ip addresses
+    and domain names, determine whether the string contains IP ranges
+    or wildcard domains.
+
+    :param addresses: comma separated list of domains and ip addresses.
+    :type addresses: str
+    """
+    return (
+        # Test for cidr (e.g. 10.20.20.0/24)
+        "/" in addresses or
+        # Test for wildcard domains (*.foo.com or .foo.com)
+        "*" in addresses or
+        addresses.startswith(".") or
+        ",." in addresses or
+        " ." in addresses)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host.py
new file mode 100644
index 0000000000000000000000000000000000000000..b33ac906d9eeb198210dceb069724ac9a35652ea
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host.py
@@ -0,0 +1,1104 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for working with the host system"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+#  Nick Moffitt
+#  Matthew Wedgwood
+
+import os
+import re
+import pwd
+import glob
+import grp
+import random
+import string
+import subprocess
+import hashlib
+import functools
+import itertools
+import six
+
+from contextlib import contextmanager
+from collections import OrderedDict
+from .hookenv import log, INFO, DEBUG, local_unit, charm_name
+from .fstab import Fstab
+from charmhelpers.osplatform import get_platform
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.host_factory.ubuntu import (  # NOQA:F401
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+        CompareHostReleases,
+        get_distrib_codename,
+        arch
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.host_factory.centos import (  # NOQA:F401
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+        CompareHostReleases,
+    )  # flake8: noqa -- ignore F401 for this import
+
+UPDATEDB_PATH = '/etc/updatedb.conf'
+
+
+def service_start(service_name, **kwargs):
+    """Start a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be started. The
+    following example starts the ceph-osd service for instance id=4:
+
+    service_start('ceph-osd', id=4)
+
+    :param service_name: the name of the service to start
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be stopped. The
+    following example stops the ceph-osd service for instance id=4:
+
+    service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be restarted. The
+    following example restarts the ceph-osd service for instance id=4:
+
+    service_restart('ceph-osd', id=4)
+
+    :param service_name: the name of the service to restart
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    # Pass kwargs through so non-systemd init systems receive them, as the
+    # docstring promises.
+    return service('restart', service_name, **kwargs)
+
+
+def service_reload(service_name, restart_on_failure=False, **kwargs):
+    """Reload a system service, optionally falling back to restart if
+    reload fails.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be reloaded. The
+    following example reloads the ceph-osd service for instance id=4:
+
+    service_reload('ceph-osd', id=4)
+
+    :param service_name: the name of the service to reload
+    :param restart_on_failure: boolean indicating whether to fallback to a
+                               restart if the reload fails.
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    service_result = service('reload', service_name, **kwargs)
+    if not service_result and restart_on_failure:
+        service_result = service('restart', service_name, **kwargs)
+    return service_result
+
+
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
+                  **kwargs):
+    """Pause a system service.
+
+    Stop it, and prevent it from starting again at boot.
+
+    :param service_name: the name of the service to pause
+    :param init_dir: path to the upstart init directory
+    :param initd_dir: path to the sysv init directory
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems which do not support
+                     key=value arguments via the commandline.
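+
+    Example (using an illustrative service name)::
+
+        service_pause('apache2')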
+    """
+    stopped = True
+    if service_running(service_name, **kwargs):
+        stopped = service_stop(service_name, **kwargs)
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd():
+        service('disable', service_name)
+        service('mask', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        with open(override_path, 'w') as fh:
+            fh.write("manual\n")
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "disable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
+    return stopped
+
+
+def service_resume(service_name, init_dir="/etc/init",
+                   initd_dir="/etc/init.d", **kwargs):
+    """Resume a system service.
+
+    Re-enable starting again at boot. Start the service.
+
+    :param service_name: the name of the service to resume
+    :param init_dir: the path to the init dir
+    :param initd_dir: the path to the initd dir
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd():
+        service('unmask', service_name)
+        service('enable', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        if os.path.exists(override_path):
+            os.unlink(override_path)
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "enable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
+    started = service_running(service_name, **kwargs)
+
+    if not started:
+        started = service_start(service_name, **kwargs)
+    return started
+
+
+def service(action, service_name, **kwargs):
+    """Control a system service.
+
+    :param action: the action to take on the service
+    :param service_name: the name of the service to perform the action on
+    :param **kwargs: additional params to be passed to the service command in
+                     the form of key=value.
+    """
+    if init_is_systemd():
+        cmd = ['systemctl', action, service_name]
+    else:
+        cmd = ['service', service_name, action]
+        for key, value in six.iteritems(kwargs):
+            parameter = '%s=%s' % (key, value)
+            cmd.append(parameter)
+    return subprocess.call(cmd) == 0
+
+
+_UPSTART_CONF = "/etc/init/{}.conf"
+_INIT_D_CONF = "/etc/init.d/{}"
+
+
+def service_running(service_name, **kwargs):
+    """Determine whether a system service is running.
+
+    :param service_name: the name of the service
+    :param **kwargs: additional args to pass to the service command. This is
+                     used to pass additional key=value arguments to the
+                     service command line for managing specific instance
+                     units (e.g. service ceph-osd status id=2). The kwargs
+                     are ignored in systemd services.
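+
+    Example (illustrative service name)::
+
+        if service_running('nginx'):
+            service_reload('nginx')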
+ """ + if init_is_systemd(): + return service('is-active', service_name) + else: + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): + # Check System V scripts init script return codes + return service('status', service_name) + return False + + +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. + + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created + :param str home_dir: Home directory for user + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + 
"""Add a group to the system + + Will log but otherwise succeed if the group already exists. + + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) + except KeyError: + log('creating group {0}'.format(group_name)) + add_new_group(group_name, system_group, gid) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = ['gpasswd', '-a', username, group] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + + +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not 
+            log("Removing non-directory file {} prior to mkdir()".format(path))
+            os.unlink(realpath)
+            os.makedirs(realpath, perms)
+    elif not path_exists:
+        os.makedirs(realpath, perms)
+    os.chown(realpath, uid, gid)
+    os.chmod(realpath, perms)
+
+
+def write_file(path, content, owner='root', group='root', perms=0o444):
+    """Create or overwrite a file with the contents of a byte string."""
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+    # let's see if we can grab the file and compare the contents, to avoid
+    # doing a write.
+    existing_content = None
+    existing_uid, existing_gid, existing_perms = None, None, None
+    try:
+        with open(path, 'rb') as target:
+            existing_content = target.read()
+        stat = os.stat(path)
+        existing_uid, existing_gid, existing_perms = (
+            stat.st_uid, stat.st_gid, stat.st_mode
+        )
+    except Exception:
+        pass
+    if content != existing_content:
+        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
+            level=DEBUG)
+        with open(path, 'wb') as target:
+            os.fchown(target.fileno(), uid, gid)
+            os.fchmod(target.fileno(), perms)
+            if six.PY3 and isinstance(content, six.string_types):
+                content = content.encode('UTF-8')
+            target.write(content)
+        return
+    # the contents were the same, but we might still need to change the
+    # ownership or permissions.
+    if existing_uid != uid:
+        log("Changing uid on already existing content: {} -> {}"
+            .format(existing_uid, uid), level=DEBUG)
+        os.chown(path, uid, -1)
+    if existing_gid != gid:
+        log("Changing gid on already existing content: {} -> {}"
+            .format(existing_gid, gid), level=DEBUG)
+        os.chown(path, -1, gid)
+    if existing_perms != perms:
+        log("Changing permissions on existing content: {} -> {}"
+            .format(existing_perms, perms), level=DEBUG)
+        os.chmod(path, perms)
+
+
+def fstab_remove(mp):
+    """Remove the given mountpoint entry from /etc/fstab"""
+    return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+    """Adds the given device entry to the /etc/fstab file"""
+    return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
+    """Mount a filesystem at a particular mountpoint"""
+    cmd_args = ['mount']
+    if options is not None:
+        cmd_args.extend(['-o', options])
+    cmd_args.extend([device, mountpoint])
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_add(device, mountpoint, filesystem, options=options)
+    return True
+
+
+def umount(mountpoint, persist=False):
+    """Unmount a filesystem"""
+    cmd_args = ['umount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_remove(mountpoint)
+    return True
+
+
+def mounts():
+    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+    with open('/proc/mounts') as f:
+        # [['/mount/point','/dev/path'],[...]]
+        system_mounts = [m[1::-1] for m in [l.strip().split()
+                                            for l in f.readlines()]]
+    return system_mounts
+
+
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
+    if os.path.exists(path):
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
+        return h.hexdigest()
+    else:
+        return None
+
+
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+                   Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+                          Can be any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    """A class derived from ValueError to indicate the checksum failed."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
+    """Restart services based on configuration files changing
+
+    This function is used as a decorator, for example::
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
+        })
+        def config_changed():
+            pass  # your code here
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    ceph_client_changed function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: DEFAULT false; whether to stop, start or restart
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
+
+
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function to perform the restart_on_change function.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
+
+
+def pwgen(length=None):
+    """Generate a random password."""
+    if length is None:
+        # A weak PRNG is fine for picking a random length
+        length = random.choice(range(35, 45))
+    alphanumeric_chars = [
+        l for l in (string.ascii_letters + string.digits)
+        if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
+    random_chars = [
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
+    return(''.join(random_chars))
+
+
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
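+
+    Example (illustrative interface name)::
+
+        master = get_bond_master('eth0')  # e.g. 'bond0', or None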
+
+    NOTE: the provided interface is expected to be physical
+    """
+    if interface:
+        iface_path = '/sys/class/net/%s' % (interface)
+        if os.path.exists(iface_path):
+            if '/virtual/' in os.path.realpath(iface_path):
+                return None
+
+            master = os.path.join(iface_path, 'master')
+            if os.path.exists(master):
+                master = os.path.realpath(master)
+                # make sure it is a bond master
+                if os.path.exists(os.path.join(master, 'bonding')):
+                    return os.path.basename(master)
+
+    return None
+
+
+def list_nics(nic_type=None):
+    """Return a list of nics of given type(s)"""
+    if isinstance(nic_type, six.string_types):
+        int_types = [nic_type]
+    else:
+        int_types = nic_type
+
+    interfaces = []
+    if nic_type:
+        for int_type in int_types:
+            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
+            ip_output = subprocess.check_output(cmd).decode('UTF-8')
+            ip_output = ip_output.split('\n')
+            ip_output = (line for line in ip_output if line)
+            for line in ip_output:
+                if line.split()[1].startswith(int_type):
+                    matched = re.search('.*: (' + int_type +
+                                        r'[0-9]+\.[0-9]+)@.*', line)
+                    if matched:
+                        iface = matched.groups()[0]
+                    else:
+                        iface = line.split()[1].replace(":", "")
+
+                    if iface not in interfaces:
+                        interfaces.append(iface)
+    else:
+        cmd = ['ip', 'a']
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+        ip_output = (line.strip() for line in ip_output if line)
+
+        key = re.compile(r'^[0-9]+:\s+(.+):')
+        for line in ip_output:
+            matched = re.search(key, line)
+            if matched:
+                iface = matched.group(1)
+                iface = iface.partition("@")[0]
+                if iface not in interfaces:
+                    interfaces.append(iface)
+
+    return interfaces
+
+
+def set_nic_mtu(nic, mtu):
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
+    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
+    subprocess.check_call(cmd)
+
+
+def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
+    cmd = ['ip', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+    mtu = ""
+    for line in ip_output:
+        words = line.split()
+        if 'mtu' in words:
+            mtu = words[words.index("mtu") + 1]
+    return mtu
+
+
+def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) for a network interface."""
+    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(cmd).decode('UTF-8')
+    hwaddr = ""
+    words = ip_output.split()
+    if 'link/ether' in words:
+        hwaddr = words[words.index('link/ether') + 1]
+    return hwaddr
+
+
+@contextmanager
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specified directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
+    cur = os.getcwd()
+    try:
+        yield os.chdir(directory)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
+    """Recursively change user and group ownership of files and directories
+    in given path. Doesn't chown path itself by default, only its children.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    :param bool follow_links: Also follow and chown links if True
+    :param bool chowntopdir: Also chown path itself if True
+    """
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+    if follow_links:
+        chown = os.chown
+    else:
+        chown = os.lchown
+
+    if chowntopdir:
+        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
+        if not broken_symlink:
+            chown(path, uid, gid)
+    for root, dirs, files in os.walk(path, followlinks=follow_links):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                chown(full, uid, gid)
+
+
+def lchownr(path, owner, group):
+    """Recursively change user and group ownership of files and directories
+    in a given path, not following symbolic links. See the documentation for
+    'os.lchown' for more information.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    """
+    chownr(path, owner, group, follow_links=False)
+
+
+def owner(path):
+    """Returns a tuple containing the username & groupname owning the path.
+
+    :param str path: the string path to retrieve the ownership
+    :return tuple(str, str): A (username, groupname) tuple containing the
+                             name of the user and group owning the path.
+    :raises OSError: if the specified path does not exist
+    """
+    stat = os.stat(path)
+    username = pwd.getpwuid(stat.st_uid)[0]
+    groupname = grp.getgrgid(stat.st_gid)[0]
+    return username, groupname
+
+
+def get_total_ram():
+    """The total amount of system RAM in bytes.
+
+    This is what is reported by the OS, and may be overcommitted when
+    there are multiple containers hosted on the same machine.
+    """
+    with open('/proc/meminfo', 'r') as f:
+        for line in f.readlines():
+            if line:
+                key, value, unit = line.split()
+                if key == 'MemTotal:':
+                    assert unit == 'kB', 'Unknown unit'
+                    return int(value) * 1024  # Classic, not KiB.
+        raise NotImplementedError()
+
+
+UPSTART_CONTAINER_TYPE = '/run/container_type'
+
+
+def is_container():
+    """Determine whether unit is running in a container
+
+    @return: boolean indicating if unit is in a container
+    """
+    if init_is_systemd():
+        # Detect using systemd-detect-virt
+        return subprocess.call(['systemd-detect-virt',
+                                '--container']) == 0
+    else:
+        # Detect using upstart container file marker
+        return os.path.exists(UPSTART_CONTAINER_TYPE)
+
+
+def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+    """Adds the specified path to mlocate's updatedb.conf PRUNEPATHS list.
+
+    This method has no effect if the path specified by updatedb_path does not
+    exist or is not a file.
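+
+    Example (illustrative path)::
+
+        add_to_updatedb_prunepath('/srv/data')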
+
+    @param path: string the path to add to the updatedb.conf PRUNEPATHS value
+    @param updatedb_path: the path to the updatedb.conf file
+    """
+    if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
+        # If the updatedb.conf file doesn't exist then don't attempt to update
+        # the file as the package providing mlocate may not be installed on
+        # the local system
+        return
+
+    with open(updatedb_path, 'r+') as f_id:
+        updatedb_text = f_id.read()
+        output = updatedb(updatedb_text, path)
+        f_id.seek(0)
+        f_id.write(output)
+        f_id.truncate()
+
+
+def updatedb(updatedb_text, new_path):
+    """Add new_path to the PRUNEPATHS line in the given updatedb.conf text."""
+    lines = [line for line in updatedb_text.split("\n")]
+    for i, line in enumerate(lines):
+        if line.startswith("PRUNEPATHS="):
+            paths_line = line.split("=")[1].replace('"', '')
+            paths = paths_line.split(" ")
+            if new_path not in paths:
+                paths.append(new_path)
+                lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
+    output = "\n".join(lines)
+    return output
+
+
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
+    """ Modulo distribution
+
+    This helper uses the unit number, a modulo value and a constant wait time
+    to produce a calculated wait time distribution. This is useful in large
+    scale deployments to distribute load during an expensive operation such as
+    service restarts.
+
+    If you have 1000 nodes that need to restart 100 at a time, 1 minute apart:
+
+      time.wait(modulo_distribution(modulo=100, wait=60))
+      restart()
+
+    If you need restarts to happen serially set modulo to the exact number of
+    nodes and set a high constant wait time:
+
+      time.wait(modulo_distribution(modulo=10, wait=120))
+      restart()
+
+    @param modulo: int The modulo number creates the group distribution
+    @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
+    @return: int Calculated time to wait for unit operation
+    """
+    unit_number = int(local_unit().split('/')[1])
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
+
+
+def install_ca_cert(ca_cert, name=None):
+    """
+    Install the given cert as a trusted CA.
+
+    The ``name`` is the stem of the filename where the cert is written, and if
+    not provided, it will default to ``juju-{charm_name}``.
+
+    If the cert is empty or None, or is unchanged, nothing is done.
+    """
+    if not ca_cert:
+        return
+    if not isinstance(ca_cert, bytes):
+        ca_cert = ca_cert.encode('utf8')
+    if not name:
+        name = 'juju-{}'.format(charm_name())
+    cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
+    new_hash = hashlib.md5(ca_cert).hexdigest()
+    if file_hash(cert_file) == new_hash:
+        return
+    log("Installing new CA cert at: {}".format(cert_file), level=INFO)
+    write_file(cert_file, ca_cert)
+    subprocess.check_call(['update-ca-certificates', '--fresh'])
+
+
+def get_system_env(key, default=None):
+    """Get data from system environment as represented in ``/etc/environment``.
+
+    :param key: Key to look up
+    :type key: str
+    :param default: Value to return if key is not found
+    :type default: any
+    :returns: Value for key if found or contents of default parameter
+    :rtype: any
+    :raises: subprocess.CalledProcessError
+    """
+    env_file = '/etc/environment'
+    # use the shell and env(1) to parse the global environments file. 
This is + # done to get the correct result even if the user has shell variable + # substitutions or other shell logic in that file. + output = subprocess.check_output( + ['env', '-i', '/bin/bash', '-c', + 'set -a && source {} && env'.format(env_file)], + universal_newlines=True) + for k, v in (line.split('=', 1) + for line in output.splitlines() if '=' in line): + if k == key: + return v + else: + return default diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host_factory/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host_factory/centos.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host_factory/centos.py new file mode 100644 index 0000000000000000000000000000000000000000..7781a3961f23ce0b161ae08b11710466af8de814 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,72 @@ +import subprocess +import yum +import os + +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
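+
+    Example (illustrative package name)::
+
+        if cmp_pkgrevno('httpd', '2.4') >= 0:
+            pass  # installed httpd is at least version 2.4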
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host_factory/ubuntu.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 0000000000000000000000000000000000000000..3edc0687275b29762b45ebf3fe1b045bc9f568b2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,116 @@ +import subprocess + +from charmhelpers.core.hookenv import cached +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', + 'artful', + 'bionic', + 'cosmic', + 'disco', + 'eoan', + 'focal' +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def get_distrib_codename(): + """Return the codename of the distribution + :returns: The codename + :rtype: str + """ + return lsb_release()['DISTRIB_CODENAME'].lower() + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + from charmhelpers.fetch import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@cached +def arch(): + """Return the package architecture as a string. 
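+
+    For example 'amd64' on 64-bit Intel machines.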
+ + :returns: the architecture + :rtype: str + :raises: subprocess.CalledProcessError if dpkg command fails + """ + return subprocess.check_output( + ['dpkg', '--print-architecture'] + ).rstrip().decode('UTF-8') diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/hugepage.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/hugepage.py new file mode 100644 index 0000000000000000000000000000000000000000..54b5b5e2fcf81eea5f2ebfbceb620ea68d725584 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/hugepage.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. + + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..e01f4f8ba73ee0d5ab7553740c2590a50e42f96d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 
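hugepage_support() above bundles the group, sysctl, mount-point and fstab steps into a single call. A hedged example of how a charm might reserve 2MB pages for a hypervisor (the user name and page count are illustrative, and the call requires root)::

    from charmhelpers.core.hugepage import hugepage_support

    # Reserve 1024 x 2MB hugepages, grant access to libvirt-qemu via
    # the default 'hugetlb' group, and mount at /run/hugepages/kvm.
    hugepage_support('libvirt-qemu', nr_hugepages=1024, mount=True)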
Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import subprocess + +from charmhelpers.osplatform import get_platform +from charmhelpers.core.hookenv import ( + log, + INFO +) + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401 + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401 + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import + +__author__ = "Jorge Niedbalski " + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + subprocess.check_call(cmd) + if persist: + persistent_modprobe(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return subprocess.check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return subprocess.check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel_factory/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel_factory/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel_factory/centos.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel_factory/centos.py new file mode 100644 index 0000000000000000000000000000000000000000..1c402c1157900ff1ad5c6c296a409c9e8fb96d2b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel_factory/centos.py @@ -0,0 +1,17 @@ +import subprocess +import os + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + if not os.path.exists('/etc/rc.modules'): + open('/etc/rc.modules', 'a') + os.chmod('/etc/rc.modules', 0o111) + with open('/etc/rc.modules', 'r+') as modules: + if module not in modules.read(): + modules.write('modprobe %s\n' % module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["dracut", "-f", version]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel_factory/ubuntu.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel_factory/ubuntu.py new file mode 100644 index 0000000000000000000000000000000000000000..3de372fd3df38fe151cf79243f129cb504516f22 --- /dev/null
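The kernel helpers above delegate persistence to the platform-specific persistent_modprobe(); a short usage sketch (the module name is illustrative)::

    from charmhelpers.core.kernel import is_module_loaded, modprobe

    if not is_module_loaded('ip_vs'):
        # Loads the module now and records it in /etc/modules (Ubuntu)
        # or /etc/rc.modules (CentOS) so it survives a reboot.
        modprobe('ip_vs', persist=True)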
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/kernel_factory/ubuntu.py @@ -0,0 +1,13 @@ +import subprocess + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module + "\n") + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/services/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/services/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..61fd074edc09de434859e48ae1b36baef0503708 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/services/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/services/base.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/services/base.py new file mode 100644 index 0000000000000000000000000000000000000000..179ad4f0c367dd6b13c10b201c3752d1c8daf05e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/services/base.py @@ -0,0 +1,362 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import json +from inspect import getargspec +from collections import Iterable, OrderedDict + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": <service name>, + "required_data": <list of required data contexts>, + "provided_data": <list of provided data contexts>, + "data_ready": <one or more callbacks>, + "data_lost": <one or more callbacks>, + "start": <one or more callbacks>, + "stop": <one or more callbacks>, + "ports": <list of ports to manage>, + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data).
+ Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = OrderedDict() + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hookenv._run_atstart() + try: + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.reconfigure_services() + self.provide_data() + except SystemExit as x: + if x.code is None or x.code == 0: + hookenv._run_atexit() + hookenv._run_atexit() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. 
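In practice manage() is the entire body of every hook: the charm symlinks its hook files to one script that builds the manager and calls manage(), and hookenv.hook_name() decides whether that means reconfigure or stop. A minimal sketch (the service name and port are illustrative)::

    #!/usr/bin/env python3
    # hooks/install, hooks/config-changed, hooks/stop -> this script
    from charmhelpers.core import services

    manager = services.ServiceManager([
        {
            'service': 'myapp',
            'ports': [8080],
            'required_data': [],  # always "ready" in this sketch
        },
    ])
    manager.manage()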
+ + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. + """ + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) + for provider in service.get('provided_data', []): + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. 
+ """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' + if event_name == 'start': + hookenv.open_port(port, protocol) + elif event_name == 'stop': + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. 
+ """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/services/helpers.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/services/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..3e6e30d2fe0d9c73ffdc42d70b77e864b6379c53 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/services/helpers.py @@ -0,0 +1,290 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import yaml + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = None + interface = None + + def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + + if name is not None: + self.name = name + if additional_required_keys: + self.required_keys.extend(additional_required_keys) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. 
+ """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. 
+ """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready + action. 
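StoredContext above is the usual way to generate a secret exactly once: the first instantiation persists the data (with mode 0600), and every later one returns the stored copy unchanged. A hedged sketch (the file name and key are illustrative)::

    import uuid
    from charmhelpers.core.services.helpers import StoredContext

    secrets = StoredContext('app-secrets.yaml',
                            {'db_password': uuid.uuid4().hex})
    # secrets['db_password'] is stable across hook invocations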
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` + + :param str target: The target to write the rendered template to (or None) + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + :param partial on_change_action: functools partial to be executed when + rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template + """ + def __init__(self, source, target, + owner='root', group='root', perms=0o444, + on_change_action=None, template_loader=None): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + self.on_change_action = on_change_action + self.template_loader = template_loader + + def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) + service = manager.get_service(service_name) + context = {'ctx': {}} + for ctx in service.get('required_data', []): + context.update(ctx) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() + + return result + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/strutils.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/strutils.py new file mode 100644 index 0000000000000000000000000000000000000000..e8df0452f8203b53947eb137eed22d85ff62dff0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/strutils.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import re + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't', 'on']: + return True + elif value in ['n', 'no', 'false', 'f', 'off']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
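bool_from_string() normalises the loosely-typed flags that arrive via charm config, for example::

    from charmhelpers.core.strutils import bool_from_string

    bool_from_string('Yes')    # True
    bool_from_string(' off ')  # False
    bool_from_string('maybe')  # raises ValueError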
+ + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as bytes" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if matches: + size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) + else: + # Assume that value passed in is bytes + try: + size = int(value) + except ValueError: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return size + + +class BasicStringComparator(object): + """Provides a class that will compare strings from an iterator type object. + Used to provide > and < comparisons on strings that may not necessarily be + alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the + z-wrap. + """ + + _list = None + + def __init__(self, item): + if self._list is None: + raise Exception("Must define the _list in the class definition!") + try: + self.index = self._list.index(item) + except Exception: + raise KeyError("Item '{}' is not in list '{}'" + .format(item, self._list)) + + def __eq__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index == self._list.index(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index < self._list.index(other) + + def __ge__(self, other): + return not self.__lt__(other) + + def __gt__(self, other): + assert isinstance(other, str) or isinstance(other, self.__class__) + return self.index > self._list.index(other) + + def __le__(self, other): + return not self.__gt__(other) + + def __str__(self): + """Always give back the item at the index so it can be used in + comparisons like: + + s_mitaka = CompareOpenStack('mitaka') + s_newton = CompareOpenstack('newton') + + assert s_newton > s_mitaka + + @returns: + """ + return self._list[self.index] diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/sysctl.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/sysctl.py new file mode 100644 index 0000000000000000000000000000000000000000..386428d619bc38edf02dc088bf7ec32767c0ab94 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/sysctl.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml + +from subprocess import check_call, CalledProcessError + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + ERROR, + WARNING, +) + +from charmhelpers.core.host import is_container + +__author__ = 'Jorge Niedbalski R. 
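Both strutils helpers are easiest to see with values in hand (the channel list below is an illustrative ordering, not an API)::

    from charmhelpers.core.strutils import (
        BasicStringComparator,
        bytes_from_string,
    )

    bytes_from_string('512M')   # 536870912
    bytes_from_string('4096')   # 4096 -- bare values are taken as bytes

    class CompareChannels(BasicStringComparator):
        _list = ('edge', 'beta', 'candidate', 'stable')  # positional order

    assert CompareChannels('stable') > 'beta'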
' + + +def create(sysctl_dict, sysctl_file, ignore=False): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :param ignore: If True, ignore "unknown variable" errors. + :type ignore: bool + :returns: None + """ + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict_parsed.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: {} values: {}".format(sysctl_file, + sysctl_dict_parsed), + level=DEBUG) + + call = ["sysctl", "-p", sysctl_file] + if ignore: + call.append("-e") + + try: + check_call(call) + except CalledProcessError as e: + if is_container(): + log("Error setting some sysctl keys in this container: {}".format(e.output), + level=WARNING) + else: + raise e diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/templating.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/templating.py new file mode 100644 index 0000000000000000000000000000000000000000..9014015c14ee0b48c775562cd4f0d30884944439 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/templating.py @@ -0,0 +1,93 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None, encoding='UTF-8', + template_loader=None, config_template=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. + + The context should be a dict containing the values to be replaced in the + template. + + config_template may be provided to render from a provided template instead + of loading from a file. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + The rendered template will be written to the file as well as being returned + as a string. + + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. 
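create() accepts either a dict or a YAML string; hugepage.py above passes a YAML dump, but the dict form is simpler from a hook (the path and key are illustrative)::

    from charmhelpers.core import sysctl

    # Writes the file, then runs `sysctl -p` on it; inside a container,
    # failures to apply keys are logged as warnings instead of raised.
    sysctl.create({'net.ipv4.ip_forward': 1},
                  '/etc/sysctl.d/50-charm.conf')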
+ """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/unitdata.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/unitdata.py new file mode 100644 index 0000000000000000000000000000000000000000..ab554327b343f896880523fc627c1abea84be29a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/core/unitdata.py @@ -0,0 +1,525 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. 
+ +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import itertools +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are not persisted unless :meth:`flush` is called. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. + This should only be used for testing purposes. 
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def get(self, key, default=None, record=False): + self.cursor.execute('select data from kv where key=?', [key]) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) + result = self.cursor.fetchall() + + if not result: + return {} + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + """ + Remove a key from the database entirely. + """ + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ + serialized = json.dumps(value) + + self.cursor.execute('select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except Exception: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. 
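One subtlety of the Storage API above is worth spelling out: delta() only reports changes, update() records them, and nothing is committed until flush() (or a hook_scope exits cleanly)::

    from charmhelpers.core import unitdata

    db = unitdata.kv()
    delta = db.delta({'level': 'high'}, prefix='config.')
    if 'level' in delta:
        print(delta.level.previous, '->', delta.level.current)
    db.update({'level': 'high'}, prefix='config.')
    db.flush()  # commit; flush(False) rolls back instead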
+ + Records all unit information, and stores deltas for processing + by the hook. + + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. + # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', dict(data['env'])) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0cc7fc850a0632568ad78aae9716be718c9ff6b5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/__init__.py @@ -0,0 +1,209 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import importlib +from charmhelpers.osplatform import get_platform +from yaml import safe_load +from charmhelpers.core.hookenv import ( + config, + log, +) + +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse + + +# The order of this list is very important. Handlers should be listed in from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', +) + + +class SourceConfigError(Exception): + pass + + +class UnhandledSource(Exception): + pass + + +class AptLockError(Exception): + pass + + +class GPGKeyError(Exception): + """Exception occurs when a GPG key cannot be fetched or used. The message + indicates what the problem is. + """ + pass + + +class BaseFetchHandler(object): + + """Base class for FetchHandler implementations in fetch plugins""" + + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +__platform__ = get_platform() +module = "charmhelpers.fetch.%s" % __platform__ +fetch = importlib.import_module(module) + +filter_installed_packages = fetch.filter_installed_packages +filter_missing_packages = fetch.filter_missing_packages +install = fetch.apt_install +upgrade = fetch.apt_upgrade +update = _fetch_update = fetch.apt_update +purge = fetch.apt_purge +add_source = fetch.add_source + +if __platform__ == "ubuntu": + apt_cache = fetch.apt_cache + apt_install = fetch.apt_install + apt_update = fetch.apt_update + apt_upgrade = fetch.apt_upgrade + apt_purge = fetch.apt_purge + apt_autoremove = fetch.apt_autoremove + apt_mark = fetch.apt_mark + apt_hold = fetch.apt_hold + apt_unhold = fetch.apt_unhold + import_key = fetch.import_key + get_upstream_version = fetch.get_upstream_version + apt_pkg = fetch.ubuntu_apt_pkg + get_apt_dpkg_env = fetch.get_apt_dpkg_env +elif __platform__ == "centos": + yum_search = fetch.yum_search + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The fragment needs to be included as a string. Sources and their + corresponding keys are of the types supported by add_source(). + + Example config: + install_sources: | + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: | + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. None) should not be quoted. 
+    """
+    sources = safe_load((config(sources_var) or '').strip()) or []
+    keys = safe_load((config(keys_var) or '').strip()) or None
+
+    if isinstance(sources, six.string_types):
+        sources = [sources]
+
+    if keys is None:
+        for source in sources:
+            add_source(source, None)
+    else:
+        if isinstance(keys, six.string_types):
+            keys = [keys]
+
+        if len(sources) != len(keys):
+            raise SourceConfigError(
+                'Install sources and keys lists are different lengths')
+        for source, key in zip(sources, keys):
+            add_source(source, key)
+    if update:
+        _fetch_update(fatal=True)
+
+
+def install_remote(source, *args, **kwargs):
+    """Install a file tree from a remote source.
+
+    The specified source should be a url of the form:
+        scheme://[host]/path[#[option=value][&...]]
+
+    Schemes supported are based on this module's submodules.
+    Options supported are submodule-specific.
+    Additional arguments are passed through to the submodule.
+
+    For example::
+
+        dest = install_remote('http://example.com/archive.tgz',
+                              checksum='deadbeef',
+                              hash_type='sha1')
+
+    This will download `archive.tgz`, validate it using SHA1 and, if
+    the file is ok, extract it and return the directory in which it
+    was extracted. If the checksum fails, it will raise
+    :class:`charmhelpers.core.host.ChecksumError`.
+    """
+    # We ONLY check for True here because can_handle may return a string
+    # explaining why it can't handle a given source.
+    handlers = [h for h in plugins() if h.can_handle(source) is True]
+    for handler in handlers:
+        try:
+            return handler.install(source, *args, **kwargs)
+        except UnhandledSource as e:
+            log('Install source attempt unsuccessful: {}'.format(e),
+                level='WARNING')
+    raise UnhandledSource("No handler found for source {}".format(source))
+
+
+def install_from_config(config_var_name):
+    """Install a file from config."""
+    charm_config = config()
+    source = charm_config[config_var_name]
+    return install_remote(source)
+
+
+def plugins(fetch_handlers=None):
+    if not fetch_handlers:
+        fetch_handlers = FETCH_HANDLERS
+    plugin_list = []
+    for handler_name in fetch_handlers:
+        package, classname = handler_name.rsplit('.', 1)
+        try:
+            handler_class = getattr(
+                importlib.import_module(package),
+                classname)
+            plugin_list.append(handler_class())
+        except NotImplementedError:
+            # Skip missing plugins so that they can be omitted from
+            # installation if desired
+            log("FetchHandler {} not found, skipping plugin".format(
+                handler_name))
+    return plugin_list
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/archiveurl.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/archiveurl.py
new file mode 100644
index 0000000000000000000000000000000000000000..d25587adeff102c3fc9e402f98746fccbd8a3693
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/archiveurl.py
@@ -0,0 +1,165 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import os +import hashlib +import re + +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs + + +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + # XXX: Why is this returning a boolean and a string? It's + # doomed to fail since "bool(can_handle('foo://'))" will be True. + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ + # propagate all exceptions + # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse(source) + if proto in ('http', 'https'): + auth, barehost = splituser(netloc) + if auth is not None: + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) + try: + with open(dest, 'wb') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e + + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the `source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. 
+        :param str dest: Local destination path to install to. If not given,
+            installs to `$CHARM_DIR/archives/archive_file_name`.
+        :param str checksum: If given, validate the archive file after
+            download.
+        :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+            such as md5, sha1, sha256, sha512, etc.
+
+        """
+        url_parts = self.parse_url(source)
+        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
+        if not os.path.exists(dest_dir):
+            mkdir(dest_dir, perms=0o755)
+        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
+        try:
+            self.download(source, dld_file)
+        except URLError as e:
+            raise UnhandledSource(e.reason)
+        except OSError as e:
+            raise UnhandledSource(e.strerror)
+        options = parse_qs(url_parts.fragment)
+        for key, value in options.items():
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
+            if key in algorithms:
+                if len(value) != 1:
+                    raise TypeError(
+                        "Expected 1 hash value, not %d" % len(value))
+                expected = value[0]
+                check_hash(dld_file, expected, key)
+        if checksum:
+            check_hash(dld_file, checksum, hash_type)
+        return extract(dld_file, dest)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/bzrurl.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/bzrurl.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4ab3ff1e6bc7dde24e8ed568a3dc0c6012ddea6
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/bzrurl.py
@@ -0,0 +1,76 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from subprocess import STDOUT, check_output
+from charmhelpers.fetch import (
+    BaseFetchHandler,
+    UnhandledSource,
+    filter_installed_packages,
+    install,
+)
+from charmhelpers.core.host import mkdir
+
+
+if filter_installed_packages(['bzr']) != []:
+    install(['bzr'])
+    if filter_installed_packages(['bzr']) != []:
+        raise NotImplementedError('Unable to install bzr')
+
+
+class BzrUrlFetchHandler(BaseFetchHandler):
+    """Handler for bazaar branches via generic and lp URLs."""
+
+    def can_handle(self, source):
+        url_parts = self.parse_url(source)
+        if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
+            return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.bzr'))
+        else:
+            return True
+
+    def branch(self, source, dest, revno=None):
+        if not self.can_handle(source):
+            raise UnhandledSource("Cannot handle {}".format(source))
+        cmd_opts = []
+        if revno:
+            cmd_opts += ['-r', str(revno)]
+        if os.path.exists(dest):
+            cmd = ['bzr', 'pull']
+            cmd += cmd_opts
+            cmd += ['--overwrite', '-d', dest, source]
+        else:
+            cmd = ['bzr', 'branch']
+            cmd += cmd_opts
+            cmd += [source, dest]
+        check_output(cmd, stderr=STDOUT)
+
+    def install(self, source, dest=None, revno=None):
+        url_parts = self.parse_url(source)
+        branch_name = url_parts.path.strip("/").split("/")[-1]
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
+            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                    branch_name)
+
+        if dest and not os.path.exists(dest):
+            mkdir(dest, perms=0o755)
+
+        try:
+            self.branch(source, dest_dir, revno)
+        except OSError as e:
+            raise UnhandledSource(e.strerror)
+        return dest_dir
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/centos.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/centos.py
new file mode 100644
index 0000000000000000000000000000000000000000..a91dcff0645ed541a79cd72af3112bdff393719a
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/centos.py
@@ -0,0 +1,171 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+import os
+import time
+import six
+import yum
+
+from tempfile import NamedTemporaryFile
+from charmhelpers.core.hookenv import log
+
+YUM_NO_LOCK = 1  # The return code for "couldn't acquire lock" in YUM.
+YUM_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between YUM lock checks.
+YUM_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
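+
+# Note: taken together, the three constants above mean that a fatal yum
+# command which cannot acquire the lock is retried every
+# YUM_NO_LOCK_RETRY_DELAY seconds, up to YUM_NO_LOCK_RETRY_COUNT attempts,
+# i.e. for at most 30 * 10 = 300 seconds, before the underlying error is
+# re-raised (see _run_yum_command below).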
+
+
+def filter_installed_packages(packages):
+    """Return a list of packages that require installation."""
+    yb = yum.YumBase()
+    package_list = yb.doPackageLists()
+    temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
+
+    _pkgs = [p for p in packages if not temp_cache.get(p, False)]
+    return _pkgs
+
+
+def install(packages, options=None, fatal=False):
+    """Install one or more packages."""
+    cmd = ['yum', '--assumeyes']
+    if options is not None:
+        cmd.extend(options)
+    cmd.append('install')
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Installing {} with options: {}".format(packages,
+                                                options))
+    _run_yum_command(cmd, fatal)
+
+
+def upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages."""
+    cmd = ['yum', '--assumeyes']
+    if options is not None:
+        cmd.extend(options)
+    cmd.append('upgrade')
+    log("Upgrading with options: {}".format(options))
+    _run_yum_command(cmd, fatal)
+
+
+def update(fatal=False):
+    """Update local yum cache."""
+    cmd = ['yum', '--assumeyes', 'update']
+    log("Update with fatal: {}".format(fatal))
+    _run_yum_command(cmd, fatal)
+
+
+def purge(packages, fatal=False):
+    """Purge one or more packages."""
+    cmd = ['yum', '--assumeyes', 'remove']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Purging {}".format(packages))
+    _run_yum_command(cmd, fatal)
+
+
+def yum_search(packages):
+    """Search for a package."""
+    output = {}
+    cmd = ['yum', 'search']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Searching for {}".format(packages))
+    result = subprocess.check_output(cmd)
+    for package in list(packages):
+        output[package] = package in result
+    return output
+
+
+def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL with an rpm package
+
+    @param key: A key to be added to the system's keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver,
+    placing your Juju environment at risk.
+    """
+    if source is None:
+        log('Source is not present. Skipping')
+        return
+
+    if source.startswith('http'):
+        directory = '/etc/yum.repos.d/'
+        for filename in os.listdir(directory):
+            with open(directory + filename, 'r') as rpm_file:
+                if source in rpm_file.read():
+                    break
+        else:
+            log("Add source: {!r}".format(source))
+            # write in the charms.repo
+            with open(directory + 'Charms.repo', 'a') as rpm_file:
+                rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
+                rpm_file.write('name=%s\n' % source[7:])
+                rpm_file.write('baseurl=%s\n\n' % source)
+    else:
+        log("Unknown source: {!r}".format(source))
+
+    if key:
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile('w+') as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['rpm', '--import', key_file.name])
+        else:
+            subprocess.check_call(['rpm', '--import', key])
+
+
+def _run_yum_command(cmd, fatal=False):
+    """Run a YUM command.
+
+    Checks the output and retries if the fatal flag is set to True.
+
+    :param cmd: str: The yum command to run.
+    :param fatal: bool: Whether the command's output should be checked and
+        retried.
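+
+    Illustrative call (hypothetical package name)::
+
+        _run_yum_command(['yum', '--assumeyes', 'install', 'httpd'],
+                         fatal=True)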
+ """ + env = os.environ.copy() + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the yum + # lock was not acquired. + + while result is None or result == YUM_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > YUM_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire YUM lock. Will retry in {} seconds." + "".format(YUM_NO_LOCK_RETRY_DELAY)) + time.sleep(YUM_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/giturl.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/giturl.py new file mode 100644 index 0000000000000000000000000000000000000000..070ca9bb5c1a2fdef39f88606ffcaf39bb049410 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/giturl.py @@ -0,0 +1,69 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from subprocess import check_output, CalledProcessError, STDOUT +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource, + filter_installed_packages, + install, +) + +if filter_installed_packages(['git']) != []: + install(['git']) + if filter_installed_packages(['git']) != []: + raise NotImplementedError('Unable to install git') + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs.""" + + def can_handle(self, source): + url_parts = self.parse_url(source) + # TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git', ''): + return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.git')) + else: + return True + + def clone(self, source, dest, branch="master", depth=None): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + if os.path.exists(dest): + cmd = ['git', '-C', dest, 'pull', source, branch] + else: + cmd = ['git', 'clone', source, dest, '--branch', branch] + if depth: + cmd.extend(['--depth', depth]) + check_output(cmd, stderr=STDOUT) + + def install(self, source, branch="master", dest=None, depth=None): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + try: + self.clone(source, dest_dir, branch, depth) + except CalledProcessError as e: + raise UnhandledSource(e) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bff99dc93c64f80716e2d5a2b6d0d4e8a2436955 --- /dev/null 
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/debug.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..757135ee4cf3b5ff4c02305126f5ca3940892afc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/debug.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import atexit +import sys + +from charmhelpers.fetch.python.rpdb import Rpdb +from charmhelpers.core.hookenv import ( + open_port, + close_port, + ERROR, + log +) + +__author__ = "Jorge Niedbalski " + +DEFAULT_ADDR = "0.0.0.0" +DEFAULT_PORT = 4444 + + +def _error(message): + log(message, level=ERROR) + + +def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): + """ + Set a trace point using the remote debugger + """ + atexit.register(close_port, port) + try: + log("Starting a remote python debugger session on %s:%s" % (addr, + port)) + open_port(port) + debugger = Rpdb(addr=addr, port=port) + debugger.set_trace(sys._getframe().f_back) + except Exception: + _error("Cannot start a remote debug session on %s:%s" % (addr, + port)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/packages.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/packages.py new file mode 100644 index 0000000000000000000000000000000000000000..6e95028bc540aace84a2ec6c1bcc4de2663e8a87 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/packages.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import six
+import subprocess
+import sys
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import charm_dir, log
+
+__author__ = "Jorge Niedbalski "
+
+
+def pip_execute(*args, **kwargs):
+    """Overridden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to add wheels
+    from /usr/share/python-wheels, which are installed by various tools.
+    This function ensures that sys.path remains the same after the call is
+    executed.
+    """
+    try:
+        _path = sys.path
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            if six.PY2:
+                apt_install('python-pip')
+            else:
+                apt_install('python3-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
+def parse_options(given, available):
+    """Given a set of options, check if available"""
+    for key, value in sorted(given.items()):
+        if not value:
+            continue
+        if key in available:
+            yield "--{0}={1}".format(key, value)
+
+
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param constraints: Path to pip constraints file.
+    http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+    """
+    command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    command.append("-r {0}".format(requirements))
+    if constraints:
+        command.append("-c {0}".format(constraints))
+        log("Installing from file: {} with constraints {} "
+            "and options: {}".format(requirements, constraints, command))
+    else:
+        log("Installing from file: {} with options: {}".format(requirements,
+                                                               command))
+    pip_execute(command)
+
+
+def pip_install(package, fatal=False, upgrade=False, venv=None,
+                constraints=None, **options):
+    """Install a python package"""
+    if venv:
+        venv_python = os.path.join(venv, 'bin/pip')
+        command = [venv_python, "install"]
+    else:
+        command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', 'index-url', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if upgrade:
+        command.append('--upgrade')
+
+    if constraints:
+        command.extend(['-c', constraints])
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Installing {} package with options: {}".format(package,
+                                                        command))
+    if venv:
+        subprocess.check_call(command)
+    else:
+        pip_execute(command)
+
+
+def pip_uninstall(package, **options):
+    """Uninstall a python package"""
+    command = ["uninstall", "-q", "-y"]
+
+    available_options = ('proxy', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Uninstalling {} package with options: {}".format(package,
+                                                          command))
+    pip_execute(command)
+
+
+def pip_list():
+    """Return the list of currently installed python packages
+    """
+    return pip_execute(["list"])
+
+
+def pip_create_virtualenv(path=None):
+    """Create an isolated Python environment."""
+    if six.PY2:
+        apt_install('python-virtualenv')
+    else:
+        apt_install('python3-virtualenv')
+
+    if path:
+        venv_path = path
+    else:
+        venv_path = os.path.join(charm_dir(), 'venv')
+
+    if not os.path.exists(venv_path):
subprocess.check_call(['virtualenv', venv_path]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/rpdb.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/rpdb.py new file mode 100644 index 0000000000000000000000000000000000000000..9b31610c22fc2d24fe5097016cf45728f87de4ae --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/rpdb.py @@ -0,0 +1,56 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Remote Python Debugger (pdb wrapper).""" + +import pdb +import socket +import sys + +__author__ = "Bertrand Janin " +__version__ = "0.1.3" + + +class Rpdb(pdb.Pdb): + + def __init__(self, addr="127.0.0.1", port=4444): + """Initialize the socket and initialize pdb.""" + + # Backup stdin and stdout before replacing them by the socket handle + self.old_stdout = sys.stdout + self.old_stdin = sys.stdin + + # Open a 'reusable' socket to let the webapp reload on the same port + self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + self.skt.bind((addr, port)) + self.skt.listen(1) + (clientsocket, address) = self.skt.accept() + handle = clientsocket.makefile('rw') + pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) + sys.stdout = sys.stdin = handle + + def shutdown(self): + """Revert stdin and stdout, close the socket.""" + sys.stdout = self.old_stdout + sys.stdin = self.old_stdin + self.skt.close() + self.set_continue() + + def do_continue(self, arg): + """Stop all operation on ``continue``.""" + self.shutdown() + return 1 + + do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/version.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/version.py new file mode 100644 index 0000000000000000000000000000000000000000..3eb421036ff737f8ff1684e85ff87703e30fe543 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/python/version.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import sys
+
+__author__ = "Jorge Niedbalski "
+
+
+def current_version():
+    """Current system python version"""
+    return sys.version_info
+
+
+def current_version_string():
+    """Current system python version as string major.minor.micro"""
+    return "{0}.{1}.{2}".format(sys.version_info.major,
+                                sys.version_info.minor,
+                                sys.version_info.micro)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/snap.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/snap.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc70aa941bc4f0bb5ff126237db65705b9e4a10a
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/snap.py
@@ -0,0 +1,150 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Charm helpers snap for classic charms.
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+import os
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg '
+
+# The return code for "couldn't acquire lock" in Snap
+# (hopefully this will be improved).
+SNAP_NO_LOCK = 1
+SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+SNAP_CHANNELS = [
+    'edge',
+    'beta',
+    'candidate',
+    'stable',
+]
+
+
+class CouldNotAcquireLockException(Exception):
+    pass
+
+
+class InvalidSnapChannel(Exception):
+    pass
+
+
+def _snap_exec(commands):
+    """
+    Execute snap commands.
+
+    :param commands: List commands
+    :return: Integer exit code
+    """
+    assert type(commands) == list
+
+    retry_count = 0
+    return_code = None
+
+    while return_code is None or return_code == SNAP_NO_LOCK:
+        try:
+            return_code = subprocess.check_call(['snap'] + commands,
+                                                env=os.environ)
+        except subprocess.CalledProcessError as e:
+            retry_count += 1
+            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException(
+                    'Could not acquire lock after {} attempts'
+                    .format(SNAP_NO_LOCK_RETRY_COUNT))
+            return_code = e.returncode
+            log('Snap failed to acquire lock, trying again in {} seconds.'
+                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
+            sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+    return return_code
+
+
+def snap_install(packages, *flags):
+    """
+    Install a snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to install command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Installing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with option(s) "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['install'] + flags + packages)
+
+
+def snap_remove(packages, *flags):
+    """
+    Remove a snap package.
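+
+    Illustrative call (hypothetical snap name)::
+
+        snap_remove('hello-world', '--purge')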
+ + :param packages: String or List String package name + :param flags: List String flags to pass to remove command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Removing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['remove'] + flags + packages) + + +def snap_refresh(packages, *flags): + """ + Refresh / Update snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to refresh command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Refreshing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['refresh'] + flags + packages) + + +def valid_snap_channel(channel): + """ Validate snap channel exists + + :raises InvalidSnapChannel: When channel does not exist + :return: Boolean + """ + if channel.lower() in SNAP_CHANNELS: + return True + else: + raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/ubuntu.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/ubuntu.py new file mode 100644 index 0000000000000000000000000000000000000000..3ddaf0dd47f23cef60d7b7e59a83c989999d9f3f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/ubuntu.py @@ -0,0 +1,805 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict +import platform +import re +import six +import subprocess +import sys +import time + +from charmhelpers.core.host import get_distrib_codename, get_system_env + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, + env_proxy_settings, +) +from charmhelpers.fetch import SourceConfigError, GPGKeyError +from charmhelpers.fetch import ubuntu_apt_pkg + +PROPOSED_POCKET = ( + "# Proposed\n" + "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " + "multiverse restricted\n") +PROPOSED_PORTS_POCKET = ( + "# Proposed\n" + "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " + "multiverse restricted\n") +# Only supports 64bit and ppc64 at the moment. 
+ARCH_TO_PROPOSED_POCKET = { + 'x86_64': PROPOSED_POCKET, + 'ppc64le': PROPOSED_PORTS_POCKET, + 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, +} +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 
'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', + # Ocata + 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', + 'xenial-ocata': 'xenial-updates/ocata', + 'xenial-ocata/updates': 'xenial-updates/ocata', + 'xenial-updates/ocata': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/proposed': 'xenial-proposed/ocata', + 'xenial-proposed/ocata': 'xenial-proposed/ocata', + # Pike + 'pike': 'xenial-updates/pike', + 'xenial-pike': 'xenial-updates/pike', + 'xenial-pike/updates': 'xenial-updates/pike', + 'xenial-updates/pike': 'xenial-updates/pike', + 'pike/proposed': 'xenial-proposed/pike', + 'xenial-pike/proposed': 'xenial-proposed/pike', + 'xenial-proposed/pike': 'xenial-proposed/pike', + # Queens + 'queens': 'xenial-updates/queens', + 'xenial-queens': 'xenial-updates/queens', + 'xenial-queens/updates': 'xenial-updates/queens', + 'xenial-updates/queens': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/proposed': 'xenial-proposed/queens', + 'xenial-proposed/queens': 'xenial-proposed/queens', + # Rocky + 'rocky': 'bionic-updates/rocky', + 'bionic-rocky': 'bionic-updates/rocky', + 'bionic-rocky/updates': 'bionic-updates/rocky', + 'bionic-updates/rocky': 'bionic-updates/rocky', + 'rocky/proposed': 'bionic-proposed/rocky', + 'bionic-rocky/proposed': 'bionic-proposed/rocky', + 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', + # Train + 'train': 'bionic-updates/train', + 'bionic-train': 'bionic-updates/train', + 'bionic-train/updates': 'bionic-updates/train', + 'bionic-updates/train': 'bionic-updates/train', + 'train/proposed': 'bionic-proposed/train', + 'bionic-train/proposed': 'bionic-proposed/train', + 'bionic-proposed/train': 'bionic-proposed/train', + # Ussuri + 'ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri/updates': 'bionic-updates/ussuri', + 'bionic-updates/ussuri': 'bionic-updates/ussuri', + 'ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', +} + + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. +CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. 
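+
+# Note: a fatal apt command that keeps failing is retried by
+# _run_with_retries() below up to CMD_RETRY_COUNT times, sleeping
+# CMD_RETRY_DELAY seconds after each failed attempt, i.e. waiting at most
+# 3 * 10 = 30 seconds in total before the error is finally raised.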
+
+
+def filter_installed_packages(packages):
+    """Return a list of packages that require installation."""
+    cache = apt_cache()
+    _pkgs = []
+    for package in packages:
+        try:
+            p = cache[package]
+            p.current_ver or _pkgs.append(package)
+        except KeyError:
+            log('Package {} has no installation candidate.'.format(package),
+                level='WARNING')
+            _pkgs.append(package)
+    return _pkgs
+
+
+def filter_missing_packages(packages):
+    """Return a list of packages that are installed.
+
+    :param packages: list of packages to evaluate.
+    :returns list: Packages that are installed.
+    """
+    return list(
+        set(packages) -
+        set(filter_installed_packages(packages))
+    )
+
+
+def apt_cache(*_, **__):
+    """Shim returning an object simulating the apt_pkg Cache.
+
+    :param _: Accept arguments for compatibility, not used.
+    :type _: any
+    :param __: Accept keyword arguments for compatibility, not used.
+    :type __: any
+    :returns: Object used to interrogate the system apt and dpkg databases.
+    :rtype: ubuntu_apt_pkg.Cache
+    """
+    if 'apt_pkg' in sys.modules:
+        # NOTE(fnordahl): When our consumers use the upstream ``apt_pkg``
+        # module in conjunction with the apt_cache helper function, they may
+        # expect us to call ``apt_pkg.init()`` for them.
+        #
+        # Detect this situation, log a warning and make the call to
+        # ``apt_pkg.init()`` to keep the consumer Python interpreter from
+        # crashing with a segmentation fault.
+        log('Support for use of upstream ``apt_pkg`` module in conjunction '
+            'with charm-helpers is deprecated since 2019-06-25', level=WARNING)
+        sys.modules['apt_pkg'].init()
+    return ubuntu_apt_pkg.Cache()
+
+
+def apt_install(packages, options=None, fatal=False):
+    """Install one or more packages.
+
+    :param packages: Package(s) to install
+    :type packages: Option[str, List[str]]
+    :param options: Options to pass on to apt-get
+    :type options: Option[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+        retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    if options is None:
+        options = ['--option=Dpkg::Options::=--force-confold']
+
+    cmd = ['apt-get', '--assume-yes']
+    cmd.extend(options)
+    cmd.append('install')
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Installing {} with options: {}".format(packages,
+                                                options))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages.
+
+    :param options: Options to pass on to apt-get
+    :type options: Option[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+        retried.
+    :type fatal: bool
+    :param dist: Whether ``dist-upgrade`` should be used over ``upgrade``
+    :type dist: bool
+    :raises: subprocess.CalledProcessError
+    """
+    if options is None:
+        options = ['--option=Dpkg::Options::=--force-confold']
+
+    cmd = ['apt-get', '--assume-yes']
+    cmd.extend(options)
+    if dist:
+        cmd.append('dist-upgrade')
+    else:
+        cmd.append('upgrade')
+    log("Upgrading with options: {}".format(options))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_update(fatal=False):
+    """Update local apt cache."""
+    cmd = ['apt-get', 'update']
+    _run_apt_command(cmd, fatal)
+
+
+def apt_purge(packages, fatal=False):
+    """Purge one or more packages.
+
+    :param packages: Package(s) to purge
+    :type packages: Option[str, List[str]]
+    :param fatal: Whether the command's output should be checked and
+        retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['apt-get', '--assume-yes', 'purge']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Purging {}".format(packages))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_autoremove(purge=True, fatal=False):
+    """Remove packages that are no longer required.
+    :param purge: Whether the ``--purge`` option should be passed on or not.
+    :type purge: bool
+    :param fatal: Whether the command's output should be checked and
+        retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['apt-get', '--assume-yes', 'autoremove']
+    if purge:
+        cmd.append('--purge')
+    _run_apt_command(cmd, fatal)
+
+
+def apt_mark(packages, mark, fatal=False):
+    """Flag one or more packages using apt-mark."""
+    log("Marking {} as {}".format(packages, mark))
+    cmd = ['apt-mark', mark]
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+
+    if fatal:
+        subprocess.check_call(cmd, universal_newlines=True)
+    else:
+        subprocess.call(cmd, universal_newlines=True)
+
+
+def apt_hold(packages, fatal=False):
+    return apt_mark(packages, 'hold', fatal=fatal)
+
+
+def apt_unhold(packages, fatal=False):
+    return apt_mark(packages, 'unhold', fatal=fatal)
+
+
+def import_key(key):
+    """Import an ASCII Armor key.
+
+    A Radix64 format keyid is also supported for backwards
+    compatibility. In this case the Ubuntu keyserver will be
+    queried for a key via HTTPS by its keyid. This method
+    is less preferable because https proxy servers may
+    require traffic decryption which is equivalent to a
+    man-in-the-middle attack (a proxy server impersonates
+    keyserver TLS certificates and has to be explicitly
+    trusted by the system).
+
+    :param key: A GPG key in ASCII armor format,
+                including BEGIN and END markers or a keyid.
+    :type key: (bytes, str)
+    :raises: GPGKeyError if the key could not be imported
+    """
+    key = key.strip()
+    if '-' in key or '\n' in key:
+        # Send everything not obviously a keyid to GPG to import, as
+        # we trust its validation better than our own. eg. handling
+        # comments before the key.
+        log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
+                '-----END PGP PUBLIC KEY BLOCK-----' in key):
+            log("Writing provided PGP key in the binary format", level=DEBUG)
+            if six.PY3:
+                key_bytes = key.encode('utf-8')
+            else:
+                key_bytes = key
+            key_name = _get_keyid_by_gpg_key(key_bytes)
+            key_gpg = _dearmor_gpg_key(key_bytes)
+            _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
+        else:
+            raise GPGKeyError("ASCII armor markers missing from GPG key")
+    else:
+        log("PGP key found (looks like Radix64 format)", level=WARNING)
+        log("SECURELY importing PGP key from keyserver; "
+            "full key not provided.", level=WARNING)
+        # as of bionic add-apt-repository uses curl with an HTTPS keyserver
+        # URL to retrieve GPG keys. The `apt-key adv` command is deprecated,
+        # as is apt-key in general, as noted in its manpage. See lp:1433761
+        # for more history. Instead, /etc/apt/trusted.gpg.d is used directly
+        # to drop gpg
+        key_asc = _get_key_by_keyid(key)
+        # write the key in GPG format so that apt-key list shows it
+        key_gpg = _dearmor_gpg_key(key_asc)
+        _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
+
+
+def _get_keyid_by_gpg_key(key_material):
+    """Get a GPG key fingerprint by GPG key material.
+ Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options. + + :param key_material: ASCII armor-encoded or binary GPG key material + :type key_material: bytes + :raises: GPGKeyError if invalid key material has been provided + :returns: A GPG key fingerprint + :rtype: str + """ + # Use the same gpg command for both Xenial and Bionic + cmd = 'gpg --with-colons --with-fingerprint' + ps = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_material) + if six.PY3: + out = out.decode('utf-8') + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material provided') + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + +def _get_key_by_keyid(keyid): + """Get a key via HTTPS from the Ubuntu keyserver. + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. + + 8-digit (32 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 + 16-digit (64 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 + 40-digit key ID: + https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 + + :param keyid: An 8, 16 or 40 hex digit keyid to find a key for + :type keyid: (bytes, str) + :returns: A key material for the specified GPG key id + :rtype: (str, bytes) + :raises: subprocess.CalledProcessError + """ + # options=mr - machine-readable output (disables html wrappers) + keyserver_url = ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') + curl_cmd = ['curl', keyserver_url.format(keyid)] + # use proxy server settings in order to retrieve the key + return subprocess.check_output(curl_cmd, + env=env_proxy_settings(['https'])) + + +def _dearmor_gpg_key(key_asc): + """Converts a GPG key in the ASCII armor format to the binary format. + + :param key_asc: A GPG key in ASCII armor format. + :type key_asc: (str, bytes) + :returns: A GPG key in binary format + :rtype: (str, bytes) + :raises: GPGKeyError + """ + ps = subprocess.Popen(['gpg', '--dearmor'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_asc) + # no need to decode output as it is binary (invalid utf-8), only error + if six.PY3: + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material. 
Check your network setup'
+                          ' (MTU, routing, DNS) and/or proxy server settings'
+                          ' as well as destination keyserver status.')
+    else:
+        return out
+
+
+def _write_apt_gpg_keyfile(key_name, key_material):
+    """Writes GPG key material into a file at a provided path.
+
+    :param key_name: A key name to use for a key file (could be a fingerprint)
+    :type key_name: str
+    :param key_material: A GPG key material (binary)
+    :type key_material: (str, bytes)
+    """
+    with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
+              'wb') as keyf:
+        keyf.write(key_material)
+
+
+def add_source(source, key=None, fail_invalid=False):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples::
+
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+        'distro' may be used as a noop
+
+    Full list of source specifications supported by the function are:
+
+    'distro': A NOP; i.e. it has no effect.
+    'proposed': the proposed deb spec [2] is written to
+      /etc/apt/sources.list.d/proposed.list
+    'distro-proposed': adds <version>-proposed to the debs [2]
+    'ppa:<ppa-name>': add-apt-repository --yes <ppa-name>
+    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
+    'http://....': add-apt-repository --yes http://...
+    'cloud-archive:<spec>': add-apt-repository -yes cloud-archive:<spec>
+    'cloud:<release>[-staging]': specify a Cloud Archive pocket with
+      optional staging version. If staging is used then the staging PPA [2]
+      will be used. If staging is NOT used then the cloud archive [3] will be
+      added, and the 'ubuntu-cloud-keyring' package will be added for the
+      current distro.
+
+    Otherwise the source is not recognised and this is logged to the juju log.
+    However, no error is raised, unless fail_invalid is True.
+
+    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+        where {} is replaced with the derived pocket name.
+    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
+        main universe multiverse restricted
+        where {} is replaced with the lsb_release codename (e.g. xenial)
+    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
+        to /etc/apt/sources.list.d/cloud-archive-list
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+
+    @param fail_invalid: (boolean) if True, then the function raises a
+    SourceConfigError if there is no matching installation source.
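+
+    Illustrative calls (hypothetical sources)::
+
+        add_source('ppa:charmers/example')
+        add_source('cloud:bionic-train')
+        add_source('deb http://example.com/ubuntu bionic main',
+                   key='a1b2c3d4')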
+
+    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
+       valid pocket in CLOUD_ARCHIVE_POCKETS
+    """
+    _mapping = OrderedDict([
+        (r"^distro$", lambda: None),  # This is a NOP
+        (r"^(?:proposed|distro-proposed)$", _add_proposed),
+        (r"^cloud-archive:(.*)$", _add_apt_repository),
+        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
+        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^cloud:(.*)$", _add_cloud_pocket),
+        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+    ])
+    if source is None:
+        source = ''
+    for r, fn in six.iteritems(_mapping):
+        m = re.match(r, source)
+        if m:
+            if key:
+                # Import key before adding the source which depends on it,
+                # as refreshing packages could fail otherwise.
+                try:
+                    import_key(key)
+                except GPGKeyError as e:
+                    raise SourceConfigError(str(e))
+            # call the associated function with the captured groups
+            # raises SourceConfigError on error.
+            fn(*m.groups())
+            break
+    else:
+        # nothing matched. log an error and maybe sys.exit
+        err = "Unknown source: {!r}".format(source)
+        log(err)
+        if fail_invalid:
+            raise SourceConfigError(err)
+
+
+def _add_proposed():
+    """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list
+
+    Uses get_distrib_codename to determine the correct stanza for
+    the deb line.
+
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
+    other architectures PROPOSED_PORTS_POCKET is used for the release.
+    """
+    release = get_distrib_codename()
+    arch = platform.machine()
+    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
+        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
+                                .format(arch))
+    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
+
+
+def _add_apt_repository(spec):
+    """Add the spec using add_apt_repository
+
+    :param spec: the parameter to pass to add_apt_repository
+    :type spec: str
+    """
+    if '{series}' in spec:
+        series = get_distrib_codename()
+        spec = spec.replace('{series}', series)
+    # software-properties package for bionic properly reacts to proxy settings
+    # passed as environment variables (See lp:1433761). This is not the case
+    # for LTS and non-LTS releases below bionic.
+    _run_with_retries(['add-apt-repository', '--yes', spec],
+                      cmd_env=env_proxy_settings(['https']))
+
+
+def _add_cloud_pocket(pocket):
+    """Add a cloud pocket as /etc/apt/sources.list.d/cloud-archive.list
+
+    Note that this overwrites the existing file if there is one.
+
+    This function also converts the simple pocket into the actual pocket using
+    the CLOUD_ARCHIVE_POCKETS mapping.
+
+    :param pocket: string representing the pocket to add a deb spec for.
+    :raises: SourceConfigError if the cloud pocket doesn't exist or the
+        requested release doesn't match the current distro version.
+    """
+    apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
+                fatal=True)
+    if pocket not in CLOUD_ARCHIVE_POCKETS:
+        raise SourceConfigError(
+            'Unsupported cloud: source option %s' %
+            pocket)
+    actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
+    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
+        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+
+
+def _add_cloud_staging(cloud_archive_release, openstack_release):
+    """Add the cloud staging repository which is in
+    ppa:ubuntu-cloud-archive/<openstack_release>-staging
+
+    This function checks that the cloud_archive_release matches the current
+    codename for the distro that the charm is being installed on.
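+
+    Illustrative call (hypothetical release names)::
+
+        _add_cloud_staging('bionic', 'train')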
+
+    :param cloud_archive_release: string, codename for the release.
+    :param openstack_release: String, codename for the openstack release.
+    :raises: SourceConfigError if the cloud_archive_release doesn't match the
+        current version of the os.
+    """
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
+    ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
+    cmd = 'add-apt-repository -y {}'.format(ppa)
+    _run_with_retries(cmd.split(' '))
+
+
+def _add_cloud_distro_check(cloud_archive_release, openstack_release):
+    """Add the cloud pocket, but also check the cloud_archive_release against
+    the current distro, and use the openstack_release as the full lookup.
+
+    This just calls _add_cloud_pocket() with the openstack_release as pocket
+    to get the correct cloud-archive.list for dpkg to work with.
+
+    :param cloud_archive_release: String, codename for the distro release.
+    :param openstack_release: String, spec for the release to look up in the
+        CLOUD_ARCHIVE_POCKETS
+    :raises: SourceConfigError if this is the wrong distro, or the pocket spec
+        doesn't exist.
+    """
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
+    _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release))
+
+
+def _verify_is_ubuntu_rel(release, os_release):
+    """Verify that the release is the same as the current Ubuntu release.
+
+    :param release: String, lowercase for the release.
+    :param os_release: String, the os_release being asked for
+    :raises: SourceConfigError if the release is not the same as the ubuntu
+        release.
+    """
+    ubuntu_rel = get_distrib_codename()
+    if release != ubuntu_rel:
+        raise SourceConfigError(
+            'Invalid Cloud Archive release specified: {}-{} on this Ubuntu '
+            'version ({})'.format(release, os_release, ubuntu_rel))
+
+
+def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
+                      retry_message="", cmd_env=None):
+    """Run a command and retry until success or max_retries is reached.
+
+    :param cmd: The apt command to run.
+    :type cmd: str
+    :param max_retries: The number of retries to attempt on a fatal
+        command. Defaults to CMD_RETRY_COUNT.
+    :type max_retries: int
+    :param retry_exitcodes: Optional additional exit codes to retry.
+        Defaults to retry on exit code 1.
+    :type retry_exitcodes: tuple
+    :param retry_message: Optional log prefix emitted during retries.
+    :type retry_message: str
+    :param cmd_env: Environment variables to add to the command run.
+    :type cmd_env: Optional[Dict[str, str]]
+    """
+    env = get_apt_dpkg_env()
+    if cmd_env:
+        env.update(cmd_env)
+
+    if not retry_message:
+        retry_message = "Failed executing '{}'".format(" ".join(cmd))
+    retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
+
+    retry_count = 0
+    result = None
+
+    retry_results = (None,) + retry_exitcodes
+    while result in retry_results:
+        try:
+            result = subprocess.check_call(cmd, env=env)
+        except subprocess.CalledProcessError as e:
+            retry_count = retry_count + 1
+            if retry_count > max_retries:
+                raise
+            result = e.returncode
+            log(retry_message)
+            time.sleep(CMD_RETRY_DELAY)
+
+
+def _run_apt_command(cmd, fatal=False):
+    """Run an apt command with optional retries.
+
+    :param cmd: The apt command to run.
+    :type cmd: str
+    :param fatal: Whether the command's output should be checked and
+        retried.
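`_run_with_retries` above is the charm-helpers answer to transient apt/dpkg failures such as a held lock: treat exit code 1 (plus any caller-supplied codes) as retryable, sleep, and try again up to a cap. The same pattern in a standalone form, standard library only, with a hypothetical `run_with_retries` name:

```python
import subprocess
import time


def run_with_retries(cmd, max_retries=3, retry_exitcodes=(1,), delay=1):
    """Re-run cmd until it exits cleanly or retries are exhausted."""
    retries = 0
    result = None
    retryable = (None,) + tuple(retry_exitcodes)
    while result in retryable:
        try:
            result = subprocess.check_call(cmd)  # 0 on success, exits the loop
        except subprocess.CalledProcessError as e:
            retries += 1
            if retries > max_retries:
                raise
            result = e.returncode
            print("'{}' failed; retrying in {}s".format(" ".join(cmd), delay))
            time.sleep(delay)


run_with_retries(["true"])  # succeeds immediately, no retries needed
```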
+ :type fatal: bool + """ + if fatal: + _run_with_retries( + cmd, retry_exitcodes=(1, APT_NO_LOCK,), + retry_message="Couldn't acquire DPKG lock") + else: + subprocess.call(cmd, env=get_apt_dpkg_env()) + + +def get_upstream_version(package): + """Determine upstream version based on installed package + + @returns None (if not installed) or the upstream version + """ + cache = apt_cache() + try: + pkg = cache[package] + except Exception: + # the package is unknown to the current apt cache. + return None + + if not pkg.current_ver: + # package is known, but no version is currently installed. + return None + + return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) + + +def get_apt_dpkg_env(): + """Get environment suitable for execution of APT and DPKG tools. + + We keep this in a helper function instead of in a global constant to + avoid execution on import of the library. + :returns: Environment suitable for execution of APT and DPKG tools. + :rtype: Dict[str, str] + """ + # The fallback is used in the event of ``/etc/environment`` not containing + # avalid PATH variable. + return {'DEBIAN_FRONTEND': 'noninteractive', + 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')} diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/ubuntu_apt_pkg.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/ubuntu_apt_pkg.py new file mode 100644 index 0000000000000000000000000000000000000000..929a75d7a775921c32368e8cc7950eaa97390047 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -0,0 +1,267 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provide a subset of the ``python-apt`` module API. + +Data collection is done through subprocess calls to ``apt-cache`` and +``dpkg-query`` commands. + +The main purpose for this module is to avoid dependency on the +``python-apt`` python module. + +The indicated python module is a wrapper around the ``apt`` C++ library +which is tightly connected to the version of the distribution it was +shipped on. It is not developed in a backward/forward compatible manner. + +This in turn makes it incredibly hard to distribute as a wheel for a piece +of python software that supports a span of distro releases [0][1]. + +Upstream feedback like [2] does not give confidence in this ever changing, +so with this we get rid of the dependency. 
+ +0: https://github.com/juju-solutions/layer-basic/pull/135 +1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 +2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 +""" + +import locale +import os +import subprocess +import sys + + +class _container(dict): + """Simple container for attributes.""" + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class Package(_container): + """Simple container for package attributes.""" + + +class Version(_container): + """Simple container for version attributes.""" + + +class Cache(object): + """Simulation of ``apt_pkg`` Cache object.""" + def __init__(self, progress=None): + pass + + def __contains__(self, package): + try: + pkg = self.__getitem__(package) + return pkg is not None + except KeyError: + return False + + def __getitem__(self, package): + """Get information about a package from apt and dpkg databases. + + :param package: Name of package + :type package: str + :returns: Package object + :rtype: object + :raises: KeyError, subprocess.CalledProcessError + """ + apt_result = self._apt_cache_show([package])[package] + apt_result['name'] = apt_result.pop('package') + pkg = Package(apt_result) + dpkg_result = self._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + if installed_version: + current_ver = Version({'ver_str': installed_version}) + pkg.current_ver = current_ver + pkg.architecture = dpkg_result.get('architecture') + return pkg + + def _dpkg_list(self, packages): + """Get data from system dpkg database for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about installed packages, keys like + ``dpkg-query --list`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['dpkg-query', '--list'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError as cp: + # ``dpkg-query`` may return error and at the same time have + # produced useful output, for example when asked for multiple + # packages where some are not installed + if cp.returncode != 1: + raise + output = cp.output + headings = [] + for line in output.splitlines(): + if line.startswith('||/'): + headings = line.split() + headings.pop(0) + continue + elif (line.startswith('|') or line.startswith('+') or + line.startswith('dpkg-query:')): + continue + else: + data = line.split(None, 4) + status = data.pop(0) + if status != 'ii': + continue + pkg = {} + pkg.update({k.lower(): v for k, v in zip(headings, data)}) + if 'name' in pkg: + pkgs.update({pkg['name']: pkg}) + return pkgs + + def _apt_cache_show(self, packages): + """Get data from system apt cache for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about package, keys like + ``apt-cache show`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['apt-cache', 'show', '--no-all-versions'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. 
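`Cache._dpkg_list` above screen-scrapes `dpkg-query --list`, keeping only rows whose status column is `ii` (installed) and zipping the remaining columns against the lowercased headings. A self-contained illustration of that parse against canned output:

```python
SAMPLE = """\
||/ Name      Version        Architecture Description
+++-=========-==============-============-===========
ii  curl      7.68.0-1ubuntu amd64        command line tool
rc  oldpkg    1.0            amd64        removed, config remains
"""

pkgs = {}
headings = []
for line in SAMPLE.splitlines():
    if line.startswith('||/'):
        headings = line.split()
        headings.pop(0)          # drop the '||/' status-column marker
        continue
    if line.startswith(('|', '+')):
        continue                 # separator rows
    data = line.split(None, 4)
    status = data.pop(0)
    if status != 'ii':           # keep only fully installed packages
        continue
    pkgs[data[0]] = dict(zip([h.lower() for h in headings], data))

print(pkgs['curl']['version'])   # -> 7.68.0-1ubuntu
```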
Workaround for Trusty where the
+            # environment appears to not be set up correctly.
+            locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+        try:
+            output = subprocess.check_output(cmd,
+                                             stderr=subprocess.STDOUT,
+                                             universal_newlines=True)
+            previous = None
+            pkg = {}
+            for line in output.splitlines():
+                if not line:
+                    if 'package' in pkg:
+                        pkgs.update({pkg['package']: pkg})
+                        pkg = {}
+                    continue
+                if line.startswith(' '):
+                    if previous and previous in pkg:
+                        pkg[previous] += os.linesep + line.lstrip()
+                    continue
+                if ':' in line:
+                    kv = line.split(':', 1)
+                    key = kv[0].lower()
+                    if key == 'n':
+                        continue
+                    previous = key
+                    pkg.update({key: kv[1].lstrip()})
+        except subprocess.CalledProcessError as cp:
+            # ``apt-cache`` returns 100 if none of the packages asked for
+            # exist in the apt cache.
+            if cp.returncode != 100:
+                raise
+        return pkgs
+
+
+class Config(_container):
+    def __init__(self):
+        super(Config, self).__init__(self._populate())
+
+    def _populate(self):
+        cfgs = {}
+        cmd = ['apt-config', 'dump']
+        output = subprocess.check_output(cmd,
+                                         stderr=subprocess.STDOUT,
+                                         universal_newlines=True)
+        for line in output.splitlines():
+            if not line.startswith("CommandLine"):
+                k, v = line.split(" ", 1)
+                cfgs[k] = v.strip(";").strip("\"")
+
+        return cfgs
+
+
+# Backwards compatibility with old apt_pkg module
+sys.modules[__name__].config = Config()
+
+
+def init():
+    """Compatibility shim that does nothing."""
+    pass
+
+
+def upstream_version(version):
+    """Extract the upstream version from a version string.
+
+    Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/
+                        apt-pkg/deb/debversion.cc#L259
+
+    :param version: Version string
+    :type version: str
+    :returns: Upstream version
+    :rtype: str
+    """
+    if version:
+        version = version.split(':')[-1]
+        version = version.split('-')[0]
+    return version
+
+
+def version_compare(a, b):
+    """Compare the given versions.
+
+    Call out to ``dpkg`` to make sure the code doing the comparison is
+    compatible with what the ``apt`` library would do.  Mimic the return
+    values.
+
+    Upstream reference:
+    https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html
+    ?highlight=version_compare#apt_pkg.version_compare
+
+    :param a: version string
+    :type a: str
+    :param b: version string
+    :type b: str
+    :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b,
+              <0 if ``a`` is smaller than ``b``
+    :rtype: int
+    :raises: subprocess.CalledProcessError, RuntimeError
+    """
+    for op in ('gt', 1), ('eq', 0), ('lt', -1):
+        try:
+            subprocess.check_call(['dpkg', '--compare-versions',
+                                   a, op[0], b],
+                                  stderr=subprocess.STDOUT,
+                                  universal_newlines=True)
+            return op[1]
+        except subprocess.CalledProcessError as cp:
+            if cp.returncode == 1:
+                continue
+            raise
+    else:
+        raise RuntimeError('Unable to compare "{}" and "{}", according to '
+                           'our logic they are neither greater, equal nor '
+                           'less than each other.'.format(a, b))
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/osplatform.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/osplatform.py
new file mode 100644
index 0000000000000000000000000000000000000000..78c81af5955caee51271c830d58cacac2cab9bcc
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/osplatform.py
@@ -0,0 +1,46 @@
+import platform
+import os
+
+
+def get_platform():
+    """Return the current OS platform.
+
+    For example: if current os platform is Ubuntu then a string "ubuntu"
+    will be returned (which is the name of the module).
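Together, `upstream_version` and `version_compare` above let charm code reason about Debian version strings without `python-apt`. A hedged usage sketch, assuming the charm's `lib` directory is on `sys.path` and `dpkg` is on `PATH`:

```python
from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg

# '2:8.1-1ubuntu2' -> epoch and Debian revision stripped away.
print(apt_pkg.upstream_version('2:8.1-1ubuntu2'))   # -> 8.1

# Delegates to `dpkg --compare-versions`, mimicking apt_pkg's return values.
print(apt_pkg.version_compare('2.0', '1.9'))        # -> 1  (greater)
print(apt_pkg.version_compare('1.0', '1.0'))        # -> 0  (equal)
```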
+ This string is used to decide which platform module should be imported. + """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" + elif "elementary" in current_platform: + # ElementaryOS fails to run tests locally without this. + return "ubuntu" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) + + +def _get_platform_from_fs(): + """Get Platform from /etc/os-release.""" + with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: + content = dict( + line.split('=', 1) + for line in fin.read().splitlines() + if '=' in line + ) + for k, v in content.items(): + content[k] = v.strip('"') + return content["NAME"] diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/payload/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/payload/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ee55cb3d2baddb556df910f1d41638c3c7f39c59 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/payload/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"Tools for working with files injected into a charm just before deployment." diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/payload/archive.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/payload/archive.py new file mode 100644 index 0000000000000000000000000000000000000000..7fc453f523cf49f0c123839a347c4452c6d465ca --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/payload/archive.py @@ -0,0 +1,71 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import tarfile +import zipfile +from charmhelpers.core import ( + host, + hookenv, +) + + +class ArchiveError(Exception): + pass + + +def get_archive_handler(archive_name): + if os.path.isfile(archive_name): + if tarfile.is_tarfile(archive_name): + return extract_tarfile + elif zipfile.is_zipfile(archive_name): + return extract_zipfile + else: + # look at the file name + for ext in ('.tar', '.tar.gz', '.tgz', 'tar.bz2', '.tbz2', '.tbz'): + if archive_name.endswith(ext): + return extract_tarfile + for ext in ('.zip', '.jar'): + if archive_name.endswith(ext): + return extract_zipfile + + +def archive_dest_default(archive_name): + archive_file = os.path.basename(archive_name) + return os.path.join(hookenv.charm_dir(), "archives", archive_file) + + +def extract(archive_name, destpath=None): + handler = get_archive_handler(archive_name) + if handler: + if not destpath: + destpath = archive_dest_default(archive_name) + if not os.path.isdir(destpath): + host.mkdir(destpath) + handler(archive_name, destpath) + return destpath + else: + raise ArchiveError("No handler for archive") + + +def extract_tarfile(archive_name, destpath): + "Unpack a tar archive, optionally compressed" + archive = tarfile.open(archive_name) + archive.extractall(destpath) + + +def extract_zipfile(archive_name, destpath): + "Unpack a zip file" + archive = zipfile.ZipFile(archive_name) + archive.extractall(destpath) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/payload/execd.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/payload/execd.py new file mode 100644 index 0000000000000000000000000000000000000000..1502aa0b596f0b1a2017ccb4543a35999774431d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charmhelpers/payload/execd.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import subprocess +from charmhelpers.core import hookenv + + +def default_execd_dir(): + return os.path.join(os.environ['CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within exec_dir. 
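`extract` above sniffs the archive type (tarfile/zipfile detection first, filename extension as a fallback) and unpacks into the charm's `archives` directory unless told otherwise. A brief usage sketch with illustrative paths:

```python
from charmhelpers.payload.archive import extract

# Unpack into an explicit destination; the handler is chosen by inspecting
# the file (tar vs zip), falling back to the extension list.
dest = extract('/tmp/payload.tar.gz', destpath='/tmp/payload')
print("unpacked to", dest)
```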
+ """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT): + """Run command for each module within execd_dir which defines it.""" + for submodule_path in execd_submodule_paths(command, execd_dir): + try: + subprocess.check_output(submodule_path, stderr=stderr, + universal_newlines=True) + except subprocess.CalledProcessError as e: + hookenv.log("Error ({}) running {}. Output: {}".format( + e.returncode, e.cmd, e.output)) + if die_on_error: + sys.exit(e.returncode) + + +def execd_preinstall(execd_dir=None): + """Run charm-pre-install for each module within execd_dir.""" + execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/libansible.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/libansible.py new file mode 100644 index 0000000000000000000000000000000000000000..32fd26ae7d63d42edef33982d5438669b191a361 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/libansible.py @@ -0,0 +1,108 @@ +## +# Copyright 2020 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +## + +import fnmatch +import os +import yaml +import subprocess +import sys + +sys.path.append("lib") +import charmhelpers.fetch + + +ansible_hosts_path = "/etc/ansible/hosts" + + +def install_ansible_support(from_ppa=True, ppa_location="ppa:ansible/ansible"): + """Installs the ansible package. + + By default it is installed from the `PPA`_ linked from + the ansible `website`_ or from a ppa specified by a charm config.. + + .. _PPA: https://launchpad.net/~rquillo/+archive/ansible + .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu + + If from_ppa is empty, you must ensure that the package is available + from a configured repository. 
+ """ + if from_ppa: + charmhelpers.fetch.add_source(ppa_location) + charmhelpers.fetch.apt_update(fatal=True) + charmhelpers.fetch.apt_install("ansible") + with open(ansible_hosts_path, "w+") as hosts_file: + hosts_file.write("localhost ansible_connection=local") + + +def create_hosts(hostname, username, password, hosts): + inventory_path = "/etc/ansible/hosts" + + with open(inventory_path, "w") as f: + f.write("[{}]\n".format(hosts)) + h1 = "host ansible_host={0} ansible_user={1} ansible_password={2}\n".format( + hostname, username, password + ) + f.write(h1) + + +def create_ansible_cfg(): + ansible_config_path = "/etc/ansible/ansible.cfg" + + with open(ansible_config_path, "w") as f: + f.write("[defaults]\n") + f.write("host_key_checking = False\n") + + +# Function to find the playbook path +def find(pattern, path): + result = "" + for root, dirs, files in os.walk(path): + for name in files: + if fnmatch.fnmatch(name, pattern): + result = os.path.join(root, name) + return result + + +def execute_playbook(playbook_file, hostname, user, password, vars_dict=None): + playbook_path = find(playbook_file, "/var/lib/juju/agents/") + + with open(playbook_path, "r") as f: + playbook_data = yaml.load(f) + + hosts = "all" + if "hosts" in playbook_data[0].keys() and playbook_data[0]["hosts"]: + hosts = playbook_data[0]["hosts"] + + create_ansible_cfg() + create_hosts(hostname, user, password, hosts) + + call = "ansible-playbook {} ".format(playbook_path) + + if vars_dict and isinstance(vars_dict, dict) and len(vars_dict) > 0: + call += "--extra-vars " + + string_var = "" + for k,v in vars_dict.items(): + string_var += "{}={} ".format(k, v) + + string_var = string_var.strip() + call += '"{}"'.format(string_var) + + call = call.strip() + result = subprocess.check_output(call, shell=True) + + return result diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/ns.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/ns.py new file mode 100644 index 0000000000000000000000000000000000000000..25be4056282e48ce946632025be8b466557e3171 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/ns.py @@ -0,0 +1,301 @@ +# A prototype of a library to aid in the development and operation of +# OSM Network Service charms + +import asyncio +import logging +import os +import os.path +import re +import subprocess +import sys +import time +import yaml + +try: + import juju +except ImportError: + # Not all cloud images are created equal + if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"): + # Update the apt cache + subprocess.check_call(["apt-get", "update"]) + + # Install the Python3 package + subprocess.check_call(["apt-get", "install", "-y", "python3", "python3-pip"],) + + + # Install the libjuju build dependencies + subprocess.check_call(["apt-get", "install", "-y", "libffi-dev", "libssl-dev"],) + + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "juju"], + ) + +from juju.controller import Controller + +# Quiet the debug logging +logging.getLogger('websockets.protocol').setLevel(logging.INFO) +logging.getLogger('juju.client.connection').setLevel(logging.WARN) +logging.getLogger('juju.model').setLevel(logging.WARN) +logging.getLogger('juju.machine').setLevel(logging.WARN) + + +class NetworkService: + """A lightweight interface to the Juju controller. 
+ + This NetworkService client is specifically designed to allow a higher-level + "NS" charm to interoperate with "VNF" charms, allowing for the execution of + Primitives across other charms within the same model. + """ + endpoint = None + user = 'admin' + secret = None + port = 17070 + loop = None + client = None + model = None + cacert = None + + def __init__(self, user, secret, endpoint=None): + + self.user = user + self.secret = secret + if endpoint is None: + addresses = os.environ['JUJU_API_ADDRESSES'] + for address in addresses.split(' '): + self.endpoint = address + else: + self.endpoint = endpoint + + # Stash the name of the model + self.model = os.environ['JUJU_MODEL_NAME'] + + # Load the ca-cert from agent.conf + AGENT_PATH = os.path.dirname(os.environ['JUJU_CHARM_DIR']) + with open("{}/agent.conf".format(AGENT_PATH), "r") as f: + try: + y = yaml.safe_load(f) + self.cacert = y['cacert'] + except yaml.YAMLError as exc: + print("Unable to find Juju ca-cert.") + raise exc + + # Create our event loop + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + async def connect(self): + """Connect to the Juju controller.""" + controller = Controller() + + print( + "Connecting to controller... ws://{}:{} as {}/{}".format( + self.endpoint, + self.port, + self.user, + self.secret[-4:].rjust(len(self.secret), "*"), + ) + ) + await controller.connect( + endpoint=self.endpoint, + username=self.user, + password=self.secret, + cacert=self.cacert, + ) + + return controller + + def __del__(self): + self.logout() + + async def disconnect(self): + """Disconnect from the Juju controller.""" + if self.client: + print("Disconnecting Juju controller") + await self.client.disconnect() + + def login(self): + """Login to the Juju controller.""" + if not self.client: + # Connect to the Juju API server + self.client = self.loop.run_until_complete(self.connect()) + return self.client + + def logout(self): + """Logout of the Juju controller.""" + + if self.loop: + print("Disconnecting from API") + self.loop.run_until_complete(self.disconnect()) + + def FormatApplicationName(self, *args): + """ + Generate a Juju-compatible Application name + + :param args tuple: Positional arguments to be used to construct the + application name. + + Limitations:: + - Only accepts characters a-z and non-consequitive dashes (-) + - Application name should not exceed 50 characters + + Examples:: + + FormatApplicationName("ping_pong_ns", "ping_vnf", "a") + """ + appname = "" + for c in "-".join(list(args)): + if c.isdigit(): + c = chr(97 + int(c)) + elif not c.isalpha(): + c = "-" + appname += c + + return re.sub('-+', '-', appname.lower()) + + def GetApplicationName(self, nsr_name, vnf_name, vnf_member_index): + """Get the runtime application name of a VNF/VDU. + + This will generate an application name matching the name of the deployed charm, + given the right parameters. + + :param nsr_name str: The name of the running Network Service, as specified at instantiation. 
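`FormatApplicationName` above maps digits to letters (`0` to `a`, `1` to `b`, and so on), turns any other non-alphabetic character into a dash, and collapses dash runs, so descriptor-derived names become valid Juju application names. A standalone re-implementation to show the effect (a sketch, not the canonical code):

```python
import re


def format_application_name(*args):
    out = ""
    for c in "-".join(args):
        if c.isdigit():
            c = chr(97 + int(c))      # '0' -> 'a', '1' -> 'b', ...
        elif not c.isalpha():
            c = "-"                   # underscores etc. become dashes
        out += c
    return re.sub('-+', '-', out.lower())


print(format_application_name("ping_pong_ns", "ping_vnf", "a"))
# -> ping-pong-ns-ping-vnf-a
```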
+        :param vnf_name str: The name of the VNF or VDU
+        :param vnf_member_index: The vnf-member-index as specified in the descriptor
+        """
+
+        application_name = self.FormatApplicationName(nsr_name, vnf_member_index, vnf_name)
+
+        # This matches the logic used by the LCM
+        application_name = application_name[0:48]
+        vca_index = int(vnf_member_index) - 1
+        application_name += '-' + chr(97 + vca_index // 26) + chr(97 + vca_index % 26)
+
+        return application_name
+
+    def ExecutePrimitiveGetOutput(self, application, primitive, params={}, timeout=600):
+        """Execute a single primitive and return its output.
+
+        This is a blocking method that will execute a single primitive and wait
+        for its completion before returning its output.
+
+        :param application str: The application name provided by `GetApplicationName`.
+        :param primitive str: The name of the primitive to execute.
+        :param params list: A list of parameters.
+        :param timeout int: A timeout, in seconds, to wait for the primitive to finish. Defaults to 600 seconds.
+        """
+        uuid = self.ExecutePrimitive(application, primitive, params)
+
+        status = None
+        output = None
+
+        starttime = time.time()
+        while time.time() < starttime + timeout:
+            status = self.GetPrimitiveStatus(uuid)
+            if status in ['completed', 'failed']:
+                break
+            time.sleep(10)
+
+        # When the primitive is done, get the output
+        if status in ['completed', 'failed']:
+            output = self.GetPrimitiveOutput(uuid)
+
+        return output
+
+    def ExecutePrimitive(self, application, primitive, params={}):
+        """Execute a primitive.
+
+        This is a non-blocking method to execute a primitive. It will return
+        the UUID of the queued primitive execution, which you can use
+        for subsequent calls to `GetPrimitiveStatus` and `GetPrimitiveOutput`.
+
+        :param application string: The name of the application
+        :param primitive string: The name of the Primitive.
+        :param params list: A list of parameters.
+
+        :returns uuid string: The UUID of the executed Primitive
+        """
+        uuid = None
+
+        if not self.client:
+            self.login()
+
+        model = self.loop.run_until_complete(
+            self.client.get_model(self.model)
+        )
+
+        # Get the application
+        if application in model.applications:
+            app = model.applications[application]
+
+            # Execute the primitive
+            unit = app.units[0]
+            if unit:
+                action = self.loop.run_until_complete(
+                    unit.run_action(primitive, **params)
+                )
+                uuid = action.id
+                print("Executing action: {}".format(uuid))
+            self.loop.run_until_complete(
+                model.disconnect()
+            )
+        else:
+            # Invalid mapping: application not found. Raise exception
+            raise Exception("Application not found: {}".format(application))
+
+        return uuid
+
+    def GetPrimitiveStatus(self, uuid):
+        """Get the status of a Primitive execution.
+
+        This will return one of the following strings:
+        - pending
+        - running
+        - completed
+        - failed
+
+        :param uuid string: The UUID of the executed Primitive.
+        :returns: The status of the executed Primitive
+        """
+        status = None
+
+        if not self.client:
+            self.login()
+
+        model = self.loop.run_until_complete(
+            self.client.get_model(self.model)
+        )
+
+        status = self.loop.run_until_complete(
+            model.get_action_status(uuid)
+        )
+
+        self.loop.run_until_complete(
+            model.disconnect()
+        )
+
+        return status[uuid]
+
+    def GetPrimitiveOutput(self, uuid):
+        """Get the output of a completed Primitive execution.
+
+        :param uuid string: The UUID of the executed Primitive.
+        :returns: The output of the execution, or None if it's still running.
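The three primitive methods compose as shown above: `ExecutePrimitive` queues the action, `GetPrimitiveStatus` is polled every ten seconds, and `GetPrimitiveOutput` fetches the result. A hedged usage sketch; it is only meaningful inside a hook execution, where the `JUJU_*` environment variables are set, and every name and credential below is a placeholder:

```python
from charms.osm.ns import NetworkService

# Endpoint is discovered from JUJU_API_ADDRESSES when not given explicitly.
ns = NetworkService("admin", "s3cret")

app = ns.GetApplicationName("my-ns", "vyos-vnf", "1")
output = ns.ExecutePrimitiveGetOutput(
    app, "configure-remote",
    params={"magmaIP": "10.0.0.1"},
    timeout=300,
)
print(output)
```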
+ """ + result = None + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + result = self.loop.run_until_complete( + model.get_action_output(uuid) + ) + + self.loop.run_until_complete( + model.disconnect() + ) + + return result diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/proxy_cluster.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/proxy_cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..f323a3af88f3011ef9fde794cdfe343be8665abb --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/proxy_cluster.py @@ -0,0 +1,59 @@ +import socket + +from ops.framework import Object, StoredState + + +class ProxyCluster(Object): + + state = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self._relation_name = relation_name + self._relation = self.framework.model.get_relation(self._relation_name) + + self.framework.observe(charm.on.ssh_keys_initialized, self.on_ssh_keys_initialized) + + self.state.set_default(ssh_public_key=None) + self.state.set_default(ssh_private_key=None) + + def on_ssh_keys_initialized(self, event): + if not self.framework.model.unit.is_leader(): + raise RuntimeError("The initial unit of a cluster must also be a leader.") + + self.state.ssh_public_key = event.ssh_public_key + self.state.ssh_private_key = event.ssh_private_key + if not self.is_joined: + event.defer() + return + + self._relation.data[self.model.app][ + "ssh_public_key" + ] = self.state.ssh_public_key + self._relation.data[self.model.app][ + "ssh_private_key" + ] = self.state.ssh_private_key + + @property + def is_joined(self): + return self._relation is not None + + @property + def ssh_public_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_public_key") + + @property + def ssh_private_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_private_key") + + @property + def is_cluster_initialized(self): + return ( + True + if self.is_joined + and self._relation.data[self.model.app].get("ssh_public_key") + and self._relation.data[self.model.app].get("ssh_private_key") + else False + ) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/sshproxy.py b/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/sshproxy.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c311e5be8515a7fe19c05dfed9f042ded0576b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/charms/osm/sshproxy.py @@ -0,0 +1,375 @@ +"""Module to help with executing commands over SSH.""" +## +# Copyright 2016 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+## + +# from charmhelpers.core import unitdata +# from charmhelpers.core.hookenv import log + +import io +import ipaddress +import subprocess +import os +import socket +import shlex +import traceback +import sys + +from subprocess import ( + check_call, + Popen, + CalledProcessError, + PIPE, +) + +from ops.charm import CharmBase, CharmEvents +from ops.framework import StoredState, EventBase, EventSource +from ops.main import main +from ops.model import ( + ActiveStatus, + BlockedStatus, + MaintenanceStatus, + WaitingStatus, + ModelError, +) +import os +import subprocess +from .proxy_cluster import ProxyCluster + +import logging + + +logger = logging.getLogger(__name__) + +class SSHKeysInitialized(EventBase): + def __init__(self, handle, ssh_public_key, ssh_private_key): + super().__init__(handle) + self.ssh_public_key = ssh_public_key + self.ssh_private_key = ssh_private_key + + def snapshot(self): + return { + "ssh_public_key": self.ssh_public_key, + "ssh_private_key": self.ssh_private_key, + } + + def restore(self, snapshot): + self.ssh_public_key = snapshot["ssh_public_key"] + self.ssh_private_key = snapshot["ssh_private_key"] + + +class ProxyClusterEvents(CharmEvents): + ssh_keys_initialized = EventSource(SSHKeysInitialized) + + +class SSHProxyCharm(CharmBase): + + state = StoredState() + on = ProxyClusterEvents() + + def __init__(self, framework, key): + super().__init__(framework, key) + + self.peers = ProxyCluster(self, "proxypeer") + + # SSH Proxy actions (primitives) + self.framework.observe(self.on.generate_ssh_key_action, self.on_generate_ssh_key_action) + self.framework.observe(self.on.get_ssh_public_key_action, self.on_get_ssh_public_key_action) + self.framework.observe(self.on.run_action, self.on_run_action) + self.framework.observe(self.on.verify_ssh_credentials_action, self.on_verify_ssh_credentials_action) + + self.framework.observe(self.on.proxypeer_relation_changed, self.on_proxypeer_relation_changed) + + def get_ssh_proxy(self): + """Get the SSHProxy instance""" + proxy = SSHProxy( + hostname=self.model.config["ssh-hostname"], + username=self.model.config["ssh-username"], + password=self.model.config["ssh-password"], + ) + return proxy + + def on_proxypeer_relation_changed(self, event): + if self.peers.is_cluster_initialized and not SSHProxy.has_ssh_key(): + pubkey = self.peers.ssh_public_key + privkey = self.peers.ssh_private_key + SSHProxy.write_ssh_keys(public=pubkey, private=privkey) + self.verify_credentials() + else: + event.defer() + + def on_config_changed(self, event): + """Handle changes in configuration""" + self.verify_credentials() + + def on_install(self, event): + SSHProxy.install() + + def on_start(self, event): + """Called when the charm is being installed""" + if not self.peers.is_joined: + event.defer() + return + + unit = self.model.unit + + if not SSHProxy.has_ssh_key(): + unit.status = MaintenanceStatus("Generating SSH keys...") + pubkey = None + privkey = None + if self.model.unit.is_leader(): + if self.peers.is_cluster_initialized: + SSHProxy.write_ssh_keys( + public=self.peers.ssh_public_key, + private=self.peers.ssh_private_key, + ) + else: + SSHProxy.generate_ssh_key() + self.on.ssh_keys_initialized.emit( + SSHProxy.get_ssh_public_key(), SSHProxy.get_ssh_private_key() + ) + self.verify_credentials() + + def verify_credentials(self): + unit = self.model.unit + + # Unit should go into a waiting state until verify_ssh_credentials is successful + unit.status = WaitingStatus("Waiting for SSH credentials") + proxy = self.get_ssh_proxy() + verified, 
_ = proxy.verify_credentials() + if verified: + unit.status = ActiveStatus() + else: + unit.status = BlockedStatus("Invalid SSH credentials.") + return verified + + ##################### + # SSH Proxy methods # + ##################### + def on_generate_ssh_key_action(self, event): + """Generate a new SSH keypair for this unit.""" + if self.model.unit.is_leader(): + if not SSHProxy.generate_ssh_key(): + event.fail("Unable to generate ssh key") + else: + event.fail("Unit is not leader") + return + + def on_get_ssh_public_key_action(self, event): + """Get the SSH public key for this unit.""" + if self.model.unit.is_leader(): + pubkey = SSHProxy.get_ssh_public_key() + event.set_results({"pubkey": SSHProxy.get_ssh_public_key()}) + else: + event.fail("Unit is not leader") + return + + def on_run_action(self, event): + """Run an arbitrary command on the remote host.""" + if self.model.unit.is_leader(): + cmd = event.params["command"] + proxy = self.get_ssh_proxy() + stdout, stderr = proxy.run(cmd) + event.set_results({"output": stdout}) + if len(stderr): + event.fail(stderr) + else: + event.fail("Unit is not leader") + return + + def on_verify_ssh_credentials_action(self, event): + """Verify the SSH credentials for this unit.""" + unit = self.model.unit + if unit.is_leader(): + proxy = self.get_ssh_proxy() + verified, stderr = proxy.verify_credentials() + if verified: + event.set_results({"verified": True}) + unit.status = ActiveStatus() + else: + event.set_results({"verified": False, "stderr": stderr}) + event.fail("Not verified") + unit.status = BlockedStatus("Invalid SSH credentials.") + + else: + event.fail("Unit is not leader") + return + + +class LeadershipError(ModelError): + def __init__(self): + super().__init__("not leader") + +class SSHProxy: + private_key_path = "/root/.ssh/id_sshproxy" + public_key_path = "/root/.ssh/id_sshproxy.pub" + key_type = "rsa" + key_bits = 4096 + + def __init__(self, hostname: str, username: str, password: str = ""): + self.hostname = hostname + self.username = username + self.password = password + + @staticmethod + def install(): + check_call("apt update && apt install -y openssh-client sshpass", shell=True) + + @staticmethod + def generate_ssh_key(): + """Generate a 4096-bit rsa keypair.""" + if not os.path.exists(SSHProxy.private_key_path): + cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format( + SSHProxy.key_type, SSHProxy.key_bits, SSHProxy.private_key_path, + ) + + try: + check_call(cmd, shell=True) + except CalledProcessError: + return False + + return True + + @staticmethod + def write_ssh_keys(public, private): + """Write a 4096-bit rsa keypair.""" + with open(SSHProxy.public_key_path, "w") as f: + f.write(public) + f.close() + with open(SSHProxy.private_key_path, "w") as f: + f.write(private) + f.close() + + @staticmethod + def get_ssh_public_key(): + publickey = "" + if os.path.exists(SSHProxy.private_key_path): + with open(SSHProxy.public_key_path, "r") as f: + publickey = f.read() + return publickey + + @staticmethod + def get_ssh_private_key(): + privatekey = "" + if os.path.exists(SSHProxy.private_key_path): + with open(SSHProxy.private_key_path, "r") as f: + privatekey = f.read() + return privatekey + + @staticmethod + def has_ssh_key(): + return True if os.path.exists(SSHProxy.private_key_path) else False + + def run(self, cmd: str) -> (str, str): + """Run a command remotely via SSH. 
+ + Note: The previous behavior was to run the command locally if SSH wasn't + configured, but that can lead to cases where execution succeeds when you'd + expect it not to. + """ + if isinstance(cmd, str): + cmd = shlex.split(cmd) + + host = self._get_hostname() + user = self.username + passwd = self.password + key = self.private_key_path + + # Make sure we have everything we need to connect + if host and user: + return self.ssh(cmd) + + raise Exception("Invalid SSH credentials.") + + def scp(self, source_file, destination_file): + """Execute an scp command. Requires a fully qualified source and + destination. + + :param str source_file: Path to the source file + :param str destination_file: Path to the destination file + :raises: :class:`CalledProcessError` if the command fails + """ + cmd = [ + "sshpass", + "-p", + self.password, + "scp", + "-i", + os.path.expanduser(self.private_key_path), + "-o", + "StrictHostKeyChecking=no", + "-q", + "-B", + ] + destination = "{}@{}:{}".format(self.username, self.hostname, destination_file) + cmd.extend([source_file, destination]) + subprocess.run(cmd, check=True) + + def ssh(self, command): + """Run a command remotely via SSH. + + :param list(str) command: The command to execute + :return: tuple: The stdout and stderr of the command execution + :raises: :class:`CalledProcessError` if the command fails + """ + + destination = "{}@{}".format(self.username, self.hostname) + cmd = [ + "sshpass", + "-p", + self.password, + "ssh", + "-i", + os.path.expanduser(self.private_key_path), + "-o", + "StrictHostKeyChecking=no", + "-q", + destination, + ] + cmd.extend(command) + output = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return (output.stdout.decode("utf-8").strip(), output.stderr.decode("utf-8").strip()) + + def verify_credentials(self): + """Verify the SSH credentials. + + :return (bool, str): Verified, Stderr + """ + verified = False + try: + (stdout, stderr) = self.run("hostname") + verified = True + except CalledProcessError as e: + stderr = "Command failed: {} ({})".format(" ".join(e.cmd), str(e.output)) + except (TimeoutError, socket.timeout): + stderr = "Timeout attempting to reach {}".format(self._get_hostname()) + except Exception as error: + tb = traceback.format_exc() + stderr = "Unhandled exception: {}".format(tb) + return verified, stderr + + ################### + # Private methods # + ################### + def _get_hostname(self): + """Get the hostname for the ssh target. + + HACK: This function was added to work around an issue where the + ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first + is the floating ip, and the second the non-floating ip, for an Openstack + instance. + """ + return self.hostname.split(";")[0] diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f17b2969db298b21bc47bbe1d3614ccff93e9c6e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The Operator Framework.""" + +from .version import version as __version__ # noqa: F401 (imported but unused) + +# Import here the bare minimum to break the circular import between modules +from . import charm # noqa: F401 (imported but unused) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/charm.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..d898de859fc444814bc19a7f8f0caaaec6f7e5f4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/charm.py @@ -0,0 +1,575 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +import os +import pathlib +import typing + +import yaml + +from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents +from ops import model + + +def _loadYaml(source): + if yaml.__with_libyaml__: + return yaml.load(source, Loader=yaml.CSafeLoader) + return yaml.load(source, Loader=yaml.SafeLoader) + + +class HookEvent(EventBase): + """A base class for events that trigger because of a Juju hook firing.""" + + +class ActionEvent(EventBase): + """A base class for events that trigger when a user asks for an Action to be run. + + To read the parameters for the action, see the instance variable `params`. + To respond with the result of the action, call `set_results`. To add progress + messages that are visible as the action is progressing use `log`. + + :ivar params: The parameters passed to the action (read by action-get) + """ + + def defer(self): + """Action events are not deferable like other events. + + This is because an action runs synchronously and the user is waiting for the result. + """ + raise RuntimeError('cannot defer action events') + + def restore(self, snapshot: dict) -> None: + """Used by the operator framework to record the action. + + Not meant to be called directly by Charm code. + """ + env_action_name = os.environ.get('JUJU_ACTION_NAME') + event_action_name = self.handle.kind[:-len('_action')].replace('_', '-') + if event_action_name != env_action_name: + # This could only happen if the dev manually emits the action, or from a bug. + raise RuntimeError('action event kind does not match current action') + # Params are loaded at restore rather than __init__ because + # the model is not available in __init__. + self.params = self.framework.model._backend.action_get() + + def set_results(self, results: typing.Mapping) -> None: + """Report the result of the action. 
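`ActionEvent` is the bridge between `actions.yaml` and handler code: `params` is populated from `action-get` at restore time, while `set_results`, `log`, and `fail` write back through the corresponding hook tools. A hedged handler sketch in the same style as the charm earlier in this patch; the `backup` action name is assumed to be declared in `actions.yaml`:

```python
from ops.charm import CharmBase
from ops.main import main


class ExampleCharm(CharmBase):
    def __init__(self, framework, key=None):
        super().__init__(framework, key)
        # actions.yaml must define a 'backup' action for this event to exist.
        self.framework.observe(self.on.backup_action, self.on_backup_action)

    def on_backup_action(self, event):
        target = event.params["backupFile"]   # read via action-get
        event.log("backing up to {}".format(target))
        ok = True                             # placeholder for the real work
        if ok:
            event.set_results({"backup-file": target})  # reported via action-set
        else:
            event.fail("backup failed")                 # reported via action-fail


if __name__ == "__main__":
    main(ExampleCharm)
```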
+ + Args: + results: The result of the action as a Dict + """ + self.framework.model._backend.action_set(results) + + def log(self, message: str) -> None: + """Send a message that a user will see while the action is running. + + Args: + message: The message for the user. + """ + self.framework.model._backend.action_log(message) + + def fail(self, message: str = '') -> None: + """Report that this action has failed. + + Args: + message: Optional message to record why it has failed. + """ + self.framework.model._backend.action_fail(message) + + +class InstallEvent(HookEvent): + """Represents the `install` hook from Juju.""" + + +class StartEvent(HookEvent): + """Represents the `start` hook from Juju.""" + + +class StopEvent(HookEvent): + """Represents the `stop` hook from Juju.""" + + +class RemoveEvent(HookEvent): + """Represents the `remove` hook from Juju. """ + + +class ConfigChangedEvent(HookEvent): + """Represents the `config-changed` hook from Juju.""" + + +class UpdateStatusEvent(HookEvent): + """Represents the `update-status` hook from Juju.""" + + +class UpgradeCharmEvent(HookEvent): + """Represents the `upgrade-charm` hook from Juju. + + This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju + has unpacked the upgraded charm code, and so this event will be handled with new code. + """ + + +class PreSeriesUpgradeEvent(HookEvent): + """Represents the `pre-series-upgrade` hook from Juju. + + This happens when a user has run `juju upgrade-series MACHINE prepare` and + will fire for each unit that is running on the machine, telling them that + the user is preparing to upgrade the Machine's series (eg trusty->bionic). + The charm should take actions to prepare for the upgrade (a database charm + would want to write out a version-independent dump of the database, so that + when a new version of the database is available in a new series, it can be + used.) + Once all units on a machine have run `pre-series-upgrade`, the user will + initiate the steps to actually upgrade the machine (eg `do-release-upgrade`). + When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire. + """ + + +class PostSeriesUpgradeEvent(HookEvent): + """Represents the `post-series-upgrade` hook from Juju. + + This is run after the user has done a distribution upgrade (or rolled back + and kept the same series). It is called in response to + `juju upgrade-series MACHINE complete`. Charms are expected to do whatever + steps are necessary to reconfigure their applications for the new series. + """ + + +class LeaderElectedEvent(HookEvent): + """Represents the `leader-elected` hook from Juju. + + Juju will trigger this when a new lead unit is chosen for a given application. + This represents the leader of the charm information (not necessarily the primary + of a running application). The main utility is that charm authors can know + that only one unit will be a leader at any given time, so they can do + configuration, etc, that would otherwise require coordination between units. + (eg, selecting a password for a new relation) + """ + + +class LeaderSettingsChangedEvent(HookEvent): + """Represents the `leader-settings-changed` hook from Juju. + + Deprecated. This represents when a lead unit would call `leader-set` to inform + the other units of an application that they have new information to handle. + This has been deprecated in favor of using a Peer relation, and having the + leader set a value in the Application data bag for that peer relation. 
+ (see :class:`RelationChangedEvent`). + """ + + +class CollectMetricsEvent(HookEvent): + """Represents the `collect-metrics` hook from Juju. + + Note that events firing during a CollectMetricsEvent are currently + sandboxed in how they can interact with Juju. To report metrics + use :meth:`.add_metrics`. + """ + + def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None: + """Record metrics that have been gathered by the charm for this unit. + + Args: + metrics: A collection of {key: float} pairs that contains the + metrics that have been gathered + labels: {key:value} strings that can be applied to the + metrics that are being gathered + """ + self.framework.model._backend.add_metrics(metrics, labels) + + +class RelationEvent(HookEvent): + """A base class representing the various relation lifecycle events. + + Charmers should not be creating RelationEvents directly. The events will be + generated by the framework from Juju related events. Users can observe them + from the various `CharmBase.on[relation_name].relation_*` events. + + Attributes: + relation: The Relation involved in this event + app: The remote application that has triggered this event + unit: The remote unit that has triggered this event. This may be None + if the relation event was triggered as an Application level event + """ + + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit is not None and unit.app != app: + raise RuntimeError( + 'cannot create RelationEvent with application {} and unit {}'.format(app, unit)) + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self) -> dict: + """Used by the framework to serialize the event to disk. + + Not meant to be called by Charm code. + """ + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot: dict) -> None: + """Used by the framework to deserialize the event from disk. + + Not meant to be called by Charm code. + """ + self.relation = self.framework.model.get_relation( + snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationCreatedEvent(RelationEvent): + """Represents the `relation-created` hook from Juju. + + This is triggered when a new relation to another app is added in Juju. This + can occur before units for those applications have started. All existing + relations should be established before start. + """ + + +class RelationJoinedEvent(RelationEvent): + """Represents the `relation-joined` hook from Juju. + + This is triggered whenever a new unit of a related application joins the relation. + (eg, a unit was added to an existing related app, or a new relation was established + with an application that already had units.) + """ + + +class RelationChangedEvent(RelationEvent): + """Represents the `relation-changed` hook from Juju. + + This is triggered whenever there is a change to the data bucket for a related + application or unit. Look at `event.relation.data[event.unit/app]` to see the + new information. 
+ """ + + +class RelationDepartedEvent(RelationEvent): + """Represents the `relation-departed` hook from Juju. + + This is the inverse of the RelationJoinedEvent, representing when a unit + is leaving the relation (the unit is being removed, the app is being removed, + the relation is being removed). It is fired once for each unit that is + going away. + """ + + +class RelationBrokenEvent(RelationEvent): + """Represents the `relation-broken` hook from Juju. + + If a relation is being removed (`juju remove-relation` or `juju remove-application`), + once all the units have been removed, RelationBrokenEvent will fire to signal + that the relationship has been fully terminated. + """ + + +class StorageEvent(HookEvent): + """Base class representing Storage related events.""" + + +class StorageAttachedEvent(StorageEvent): + """Represents the `storage-attached` hook from Juju. + + Called when new storage is available for the charm to use. + """ + + +class StorageDetachingEvent(StorageEvent): + """Represents the `storage-detaching` hook from Juju. + + Called when storage a charm has been using is going away. + """ + + +class CharmEvents(ObjectEvents): + """The events that are generated by Juju in response to the lifecycle of an application.""" + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + remove = EventSource(RemoveEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + collect_metrics = EventSource(CollectMetricsEvent) + + +class CharmBase(Object): + """Base class that represents the Charm overall. + + Usually this initialization is done by ops.main.main() rather than Charm authors + directly instantiating a Charm. + + Args: + framework: The framework responsible for managing the Model and events for this + Charm. + key: Ignored; will remove after deprecation period of the signature change. 
+ """ + + on = CharmEvents() + + def __init__(self, framework: Framework, key: typing.Optional = None): + super().__init__(framework, None) + + for relation_name in self.framework.meta.relations: + relation_name = relation_name.replace('-', '_') + self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent) + self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent) + self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent) + self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent) + self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent) + self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name = action_name.replace('-', '_') + self.on.define_event(action_name + '_action', ActionEvent) + + @property + def app(self) -> model.Application: + """Application that this unit is part of.""" + return self.framework.model.app + + @property + def unit(self) -> model.Unit: + """Unit that this execution is responsible for.""" + return self.framework.model.unit + + @property + def meta(self) -> 'CharmMeta': + """CharmMeta of this charm. + """ + return self.framework.meta + + @property + def charm_dir(self) -> pathlib.Path: + """Root directory of the Charm as it is running. + """ + return self.framework.charm_dir + + +class CharmMeta: + """Object containing the metadata for the charm. + + This is read from metadata.yaml and/or actions.yaml. Generally charms will + define this information, rather than reading it at runtime. This class is + mostly for the framework to understand what the charm has defined. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. + + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. + + Attributes: + name: The name of this charm + summary: Short description of what this charm does + description: Long description for this charm + maintainers: A list of strings of the email addresses of the maintainers + of this charm. + tags: Charm store tag metadata for categories associated with this charm. + terms: Charm store terms that should be agreed to before this charm can + be deployed. (Used for things like licensing issues.) + series: The list of supported OS series that this charm can support. + The first entry in the list is the default series that will be + used by deploy if no other series is requested by the user. + subordinate: True/False whether this charm is intended to be used as a + subordinate charm. + min_juju_version: If supplied, indicates this charm needs features that + are not available in older versions of Juju. + requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation. + provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation. + peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation. 
+ relations: A dict containing all :class:`RelationMeta` attributes (merged from other + sections) + storages: A dict of {name: :class:`StorageMeta`} for each defined storage. + resources: A dict of {name: :class:`ResourceMeta`} for each defined resource. + payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload. + extra_bindings: A dict of additional named bindings that a charm can use + for network configuration. + actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined. + Args: + raw: a mapping containing the contents of metadata.yaml + actions_raw: a mapping containing the contents of actions.yaml + """ + + def __init__(self, raw: dict = {}, actions_raw: dict = {}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta(RelationRole.requires, name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta(RelationRole.provides, name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta(RelationRole.peer, name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', {}) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml( + cls, metadata: typing.Union[str, typing.TextIO], + actions: typing.Optional[typing.Union[str, typing.TextIO]] = None): + """Instantiate a CharmMeta from a YAML description of metadata.yaml. + + Args: + metadata: A YAML description of charm metadata (name, relations, etc.) + This can be a simple string, or a file-like object. (passed to `yaml.safe_load`). + actions: YAML description of Actions for this charm (eg actions.yaml) + """ + meta = _loadYaml(metadata) + raw_actions = {} + if actions is not None: + raw_actions = _loadYaml(actions) + return cls(meta, raw_actions) + + +class RelationRole(enum.Enum): + peer = 'peer' + requires = 'requires' + provides = 'provides' + + def is_peer(self) -> bool: + """Return whether the current role is peer. + + A convenience to avoid having to import charm. + """ + return self is RelationRole.peer + + +class RelationMeta: + """Object containing metadata about a relation definition. + + Should not be constructed directly by Charm code. Is gotten from one of + :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`, + or :attr:`CharmMeta.relations`. + + Attributes: + role: This is one of peer/requires/provides + relation_name: Name of this relation from metadata.yaml + interface_name: Optional definition of the interface protocol. 
+ scope: "global" or "container" scope based on how the relation should be used. + """ + + def __init__(self, role: RelationRole, relation_name: str, raw: dict): + if not isinstance(role, RelationRole): + raise TypeError("role should be a Role, not {!r}".format(role)) + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition.""" + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition.""" + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition.""" + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + """Object containing metadata about an action's definition.""" + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {: } + self.required = raw.get('required', []) # [, ...] diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/framework.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/framework.py new file mode 100755 index 0000000000000000000000000000000000000000..b7c4749ff2b5bfb4f354bf1a8d4cd6ed64cf0da5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/framework.py @@ -0,0 +1,1067 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import collections.abc +import inspect +import keyword +import logging +import marshal +import os +import pathlib +import pdb +import re +import sys +import types +import weakref + +from ops import charm +from ops.storage import ( + NoSnapshotError, + SQLiteStorage, +) + +logger = logging.getLogger(__name__) + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. + + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. 
+ + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. + """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = "{}/{}[{}]".format(parent, kind, key) + else: + self._path = "{}/{}".format(parent, kind) + else: + if key: + self._path = "{}[{}]".format(kind, key) + else: + self._path = "{}".format(kind) + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {}".format(path)) + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. + """ + self.deferred = False + + +class EventSource: + """EventSource wraps an event type with a descriptor to facilitate observing and emitting. + + It is generally used as: + + class SomethingHappened(EventBase): + pass + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + With that, instances of that type will offer the someobj.something_happened + attribute which is a BoundEvent and may be used to emit and observe the event. + """ + + def __init__(self, event_type): + if not isinstance(event_type, type) or not issubclass(event_type, EventBase): + raise RuntimeError( + 'Event requires a subclass of EventBase as an argument, got {}'.format(event_type)) + self.event_type = event_type + self.event_kind = None + self.emitter_type = None + + def _set_name(self, emitter_type, event_kind): + if self.event_kind is not None: + raise RuntimeError( + 'EventSource({}) reused as {}.{} and {}.{}'.format( + self.event_type.__name__, + self.emitter_type.__name__, + self.event_kind, + emitter_type.__name__, + event_kind, + )) + self.event_kind = event_kind + self.emitter_type = emitter_type + + def __get__(self, emitter, emitter_type=None): + if emitter is None: + return self + # Framework might not be available if accessed as CharmClass.on.event + # rather than charm_instance.on.event, but in that case it couldn't be + # emitted anyway, so there's no point to registering it. 
+        framework = getattr(emitter, 'framework', None)
+        if framework is not None:
+            framework.register_type(self.event_type, emitter, self.event_kind)
+        return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
+class BoundEvent:
+
+    def __repr__(self):
+        return '<BoundEvent {} bound to {}.{} at {}>'.format(
+            self.event_type.__name__,
+            type(self.emitter).__name__,
+            self.event_kind,
+            hex(id(self)),
+        )
+
+    def __init__(self, emitter, event_type, event_kind):
+        self.emitter = emitter
+        self.event_type = event_type
+        self.event_kind = event_kind
+
+    def emit(self, *args, **kwargs):
+        """Emit event to all registered observers.
+
+        The current storage state is committed before and after each observer is notified.
+        """
+        framework = self.emitter.framework
+        key = framework._next_event_key()
+        event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
+        framework._emit(event)
+
+
+class HandleKind:
+    """Helper descriptor to define the Object.handle_kind field.
+
+    The handle_kind for an object defaults to its type name, but it may
+    be explicitly overridden if desired.
+    """
+
+    def __get__(self, obj, obj_type):
+        kind = obj_type.__dict__.get("handle_kind")
+        if kind:
+            return kind
+        return obj_type.__name__
+
+
+class _Metaclass(type):
+    """Helper class to ensure proper instantiation of Object-derived classes.
+
+    This class currently has a single purpose: events derived from EventSource
+    that are class attributes of Object-derived classes need to be told what
+    their name is in that class. For example, in
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    the instance of EventSource needs to know it's called 'something_happened'.
+
+    Starting from python 3.6 we could use __set_name__ on EventSource for this,
+    but until then this (meta)class does the equivalent work.
+
+    TODO: when we drop support for 3.5 drop this class, and rename _set_name in
+    EventSource to __set_name__; everything should continue to work.
+
+    """
+
+    def __new__(typ, *a, **kw):
+        k = super().__new__(typ, *a, **kw)
+        # k is now the Object-derived class; loop over its class attributes
+        for n, v in vars(k).items():
+            # we could do duck typing here if we want to support
+            # non-EventSource-derived shenanigans. We don't.
+            if isinstance(v, EventSource):
+                # this is what 3.6+ does automatically for us:
+                v._set_name(k, n)
+        return k
+
+
+class Object(metaclass=_Metaclass):
+
+    handle_kind = HandleKind()
+
+    def __init__(self, parent, key):
+        kind = self.handle_kind
+        if isinstance(parent, Framework):
+            self.framework = parent
+            # Avoid Framework instances having a circular reference to themselves.
+            if self.framework is self:
+                self.framework = weakref.proxy(self.framework)
+            self.handle = Handle(None, kind, key)
+        else:
+            self.framework = parent.framework
+            self.handle = Handle(parent, kind, key)
+        self.framework._track(self)
+
+        # TODO Detect conflicting handles here.
+
+    @property
+    def model(self):
+        return self.framework.model
+
+
+class ObjectEvents(Object):
+    """Convenience type to allow defining .on attributes at class level."""
+
+    handle_kind = "on"
+
+    def __init__(self, parent=None, key=None):
+        if parent is not None:
+            super().__init__(parent, key)
+        else:
+            self._cache = weakref.WeakKeyDictionary()
+
+    def __get__(self, emitter, emitter_type):
+        if emitter is None:
+            return self
+        instance = self._cache.get(emitter)
+        if instance is None:
+            # Same type, different instance, more data.
Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls: a type to define an event on. + + event_kind: an attribute name that will be used to access the + event. Must be a valid python identifier, not be a keyword + or an existing attribute. + + event_type: a type of the event to define. + + """ + prefix = 'unable to define an event with event_kind that ' + if not event_kind.isidentifier(): + raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind) + elif keyword.iskeyword(event_kind): + raise RuntimeError(prefix + 'is a python keyword: ' + event_kind) + try: + getattr(cls, event_kind) + raise RuntimeError( + prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind)) + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor._set_name(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. + event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(ObjectEvents): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return "cannot restore {} since no class was registered for it".format(self.handle_path) + + +# the message to show to the user when a pdb breakpoint goes active +_BREAKPOINT_WELCOME_MESSAGE = """ +Starting pdb to debug charm operator. +Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort. +Future breakpoints may interrupt execution again. +More details at https://discourse.jujucharms.com/t/debugging-charm-hooks + +""" + + +_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$' + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. 
+ model = None + meta = None + charm_dir = None + + def __init__(self, storage, charm_dir, meta, model): + + super().__init__(self, None) + + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + if isinstance(storage, (str, pathlib.Path)): + logger.warning( + "deprecated: Framework now takes a Storage not a path") + storage = SQLiteStorage(storage) + self._storage = storage + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do + # breakpoint(); if Python < 3.7, this doesn't affect anything + sys.breakpointhook = self.breakpoint + + # Flag to indicate that we already presented the welcome message in a debugger breakpoint + self._breakpoint_welcomed = False + + # Parse once the env var, which may be used multiple times later + debug_at = os.environ.get('JUJU_DEBUG_AT') + self._juju_debug_at = debug_at.split(',') if debug_at else () + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError( + 'two objects claiming to be {} have been created'.format(obj.handle.path)) + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. + self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. 
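+
+        For example, StoredStateData in this module snapshots its cache as a
+        plain dict such as ``{'event_count': 7}`` (an illustrative value).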
+ """ + if type(value) not in self._type_known: + raise RuntimeError( + 'cannot save {} values before registering that type'.format(type(value).__name__)) + data = value.snapshot() + + # Use marshal as a validator, enforcing the use of simple types, as we later the + # information is really pickled, which is too error prone for future evolution of the + # stored data (e.g. if the developer stores a custom object and later changes its + # class name; when unpickling the original class will not be there and event + # data loading will fail). + try: + marshal.dumps(data) + except ValueError: + msg = "unable to save the data for {}, it must contain only simple types: {!r}" + raise ValueError(msg.format(value.__class__.__name__, data)) + + self._storage.save_snapshot(value.handle.path, data) + + def load_snapshot(self, handle): + parent_path = None + if handle.parent: + parent_path = handle.parent.path + cls = self._type_registry.get((parent_path, handle.kind)) + if not cls: + raise NoTypeError(handle.path) + data = self._storage.load_snapshot(handle.path) + obj = cls.__new__(cls) + obj.framework = self + obj.handle = handle + obj.restore(data) + self._track(obj) + return obj + + def drop_snapshot(self, handle): + self._storage.drop_snapshot(handle.path) + + def observe(self, bound_event: BoundEvent, observer: types.MethodType): + """Register observer to be called when bound_event is emitted. + + The bound_event is generally provided as an attribute of the object that emits + the event, and is created in this style: + + class SomeObject: + something_happened = Event(SomethingHappened) + + That event may be observed as: + + framework.observe(someobj.something_happened, self._on_something_happened) + + Raises: + RuntimeError: if bound_event or observer are the wrong type. + """ + if not isinstance(bound_event, BoundEvent): + raise RuntimeError( + 'Framework.observe requires a BoundEvent as second parameter, got {}'.format( + bound_event)) + if not isinstance(observer, types.MethodType): + # help users of older versions of the framework + if isinstance(observer, charm.CharmBase): + raise TypeError( + 'observer methods must now be explicitly provided;' + ' please replace observe(self.on.{0}, self)' + ' with e.g. observe(self.on.{0}, self._on_{0})'.format( + bound_event.event_kind)) + raise RuntimeError( + 'Framework.observe requires a method as third parameter, got {}'.format(observer)) + + event_type = bound_event.event_type + event_kind = bound_event.event_kind + emitter = bound_event.emitter + + self.register_type(event_type, emitter, event_kind) + + if hasattr(emitter, "handle"): + emitter_path = emitter.handle.path + else: + raise RuntimeError( + 'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__)) + + # Validate that the method has an acceptable call signature. + sig = inspect.signature(observer) + # Self isn't included in the params list, so the first arg will be the event. + extra_params = list(sig.parameters.values())[1:] + + method_name = observer.__name__ + observer = observer.__self__ + if not sig.parameters: + raise TypeError( + '{}.{} must accept event parameter'.format(type(observer).__name__, method_name)) + elif any(param.default is inspect.Parameter.empty for param in extra_params): + # Allow for additional optional params, since there's no reason to exclude them, but + # required params will break. 
+ raise TypeError( + '{}.{} has extra required parameter'.format(type(observer).__name__, method_name)) + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 + # means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + saved = False + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of + # all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + if not saved: + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + saved = True + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + if saved: + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + event_is_from_juju = isinstance(event, charm.HookEvent) + event_is_action = isinstance(event, charm.ActionEvent) + if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at: + # Present the welcome message and run under PDB. + self._show_debug_code_message() + pdb.runcall(custom_handler, event) + else: + # Regular call to the registered method. + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from + # scratch in the next path. + self.framework._forget(event) + + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + + def _show_debug_code_message(self): + """Present the welcome message (only once!) 
when using debugger functionality."""
+        if not self._breakpoint_welcomed:
+            self._breakpoint_welcomed = True
+            print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='')
+
+    def breakpoint(self, name=None):
+        """Add breakpoint, optionally named, at the place where this method is called.
+
+        For the breakpoint to be activated the JUJU_DEBUG_AT environment variable
+        must be set to "all" or to the specific name parameter provided, if any. In every
+        other situation calling this method does nothing.
+
+        The framework also provides a standard breakpoint named "hook", that will
+        stop execution when a hook event is about to be handled.
+
+        For those reasons, the "all" and "hook" breakpoint names are reserved.
+        """
+        # If given, validate that the name complies with all the rules
+        if name is not None:
+            if not isinstance(name, str):
+                raise TypeError('breakpoint names must be strings')
+            if name in ('hook', 'all'):
+                raise ValueError('breakpoint names "all" and "hook" are reserved')
+            if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name):
+                raise ValueError('breakpoint names must look like "foo" or "foo-bar"')
+
+        indicated_breakpoints = self._juju_debug_at
+        if not indicated_breakpoints:
+            return
+
+        if 'all' in indicated_breakpoints or name in indicated_breakpoints:
+            self._show_debug_code_message()
+
+            # If we call set_trace() directly it will open the debugger *here*, so we
+            # point it at our caller's frame instead.
+            code_frame = inspect.currentframe().f_back
+            pdb.Pdb().set_trace(code_frame)
+        else:
+            logger.warning(
+                "Breakpoint %r skipped (not found in the requested breakpoints: %s)",
+                name, indicated_breakpoints)
+
+    def remove_unreferenced_events(self):
+        """Remove events from storage that are not referenced.
+
+        In older versions of the framework, events that had no observers would get recorded but
+        never deleted. This makes a best effort to find these events and remove them from the
+        database.
+        """
+        event_regex = re.compile(_event_regex)
+        to_remove = []
+        for handle_path in self._storage.list_snapshots():
+            if event_regex.match(handle_path):
+                notices = self._storage.notices(handle_path)
+                if next(notices, None) is None:
+                    # There are no notices for this handle_path, so it is valid to remove it
+                    to_remove.append(handle_path)
+        for handle_path in to_remove:
+            self._storage.drop_snapshot(handle_path)
+
+
+class StoredStateData(Object):
+
+    def __init__(self, parent, attr_name):
+        super().__init__(parent, attr_name)
+        self._cache = {}
+        self.dirty = False
+
+    def __getitem__(self, key):
+        return self._cache.get(key)
+
+    def __setitem__(self, key, value):
+        self._cache[key] = value
+        self.dirty = True
+
+    def __contains__(self, key):
+        return key in self._cache
+
+    def snapshot(self):
+        return self._cache
+
+    def restore(self, snapshot):
+        self._cache = snapshot
+        self.dirty = False
+
+    def on_commit(self, event):
+        if self.dirty:
+            self.framework.save_snapshot(self)
+            self.dirty = False
+
+
+class BoundStoredState:
+
+    def __init__(self, parent, attr_name):
+        parent.framework.register_type(StoredStateData, parent)
+
+        handle = Handle(parent, StoredStateData.handle_kind, attr_name)
+        try:
+            data = parent.framework.load_snapshot(handle)
+        except NoSnapshotError:
+            data = StoredStateData(parent, attr_name)
+
+        # __dict__ is used to avoid infinite recursion.
+        self.__dict__["_data"] = data
+        self.__dict__["_attr_name"] = attr_name
+
+        parent.framework.observe(parent.framework.on.commit, self._data.on_commit)
+
+    def __getattr__(self, key):
+        # "on" is the only reserved key that can't be used in the data map.
+        if key == "on":
+            return self._data.on
+        if key not in self._data:
+            raise AttributeError("attribute '{}' is not stored".format(key))
+        return _wrap_stored(self._data, self._data[key])
+
+    def __setattr__(self, key, value):
+        if key == "on":
+            raise AttributeError("attribute 'on' is reserved and cannot be set")
+
+        value = _unwrap_stored(self._data, value)
+
+        if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)):
+            raise AttributeError(
+                'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format(
+                    key, type(value).__name__))
+
+        self._data[key] = _unwrap_stored(self._data, value)
+
+    def set_default(self, **kwargs):
+        """Set the value of any given key if it has not already been set."""
+        for k, v in kwargs.items():
+            if k not in self._data:
+                self._data[k] = v
+
+
+class StoredState:
+    """A class used to store data the charm needs persisted across invocations.
+
+    Example::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+    Instances of `MyClass` can transparently save state between invocations by
+    setting attributes on `_stored`. Initial state should be set with
+    `set_default` on the bound object, that is::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self._stored.set_default(seen=set())
+                self.framework.observe(self.on.seen, self._on_seen)
+
+            def _on_seen(self, event):
+                self._stored.seen.add(event.uuid)
+
+    """
+
+    def __init__(self):
+        self.parent_type = None
+        self.attr_name = None
+
+    def __get__(self, parent, parent_type=None):
+        if self.parent_type is not None and self.parent_type not in parent_type.mro():
+            # the StoredState instance is being shared between two unrelated classes
+            # -> unclear what is expected of us -> bail out
+            raise RuntimeError(
+                'StoredState shared by {} and {}'.format(
+                    self.parent_type.__name__, parent_type.__name__))
+
+        if parent is None:
+            # accessing via the class directly (e.g. MyClass.stored)
+            return self
+
+        bound = None
+        if self.attr_name is not None:
+            bound = parent.__dict__.get(self.attr_name)
+            if bound is not None:
+                # we already have the thing from a previous pass, huzzah
+                return bound
+
+        # need to find ourselves amongst the parent's bases
+        for cls in parent_type.mro():
+            for attr_name, attr_value in cls.__dict__.items():
+                if attr_value is not self:
+                    continue
+                # we've found ourselves! is it the first time?
+ if bound is not None: + # the StoredState instance is being stored in two different + # attributes -> unclear what is expected of us -> bail out + raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format( + cls.__name__, self.attr_name, attr_name)) + # we've found ourselves for the first time; save where, and bind the object + self.attr_name = attr_name + self.parent_type = cls + bound = BoundStoredState(parent, attr_name) + + if bound is not None: + # cache the bound object to avoid the expensive lookup the next time + # (don't use setattr, to keep things symmetric with the fast-path lookup above) + parent.__dict__[self.attr_name] = bound + return bound + + raise AttributeError( + 'cannot find {} attribute in type {}'.format( + self.__class__.__name__, parent_type.__name__)) + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def _unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + 
return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredList):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+
+class StoredSet(collections.abc.MutableSet):
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def add(self, key):
+        self._under.add(key)
+        self._stored_data.dirty = True
+
+    def discard(self, key):
+        self._under.discard(key)
+        self._stored_data.dirty = True
+
+    def __contains__(self, key):
+        return key in self._under
+
+    def __iter__(self):
+        return self._under.__iter__()
+
+    def __len__(self):
+        return len(self._under)
+
+    @classmethod
+    def _from_iterable(cls, it):
+        """Construct an instance of the class from any iterable input.
+
+        Per https://docs.python.org/3/library/collections.abc.html
+        if the Set mixin is being used in a class with a different constructor signature,
+        you will need to override _from_iterable() with a classmethod that can construct
+        new instances from an iterable argument.
+        """
+        return set(it)
+
+    def __le__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under <= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under <= other
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+    def __eq__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under == other
+        else:
+            return NotImplemented
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/jujuversion.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/jujuversion.py
new file mode 100755
index 0000000000000000000000000000000000000000..b2b8177dbe396f0d8c46b86e26af6b4e54ea046d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/jujuversion.py
@@ -0,0 +1,98 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+from functools import total_ordering
+
+
+@total_ordering
+class JujuVersion:
+
+    PATTERN = r'''^
+    (?P<major>\d{1,9})\.(?P<minor>\d{1,9})       # <major> and <minor> numbers are always there
+    ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch>
+    (\.(?P<build>\d{1,9}))?$                     # and sometimes with a .<build> number.
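+    # e.g. matches "2.7.8", "2.8-rc1", or "2.8-rc1.1" (illustrative version strings)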
+ ''' + + def __init__(self, version): + m = re.match(self.PATTERN, version, re.VERBOSE) + if not m: + raise RuntimeError('"{}" is not a valid Juju version string'.format(version)) + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch) + else: + s = '{}.{}.{}'.format(self.major, self.minor, self.patch) + if self.build > 0: + s += '.{}'.format(self.build) + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + return ( + self.major == other.major + and self.minor == other.minor + and self.tag == other.tag + and self.build == other.build + and self.patch == other.patch) + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False + + @classmethod + def from_environ(cls) -> 'JujuVersion': + """Build a JujuVersion from JUJU_VERSION.""" + v = os.environ.get('JUJU_VERSION') + if not v: + raise RuntimeError('environ has no JUJU_VERSION') + return cls(v) + + def has_app_data(self) -> bool: + """Determine whether this juju version knows about app data.""" + return (self.major, self.minor, self.patch) >= (2, 7, 0) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/lib/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..edb9fcacea6f0173aed9f07ca8a683cfead989cc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/lib/__init__.py @@ -0,0 +1,194 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import os +import re + +from ast import literal_eval +from importlib.util import module_from_spec +from importlib.machinery import ModuleSpec +from pkgutil import get_importer +from types import ModuleType + + +_libraries = None + +_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''') +_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''') + +# Not perfect, but should do for now. 
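+# For example, it accepts "alice@example.com" (an illustrative address) but
+# makes no attempt at full RFC 5322 address validation.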
+_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''') + + +def use(name: str, api: int, author: str) -> ModuleType: + """Use a library from the ops libraries. + + Args: + name: the name of the library requested. + api: the API version of the library. + author: the author of the library. If not given, requests the + one in the standard library. + Raises: + ImportError: if the library cannot be found. + TypeError: if the name, api, or author are the wrong type. + ValueError: if the name, api, or author are invalid. + """ + if not isinstance(name, str): + raise TypeError("invalid library name: {!r} (must be a str)".format(name)) + if not isinstance(author, str): + raise TypeError("invalid library author: {!r} (must be a str)".format(author)) + if not isinstance(api, int): + raise TypeError("invalid library API: {!r} (must be an int)".format(api)) + if api < 0: + raise ValueError('invalid library api: {} (must be ≥0)'.format(api)) + if not _libname_re.match(name): + raise ValueError("invalid library name: {!r} (chars and digits only)".format(name)) + if not _libauthor_re.match(author): + raise ValueError("invalid library author email: {!r}".format(author)) + + if _libraries is None: + autoimport() + + versions = _libraries.get((name, author), ()) + for lib in versions: + if lib.api == api: + return lib.import_module() + + others = ', '.join(str(lib.api) for lib in versions) + if others: + msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format( + name, author, api, others) + else: + msg = 'cannot find library "{}" from "{}"'.format(name, author) + + raise ImportError(msg, name=name) + + +def autoimport(): + """Find all libs in the path and enable use of them. + + You only need to call this if you've installed a package or + otherwise changed sys.path in the current run, and need to see the + changes. Otherwise libraries are found on first call of `use`. + """ + global _libraries + _libraries = {} + for spec in _find_all_specs(sys.path): + lib = _parse_lib(spec) + if lib is None: + continue + + versions = _libraries.setdefault((lib.name, lib.author), []) + versions.append(lib) + versions.sort(reverse=True) + + +def _find_all_specs(path): + for sys_dir in path: + if sys_dir == "": + sys_dir = "." 
+ try: + top_dirs = os.listdir(sys_dir) + except OSError: + continue + for top_dir in top_dirs: + opslib = os.path.join(sys_dir, top_dir, 'opslib') + try: + lib_dirs = os.listdir(opslib) + except OSError: + continue + finder = get_importer(opslib) + if finder is None or not hasattr(finder, 'find_spec'): + continue + for lib_dir in lib_dirs: + spec = finder.find_spec(lib_dir) + if spec is None: + continue + if spec.loader is None: + # a namespace package; not supported + continue + yield spec + + +# only the first this many lines of a file are looked at for the LIB* constants +_MAX_LIB_LINES = 99 + + +def _parse_lib(spec): + if spec.origin is None: + return None + + _expected = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int} + + try: + with open(spec.origin, 'rt', encoding='utf-8') as f: + libinfo = {} + for n, line in enumerate(f): + if len(libinfo) == len(_expected): + break + if n > _MAX_LIB_LINES: + return None + m = _libline_re.match(line) + if m is None: + continue + key, value = m.groups() + if key in _expected: + value = literal_eval(value) + if not isinstance(value, _expected[key]): + return None + libinfo[key] = value + else: + if len(libinfo) != len(_expected): + return None + except Exception: + return None + + return _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH']) + + +class _Lib: + + def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int): + self.spec = spec + self.name = name + self.author = author + self.api = api + self.patch = patch + + self._module = None + + def __repr__(self): + return "<_Lib {0.name} by {0.author}, API {0.api}, patch {0.patch}>".format(self) + + def import_module(self) -> ModuleType: + if self._module is None: + module = module_from_spec(self.spec) + self.spec.loader.exec_module(module) + self._module = module + return self._module + + def __eq__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a == b + + def __lt__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a < b diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/log.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/log.py new file mode 100644 index 0000000000000000000000000000000000000000..4aac5543aec4d84dc393e79b772a30284712d6d4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/log.py @@ -0,0 +1,51 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import logging + + +class JujuLogHandler(logging.Handler): + """A handler for sending logs to Juju via juju-log.""" + + def __init__(self, model_backend, level=logging.DEBUG): + super().__init__(level) + self.model_backend = model_backend + + def emit(self, record): + self.model_backend.juju_log(record.levelname, self.format(record)) + + +def setup_root_logging(model_backend, debug=False): + """Setup python logging to forward messages to juju-log. + + By default, logging is set to DEBUG level, and messages will be filtered by Juju. + Charmers can also set their own default log level with:: + + logging.getLogger().setLevel(logging.INFO) + + model_backend -- a ModelBackend to use for juju-log + debug -- if True, write logs to stderr as well as to juju-log. + """ + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.addHandler(JujuLogHandler(model_backend)) + if debug: + handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + + sys.excepthook = lambda etype, value, tb: logger.error( + "Uncaught exception while in charm code:", exc_info=(etype, value, tb)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/main.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/main.py new file mode 100755 index 0000000000000000000000000000000000000000..6dc31c3575044796e8fe1f61b8415395689d6339 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/main.py @@ -0,0 +1,348 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import logging +import os +import subprocess +import sys +import warnings +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model +import ops.storage + +from ops.log import setup_root_logging + +CHARM_STATE_FILE = '.unit-state.db' + + +logger = logging.getLogger() + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/op/main.py structure. + charm_dir = Path('{}/../../..'.format(__file__)).resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _create_event_link(charm, bound_event): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. + """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError( + 'action event name {} needs _action suffix'.format(bound_event.event_kind)) + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. 
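+        # For example (illustrative): a "configure-remote" action has the
+        # event_kind "configure_remote_action", while its executable symlink
+        # is actions/configure-remote.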
+ event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError( + 'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type)) + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + # CPython has different implementations for populating sys.argv[0] for Linux and Windows. + # For Windows it is always an absolute path (any symlinks are resolved) + # while for Linux it can be a relative path. + target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir)) + + # Ignore the non-symlink files or directories + # assuming the charm author knows what they are doing. + logger.debug( + 'Creating a new relative symlink at %s pointing to %s', + event_path, target_path) + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file + which is determined by inspecting symlinks provided by the charm + author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + + """ + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + logger.debug("Event %s not defined for %s.", event_name, charm) + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. + if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + logger.debug('Emitting Juju event %s.', event_name) + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name)) + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +class _Dispatcher: + """Encapsulate how to figure out what event Juju wants us to run. + + Also knows how to run “legacy” hooks when Juju called us via a top-level + ``dispatch`` binary. 
+
+    Args:
+        charm_dir: the toplevel directory of the charm
+
+    Attributes:
+        event_name: the name of the event to run
+        is_dispatch_aware: are we running under a Juju that knows about the
+            dispatch binary?
+
+    """
+
+    def __init__(self, charm_dir: Path):
+        self._charm_dir = charm_dir
+        self._exec_path = Path(sys.argv[0])
+
+        if 'JUJU_DISPATCH_PATH' in os.environ and (charm_dir / 'dispatch').exists():
+            self._init_dispatch()
+        else:
+            self._init_legacy()
+
+    def ensure_event_links(self, charm):
+        """Make sure necessary symlinks are present on disk."""
+
+        if self.is_dispatch_aware:
+            # links aren't needed
+            return
+
+        # When a charm is force-upgraded and a unit is in an error state Juju
+        # does not run upgrade-charm and instead runs the failed hook followed
+        # by config-changed. Given the nature of force-upgrading the hook setup
+        # code is not triggered on config-changed.
+        #
+        # 'start' event is included as Juju does not fire the install event for
+        # K8s charms (see LP: #1854635).
+        if (self.event_name in ('install', 'start', 'upgrade_charm')
+                or self.event_name.endswith('_storage_attached')):
+            _setup_event_links(self._charm_dir, charm)
+
+    def run_any_legacy_hook(self):
+        """Run any extant legacy hook.
+
+        If there is both a dispatch file and a legacy hook for the
+        current event, run the wanted legacy hook.
+        """
+
+        if not self.is_dispatch_aware:
+            # we *are* the legacy hook
+            return
+
+        dispatch_path = self._charm_dir / self._dispatch_path
+        if not dispatch_path.exists():
+            logger.debug("Legacy %s does not exist.", self._dispatch_path)
+            return
+
+        # os.access is the closest thing there is to an is_executable check
+        if not os.access(str(dispatch_path), os.X_OK):
+            logger.warning("Legacy %s exists but is not executable.", self._dispatch_path)
+            return
+
+        if dispatch_path.resolve() == self._exec_path.resolve():
+            logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path)
+            return
+
+        argv = sys.argv.copy()
+        argv[0] = str(dispatch_path)
+        logger.info("Running legacy %s.", self._dispatch_path)
+        try:
+            subprocess.run(argv, check=True)
+        except subprocess.CalledProcessError as e:
+            logger.warning(
+                "Legacy %s exited with status %d.",
+                self._dispatch_path, e.returncode)
+            sys.exit(e.returncode)
+        else:
+            logger.debug("Legacy %s exited with status 0.", self._dispatch_path)
+
+    def _set_name_from_path(self, path: Path):
+        """Set the name attribute to the event name inferred from the given path."""
+        name = path.name.replace('-', '_')
+        if path.parent.name == 'actions':
+            name = '{}_action'.format(name)
+        self.event_name = name
+
+    def _init_legacy(self):
+        """Set up the 'legacy' dispatcher.
+
+        The current Juju doesn't know about 'dispatch' and calls hooks
+        explicitly.
+        """
+        self.is_dispatch_aware = False
+        self._set_name_from_path(self._exec_path)
+
+    def _init_dispatch(self):
+        """Set up the new 'dispatch' dispatcher.
+
+        The current Juju will run 'dispatch' if it exists, and otherwise fall
+        back to the old behaviour.
+
+        JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install,
+        in both cases.
+        """
+        self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH'])
+
+        if 'OPERATOR_DISPATCH' in os.environ:
+            logger.debug("Charm called itself via %s.", self._dispatch_path)
+            sys.exit(0)
+        os.environ['OPERATOR_DISPATCH'] = '1'
+
+        self.is_dispatch_aware = True
+        self._set_name_from_path(self._dispatch_path)
+
+    def is_restricted_context(self):
+        """Return True if we are running in a restricted Juju context.
+
+        When in a restricted context, most commands (relation-get, config-get,
+        state-get) are not available. As such, we change how we interact with
+        Juju.
+        """
+        return self.event_name in ('collect_metrics',)
+
+
+def main(charm_class, use_juju_for_storage=False):
+    """Set up the charm and dispatch the observed event.
+
+    The event name is based on the way this executable was called (argv[0]).
+    """
+    charm_dir = _get_charm_dir()
+
+    model_backend = ops.model._ModelBackend()
+    debug = ('JUJU_DEBUG' in os.environ)
+    setup_root_logging(model_backend, debug=debug)
+    logger.debug("Operator Framework %s up and running.", ops.__version__)
+
+    dispatcher = _Dispatcher(charm_dir)
+    dispatcher.run_any_legacy_hook()
+
+    metadata = (charm_dir / 'metadata.yaml').read_text()
+    actions_meta = charm_dir / 'actions.yaml'
+    if actions_meta.exists():
+        actions_metadata = actions_meta.read_text()
+    else:
+        actions_metadata = None
+
+    if not yaml.__with_libyaml__:
+        logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader')
+    meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata)
+    model = ops.model.Model(meta, model_backend)
+
+    # TODO: If Juju unit agent crashes after exit(0) from the charm code
+    # the framework will commit the snapshot but Juju will not commit its
+    # operation.
+    charm_state_path = charm_dir / CHARM_STATE_FILE
+    if use_juju_for_storage:
+        if dispatcher.is_restricted_context():
+            # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event
+            # Though we eventually expect that juju will run collect-metrics in a
+            # non-restricted context. Once we can determine that we are running collect-metrics
+            # in a non-restricted context, we should fire the event as normal.
+            logger.debug('"%s" is not supported when using Juju for storage\n'
+                         'see: https://github.com/canonical/operator/issues/348',
+                         dispatcher.event_name)
+            # Note that we don't exit nonzero, because that would cause Juju to rerun the hook
+            return
+        store = ops.storage.JujuStorage()
+    else:
+        store = ops.storage.SQLiteStorage(charm_state_path)
+    framework = ops.framework.Framework(store, charm_dir, meta, model)
+    try:
+        sig = inspect.signature(charm_class)
+        try:
+            sig.bind(framework)
+        except TypeError:
+            msg = (
+                "the second argument, 'key', has been deprecated and will be "
+                "removed after the 0.7 release")
+            warnings.warn(msg, DeprecationWarning)
+            charm = charm_class(framework, None)
+        else:
+            charm = charm_class(framework)
+        dispatcher.ensure_event_links(charm)
+
+        # TODO: Remove the collect_metrics check below as soon as the relevant
+        # Juju changes are made.
+        #
+        # Skip reemission of deferred events for collect-metrics events because
+        # they do not have the full access to all hook tools.
+        if not dispatcher.is_restricted_context():
+            framework.reemit()
+
+        _emit_charm_event(charm, dispatcher.event_name)
+
+        framework.commit()
+    finally:
+        framework.close()
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/model.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..b96e89154ea9cec2b62a4fab4649412e115c304e
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/model.py
@@ -0,0 +1,1237 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import decimal +import ipaddress +import json +import os +import re +import shutil +import tempfile +import time +import typing +import weakref + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + +import ops +from ops.jujuversion import JujuVersion + + +class Model: + """Represents the Juju Model as seen from this unit. + + This should not be instantiated directly by Charmers, but can be accessed as `self.model` + from any class that derives from Object. + + Attributes: + unit: A :class:`Unit` that represents the unit that is running this code (eg yourself) + app: A :class:`Application` that represents the application this unit is a part of. + relations: Mapping of endpoint to list of :class:`Relation` answering the question + "what am I currently related to". See also :meth:`.get_relation` + config: A dict of the config for the current application. + resources: Access to resources for this charm. Use ``model.resources.fetch(resource_name)`` + to get the path on disk where the resource can be found. + storages: Mapping of storage_name to :class:`Storage` for the storage points defined in + metadata.yaml + pod: Used to get access to ``model.pod.set_spec`` to set the container specification + for Kubernetes charms. + """ + + def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'): + self._cache = _ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(self._backend.unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + self._bindings = BindingMapping(self._backend) + + @property + def name(self) -> str: + """Return the name of the Model that this unit is running in. + + This is read from the environment variable ``JUJU_MODEL_NAME``. + """ + return self._backend.model_name + + def get_unit(self, unit_name: str) -> 'Unit': + """Get an arbitrary unit by name. + + Internally this uses a cache, so asking for the same unit two times will + return the same object. + """ + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name: str) -> 'Application': + """Get an application by name. + + Internally this uses a cache, so asking for the same application two times will + return the same object. + """ + return self._cache.get(Application, app_name) + + def get_relation( + self, relation_name: str, + relation_id: typing.Optional[int] = None) -> 'Relation': + """Get a specific Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. 
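+
+        For example, ``self.model.get_relation('db')`` returns the single
+        established ``db`` relation, or None when that endpoint is not related
+        yet (the endpoint name 'db' is illustrative).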
+ + Args: + relation_name: The name of the endpoint for this charm + relation_id: An identifier for a specific relation. Used to disambiguate when a + given application has more than one relation on a given endpoint. + Raises: + TooManyRelatedAppsError: is raised if there is more than one relation to the + supplied relation_name and no relation_id was supplied + """ + return self.relations._get_unique(relation_name, relation_id) + + def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a network space binding. + + Args: + binding_key: The relation name or instance to obtain bindings for. + Returns: + If ``binding_key`` is a relation name, the method returns the default binding + for that relation. If a relation instance is provided, the method first looks + up a more specific binding for that specific relation ID, and if none is found + falls back to the default binding for the relation name. + """ + return self._bindings.get(binding_key) + + +class _ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + """Represents a named application in the model. + + This might be your application, or might be an application that you are related to. + Charmers should not instantiate Application objects directly, but should use + :meth:`Model.get_app` if they need a reference to a given application. + + Attributes: + name: The name of this application (eg, 'mysql'). This name may differ from the name of + the charm, if the user has deployed it to a different name. + """ + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of the overall application. + + Can only be read and set by the lead unit of the application. + + The status of remote units is always Unknown. + + Raises: + RuntimeError: if you try to set the status of another application, or if you try to + set the status of this application as a unit that is not the leader. 
+                InvalidStatusError: if you try to set the status to something that is not a
+                :class:`StatusBase`
+
+        Example::
+
+            self.model.app.status = BlockedStatus('I need a human to come help me')
+        """
+        if not self._is_our_app:
+            return UnknownStatus()
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot get application status as a non-leader unit')
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=True)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value: 'StatusBase'):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for application {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_app:
+            raise RuntimeError('cannot set status for a remote application {}'.format(self))
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot set application status as a non-leader unit')
+
+        self._backend.status_set(value.name, value.message, is_app=True)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+
+class Unit:
+    """Represents a named unit in the model.
+
+    This might be your unit, another unit of your application, or a unit of another application
+    that you are related to.
+
+    Attributes:
+        name: The name of the unit (eg, 'mysql/0')
+        app: The Application the unit is a part of.
+    """
+
+    def __init__(self, name, backend, cache):
+        self.name = name
+
+        app_name = name.split('/')[0]
+        self.app = cache.get(Application, app_name)
+
+        self._backend = backend
+        self._cache = cache
+        self._is_our_unit = self.name == self._backend.unit_name
+        self._status = None
+
+    def _invalidate(self):
+        self._status = None
+
+    @property
+    def status(self) -> 'StatusBase':
+        """Used to report or read the status of a specific unit.
+
+        The status of any unit other than yourself is always Unknown.
+
+        Raises:
+            RuntimeError: if you try to set the status of a unit other than yourself.
+            InvalidStatusError: if you try to set the status to something other than
+                a :class:`StatusBase`
+
+        Example::
+
+            self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators')
+        """
+        if not self._is_our_unit:
+            return UnknownStatus()
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=False)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value: 'StatusBase'):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for unit {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_unit:
+            raise RuntimeError('cannot set status for a remote unit {}'.format(self))
+
+        self._backend.status_set(value.name, value.message, is_app=False)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+    def is_leader(self) -> bool:
+        """Return whether this unit is the leader of its application.
+
+        This can only be called for your own unit.
+        Returns:
+            True if you are the leader, False otherwise
+        Raises:
+            RuntimeError: if called for a unit that is not yourself
+        """
+        if self._is_our_unit:
+            # This value is not cached as it is not guaranteed to persist for the whole duration
+            # of a hook execution.
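+            # Typical charm-side usage (sketch): guard leader-only work, such
+            # as writing application relation data or app status, with
+            # `if self.unit.is_leader(): ...`.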
+ return self._backend.is_leader() + else: + raise RuntimeError( + 'leadership status of remote units ({}) is not visible to other' + ' applications'.format(self) + ) + + def set_workload_version(self, version: str) -> None: + """Record the version of the software running as the workload. + + This shouldn't be confused with the revision of the charm. This is informative only; + shown in the output of 'juju status'. + """ + if not isinstance(version, str): + raise TypeError("workload version must be a str, not {}: {!r}".format( + type(version).__name__, version)) + self._backend.application_version_set(version) + + +class LazyMapping(Mapping, ABC): + """Represents a dict that isn't populated until it is accessed. + + Charm authors should generally never need to use this directly, but it forms + the basis for many of the dicts that the framework tracks. + """ + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def _invalidate(self): + self._lazy_data = None + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of :class:`Relation` instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role.is_peer(): + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, + self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _invalidate(self, relation_name): + """Used to wipe the cache of a given relation_name. + + Not meant to be used by Charm authors. The content of relation data is + static for the lifetime of a hook, so it is safe to cache in memory once + accessed. + """ + self._data[relation_name] = None + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError('relation id {} must be int or None not {}'.format( + relation_id, + type(relation_id).__name__)) + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, + self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. 
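+            # Callers that genuinely expect several relations on one endpoint
+            # should iterate self.model.relations[relation_name] instead of
+            # calling get_relation without a relation_id (guidance only, no
+            # behaviour change here).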
+            raise TooManyRelatedAppsError(relation_name, num_related, 1)
+
+
+class BindingMapping:
+    """Mapping of endpoints to network bindings.
+
+    Charm authors should not instantiate this directly, but access it via
+    :meth:`Model.get_binding`
+    """
+
+    def __init__(self, backend):
+        self._backend = backend
+        self._data = {}
+
+    def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding':
+        """Get a specific Binding for an endpoint/relation.
+
+        Not used directly by Charm authors. See :meth:`Model.get_binding`
+        """
+        if isinstance(binding_key, Relation):
+            binding_name = binding_key.name
+            relation_id = binding_key.id
+        elif isinstance(binding_key, str):
+            binding_name = binding_key
+            relation_id = None
+        else:
+            raise ModelError('binding key must be str or relation instance, not {}'
+                             ''.format(type(binding_key).__name__))
+        binding = self._data.get(binding_key)
+        if binding is None:
+            binding = Binding(binding_name, relation_id, self._backend)
+            self._data[binding_key] = binding
+        return binding
+
+
+class Binding:
+    """Binding to a network space.
+
+    Attributes:
+        name: The name of the endpoint this binding represents (eg, 'db')
+    """
+
+    def __init__(self, name, relation_id, backend):
+        self.name = name
+        self._relation_id = relation_id
+        self._backend = backend
+        self._network = None
+
+    @property
+    def network(self) -> 'Network':
+        """The network information for this binding."""
+        if self._network is None:
+            try:
+                self._network = Network(self._backend.network_get(self.name, self._relation_id))
+            except RelationNotFoundError:
+                if self._relation_id is None:
+                    raise
+                # If a relation is dead, we can still get network info associated with an
+                # endpoint itself
+                self._network = Network(self._backend.network_get(self.name))
+        return self._network
+
+
+class Network:
+    """Network space details.
+
+    Charm authors should not instantiate this directly, but should get access to the Network
+    definition from :meth:`Model.get_binding` and its ``network`` attribute.
+
+    Attributes:
+        interfaces: A list of :class:`NetworkInterface` details. This includes the
+            information about how your application should be configured (eg, what
+            IP addresses should you bind to.)
+            Note that multiple addresses for a single interface are represented as multiple
+            interfaces. (eg, ``[NetworkInterface('ens1', '10.1.1.1/32'),
+            NetworkInterface('ens1', '10.1.2.1/32')]``)
+        ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP
+            addresses that other units should use to get in touch with you.
+        egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that
+            other units will see you connecting from. Due to things like NAT it isn't always
+            possible to narrow it down to a single address, but when it is clear, the CIDRs
+            will be constrained to a single address. (eg, 10.0.0.1/32)
+
+    Args:
+        network_info: A dict of network information as returned by ``network-get``.
+    """
+
+    def __init__(self, network_info: dict):
+        self.interfaces = []
+        # Treat multiple addresses on an interface as multiple logical
+        # interfaces with the same name.
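+        # Hypothetical shape of network_info, matching the keys parsed below:
+        #   {'bind-addresses': [{'interface-name': 'ens1',
+        #                        'addresses': [{'value': '10.1.1.1',
+        #                                       'cidr': '10.1.1.0/24'}]}],
+        #    'ingress-addresses': ['10.1.1.1'],
+        #    'egress-subnets': ['10.1.1.0/24']}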
+ for interface_info in network_info['bind-addresses']: + interface_name = interface_info['interface-name'] + for address_info in interface_info['addresses']: + self.interfaces.append(NetworkInterface(interface_name, address_info)) + self.ingress_addresses = [] + for address in network_info['ingress-addresses']: + self.ingress_addresses.append(ipaddress.ip_address(address)) + self.egress_subnets = [] + for subnet in network_info['egress-subnets']: + self.egress_subnets.append(ipaddress.ip_network(subnet)) + + @property + def bind_address(self): + """A single address that your application should bind() to. + + For the common case where there is a single answer. This represents a single + address from :attr:`.interfaces` that can be used to configure where your + application should bind() and listen(). + """ + return self.interfaces[0].address + + @property + def ingress_address(self): + """The address other applications should use to connect to your unit. + + Due to things like public/private addresses, NAT and tunneling, the address you bind() + to is not always the address other people can use to connect() to you. + This is just the first address from :attr:`.ingress_addresses`. + """ + return self.ingress_addresses[0] + + +class NetworkInterface: + """Represents a single network interface that the charm needs to know about. + + Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding` + to get the network information for a given endpoint. + + Attributes: + name: The name of the interface (eg. 'eth0', or 'ens1') + subnet: An :class:`ipaddress.ip_network` representation of the IP for the network + interface. This may be a single address (eg '10.0.1.2/32') + """ + + def __init__(self, name: str, address_info: dict): + self.name = name + # TODO: expose a hardware address here, see LP: #1864070. + self.address = ipaddress.ip_address(address_info['value']) + cidr = address_info['cidr'] + if not cidr: + # The cidr field may be empty, see LP: #1864102. + # In this case, make it a /32 or /128 IP network. + self.subnet = ipaddress.ip_network(address_info['value']) + else: + self.subnet = ipaddress.ip_network(cidr) + # TODO: expose a hostname/canonical name for the address here, see LP: #1864086. + + +class Relation: + """Represents an established relation between this application and another application. + + This class should not be instantiated directly, instead use :meth:`Model.get_relation` + or :attr:`RelationEvent.relation`. + + Attributes: + name: The name of the local endpoint of the relation (eg 'db') + id: The identifier for a particular relation (integer) + app: An :class:`Application` representing the remote application of this relation. + For peer relations this will be the local application. + units: A set of :class:`Unit` for units that have started and joined this relation. + data: A :class:`RelationData` holding the data buckets for each entity + of a relation. Accessed via eg Relation.data[unit]['foo'] + """ + + def __init__( + self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit, + backend: '_ModelBackend', cache: '_ModelCache'): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. 
+ if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. + pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return '<{}.{} {}:{}>'.format(type(self).__module__, + type(self).__name__, + self.name, + self.id) + + +class RelationData(Mapping): + """Represents the various data buckets of a given relation. + + Each unit and application involved in a relation has their own data bucket. + Eg: ``{entity: RelationDataContent}`` + where entity can be either a :class:`Unit` or a :class:`Application`. + + Units can read and write their own data, and if they are the leader, + they can read and write their application data. They are allowed to read + remote unit and application data. + + This class should not be created directly. It should be accessed via + :attr:`Relation.data` + """ + + def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'): + self.relation = weakref.proxy(relation) + self._data = { + our_unit: RelationDataContent(self.relation, our_unit, backend), + our_unit.app: RelationDataContent(self.relation, our_unit.app, backend), + } + self._data.update({ + unit: RelationDataContent(self.relation, unit, backend) + for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app is not None: + self._data.update({ + self.relation.app: RelationDataContent(self.relation, self.relation.app, backend), + }) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on + # whether this unit is a leader or not, but this is not guaranteed + # to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError('cannot set relation data for {}'.format(self._entity.name)) + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an + # empty string will remove the key entirely from the relation data. 
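+                # This mirrors the hook tool semantics, where running
+                # `relation-set -r <id> key=` drops "key" entirely (a sketch of
+                # the underlying behaviour, not an extra call made here).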
+ del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty + # string will remove the key entirely from the relation data. + self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units. + + To access a status by name, see :meth:`StatusBase.from_name`, most use cases will just + directly use the child class to indicate their status. + """ + + _statuses = {} + name = None + + def __init__(self, message: str): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + return super().__new__(cls) + + def __eq__(self, other): + if not isinstance(self, type(other)): + return False + return self.message == other.message + + def __repr__(self): + return "{.__class__.__name__}({!r})".format(self, self.message) + + @classmethod + def from_name(cls, name: str, message: str): + if name == 'unknown': + # unknown is special + return UnknownStatus() + else: + return cls._statuses[name](message) + + @classmethod + def register(cls, child): + if child.name is None: + raise AttributeError('cannot register a Status which has no name') + cls._statuses[child.name] = child + return child + + +@StatusBase.register +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the + charm has not called status-set yet. + + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + def __repr__(self): + return "UnknownStatus()" + + +@StatusBase.register +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message: str = ''): + super().__init__(message) + + +@StatusBase.register +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +@StatusBase.register +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation + for providing those services. This is a "spinning" state, not an error state. It + reflects activity on the unit itself, not on peers or related units. + + """ + name = 'maintenance' + + +@StatusBase.register +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which + it is related is not running. + + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name: str) -> Path: + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. 
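+
+        Example (the resource name is illustrative only)::
+
+            path = self.model.resources.fetch('my-resource')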
+        """
+        if name not in self._paths:
+            raise RuntimeError('invalid resource name: {}'.format(name))
+        if self._paths[name] is None:
+            self._paths[name] = Path(self._backend.resource_get(name))
+        return self._paths[name]
+
+
+class Pod:
+    """Represents the definition of a pod spec in Kubernetes models.
+
+    Currently only supports simple access to setting the Juju pod spec via :attr:`.set_spec`.
+    """
+
+    def __init__(self, backend: '_ModelBackend'):
+        self._backend = backend
+
+    def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None):
+        """Set the specification for pods that Juju should start in Kubernetes.
+
+        See `juju help-tool pod-spec-set` for details of what should be passed.
+
+        Args:
+            spec: The mapping defining the pod specification
+            k8s_resources: Additional Kubernetes-specific specification.
+        """
+        if not self._backend.is_leader():
+            raise ModelError('cannot set a pod spec as this unit is not a leader')
+        self._backend.pod_spec_set(spec, k8s_resources)
+
+
+class StorageMapping(Mapping):
+    """Map of storage names to lists of Storage instances."""
+
+    def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'):
+        self._backend = backend
+        self._storage_map = {storage_name: None for storage_name in storage_names}
+
+    def __contains__(self, key: str):
+        return key in self._storage_map
+
+    def __len__(self):
+        return len(self._storage_map)
+
+    def __iter__(self):
+        return iter(self._storage_map)
+
+    def __getitem__(self, storage_name: str) -> typing.List['Storage']:
+        storage_list = self._storage_map[storage_name]
+        if storage_list is None:
+            storage_list = self._storage_map[storage_name] = []
+            for storage_id in self._backend.storage_list(storage_name):
+                storage_list.append(Storage(storage_name, storage_id, self._backend))
+        return storage_list
+
+    def request(self, storage_name: str, count: int = 1):
+        """Request new storage instances of a given name.
+
+        Uses the storage-add tool to request additional storage. Juju will notify the unit
+        via <storage-name>-storage-attached events when it becomes available.
+        """
+        if storage_name not in self._storage_map:
+            raise ModelError(('cannot add storage {!r}:'
+                              ' it is not present in the charm metadata').format(storage_name))
+        self._backend.storage_add(storage_name, count)
+
+
+class Storage:
+    """Represents a storage instance as defined in metadata.yaml.
+
+    Attributes:
+        name: Simple string name of the storage
+        id: The provider id for storage
+    """
+
+    def __init__(self, storage_name, storage_id, backend):
+        self.name = storage_name
+        self.id = storage_id
+        self._backend = backend
+        self._location = None
+
+    @property
+    def location(self):
+        if self._location is None:
+            raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location")
+            self._location = Path(raw)
+        return self._location
+
+
+class ModelError(Exception):
+    """Base class for exceptions raised when interacting with the Model."""
+    pass
+
+
+class TooManyRelatedAppsError(ModelError):
+    """Raised by :meth:`Model.get_relation` if there is more than one related application."""
+
+    def __init__(self, relation_name, num_related, max_supported):
+        super().__init__('Too many remote applications on {} ({} > {})'.format(
+            relation_name, num_related, max_supported))
+        self.relation_name = relation_name
+        self.num_related = num_related
+        self.max_supported = max_supported
+
+
+class RelationDataError(ModelError):
+    """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid.
+ + This is raised if you're either trying to set a value to something that isn't a string, + or if you are trying to set a value in a bucket that you don't have access to. (eg, + another application/unit or setting your application data but you aren't the leader.) + """ + + +class RelationNotFoundError(ModelError): + """Backend error when querying juju for a given relation and that relation doesn't exist.""" + + +class InvalidStatusError(ModelError): + """Raised if trying to set an Application or Unit status to something invalid.""" + + +class _ModelBackend: + """Represents the connection between the Model representation and talking to Juju. + + Charm authors should not directly interact with the ModelBackend, it is a private + implementation of Model. + """ + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self, unit_name=None, model_name=None): + if unit_name is None: + self.unit_name = os.environ['JUJU_UNIT_NAME'] + else: + self.unit_name = unit_name + if model_name is None: + model_name = os.environ.get('JUJU_MODEL_NAME') + self.model_name = model_name + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = None + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', '-r', str(relation_id), + return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'getting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-get', '-r', str(relation_id), '-', member_name] + if is_app: + args.append('--app') + + try: + return self._run(*args, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'setting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)] + if is_app: + args.append('--app') + + try: + return self._run(*args) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm 
code is executing on.
+
+        The value is cached for the duration of a lease which is 30s in Juju.
+        """
+        now = time.monotonic()
+        if self._leader_check_time is None:
+            check = True
+        else:
+            time_since_check = datetime.timedelta(seconds=now - self._leader_check_time)
+            check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None)
+        if check:
+            # Current time MUST be saved before running is-leader to ensure the cache
+            # is only used inside the window that is-leader itself asserts.
+            self._leader_check_time = now
+            self._is_leader = self._run('is-leader', return_output=True, use_json=True)
+
+        return self._is_leader
+
+    def resource_get(self, resource_name):
+        return self._run('resource-get', resource_name, return_output=True).strip()
+
+    def pod_spec_set(self, spec, k8s_resources):
+        tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
+        try:
+            spec_path = tmpdir / 'spec.json'
+            spec_path.write_text(json.dumps(spec))
+            args = ['--file', str(spec_path)]
+            if k8s_resources:
+                k8s_res_path = tmpdir / 'k8s-resources.json'
+                k8s_res_path.write_text(json.dumps(k8s_resources))
+                args.extend(['--k8s-resources', str(k8s_res_path)])
+            self._run('pod-spec-set', *args)
+        finally:
+            shutil.rmtree(str(tmpdir))
+
+    def status_get(self, *, is_app=False):
+        """Get a status of a unit or an application.
+
+        Args:
+            is_app: A boolean indicating whether the status should be retrieved for a unit
+                or an application.
+        """
+        content = self._run(
+            'status-get', '--include-data', '--application={}'.format(is_app),
+            use_json=True,
+            return_output=True)
+        # Unit status looks like (in YAML):
+        # message: 'load: 0.28 0.26 0.26'
+        # status: active
+        # status-data: {}
+        # Application status looks like (in YAML):
+        # application-status:
+        #   message: 'load: 0.28 0.26 0.26'
+        #   status: active
+        #   status-data: {}
+        #   units:
+        #     uo/0:
+        #       message: 'load: 0.28 0.26 0.26'
+        #       status: active
+        #       status-data: {}
+
+        if is_app:
+            return {'status': content['application-status']['status'],
+                    'message': content['application-status']['message']}
+        else:
+            return content
+
+    def status_set(self, status, message='', *, is_app=False):
+        """Set a status of a unit or an application.
+
+        Args:
+            is_app: A boolean indicating whether the status should be set for a unit or an
+                application.
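+
+        Example (sketch; ``backend`` stands for a _ModelBackend instance)::
+
+            backend.status_set('active', 'ready', is_app=False)
+            # runs: status-set --application=False active ready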
+ """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', '--application={}'.format(is_app), status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, + return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, + return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError('storage count must be integer, got: {} ({})'.format(count, + type(count))) + self._run('storage-add', '{}={}'.format(name, count)) + + def action_get(self): + return self._run('action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()]) + + def action_log(self, message): + self._run('action-log', message) + + def action_fail(self, message=''): + self._run('action-fail', message) + + def application_version_set(self, version): + self._run('application-version-set', '--', version) + + def juju_log(self, level, message): + self._run('juju-log', '--log-level', level, message) + + def network_get(self, binding_name, relation_id=None): + """Return network info provided by network-get for a given binding. + + Args: + binding_name: A name of a binding (relation name or extra-binding name). + relation_id: An optional relation id to get network info for. + """ + cmd = ['network-get', binding_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def add_metrics(self, metrics, labels=None): + cmd = ['add-metric'] + + if labels: + label_args = [] + for k, v in labels.items(): + _ModelBackendValidator.validate_metric_label(k) + _ModelBackendValidator.validate_label_value(k, v) + label_args.append('{}={}'.format(k, v)) + cmd.extend(['--labels', ','.join(label_args)]) + + metric_args = [] + for k, v in metrics.items(): + _ModelBackendValidator.validate_metric_key(k) + metric_value = _ModelBackendValidator.format_metric_value(v) + metric_args.append('{}={}'.format(k, metric_value)) + cmd.extend(metric_args) + self._run(*cmd) + + +class _ModelBackendValidator: + """Provides facilities for validating inputs and formatting them for model backends.""" + + METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$') + + @classmethod + def validate_metric_key(cls, key): + if cls.METRIC_KEY_REGEX.match(key) is None: + raise ModelError( + 'invalid metric key {!r}: must match {}'.format( + key, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def validate_metric_label(cls, label_name): + if cls.METRIC_KEY_REGEX.match(label_name) is None: + raise ModelError( + 'invalid metric label name {!r}: must match {}'.format( + label_name, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def format_metric_value(cls, value): + try: + decimal_value = decimal.Decimal.from_float(value) + except TypeError as e: + e2 = ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + raise e2 from e + if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0: + raise ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + 
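+        # For example, format_metric_value(1.5) returns '1.5'; NaN, infinite
+        # and negative inputs were already rejected above.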
return str(decimal_value) + + @classmethod + def validate_label_value(cls, label, value): + # Label values cannot be empty, contain commas or equal signs as those are + # used by add-metric as separators. + if not value: + raise ModelError( + 'metric label {} has an empty value, which is not allowed'.format(label)) + v = str(value) + if re.search('[,=]', v) is not None: + raise ModelError( + 'metric label values must not contain "," or "=": {}={!r}'.format(label, value)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/storage.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/storage.py new file mode 100755 index 0000000000000000000000000000000000000000..d4310ce1cfbb707c6278b70f84a9751da3ce07af --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/storage.py @@ -0,0 +1,318 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from datetime import timedelta +import pickle +import shutil +import subprocess +import sqlite3 +import typing + +import yaml + + +class SQLiteStorage: + + DB_LOCK_TIMEOUT = timedelta(hours=1) + + def __init__(self, filename): + # The isolation_level argument is set to None such that the implicit + # transaction management behavior of the sqlite3 module is disabled. + self._db = sqlite3.connect(str(filename), + isolation_level=None, + timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._setup() + + def _setup(self): + # Make sure that the database is locked until the connection is closed, + # not until the transaction ends. + self._db.execute("PRAGMA locking_mode=EXCLUSIVE") + c = self._db.execute("BEGIN") + c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") + if c.fetchone()[0] == 0: + # Keep in mind what might happen if the process dies somewhere below. + # The system must not be rendered permanently broken by that. + self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") + self._db.execute(''' + CREATE TABLE notice ( + sequence INTEGER PRIMARY KEY AUTOINCREMENT, + event_path TEXT, + observer_path TEXT, + method_name TEXT) + ''') + self._db.commit() + + def close(self): + self._db.close() + + def commit(self): + self._db.commit() + + # There's commit but no rollback. For abort to be supported, we'll need logic that + # can rollback decisions made by third-party code in terms of the internal state + # of objects that have been snapshotted, and hooks to let them know about it and + # take the needed actions to undo their logic until the last snapshot. + # This is doable but will increase significantly the chances for mistakes. + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + """Part of the Storage API, persist a snapshot data under the given handle. + + Args: + handle_path: The string identifying the snapshot. + snapshot_data: The data to be persisted. (as returned by Object.snapshot()). This + might be a dict/tuple/int, but must only contain 'simple' python types. 
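+
+        Example (handle path and payload are illustrative only)::
+
+            store.save_snapshot('MyCharm/StoredStateData[_stored]', {'count': 1})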
+        """
+        # Use pickle for serialization, so the value remains portable.
+        raw_data = pickle.dumps(snapshot_data)
+        self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data))
+
+    def load_snapshot(self, handle_path: str) -> typing.Any:
+        """Part of the Storage API, retrieve a snapshot that was previously saved.
+
+        Args:
+            handle_path: The string identifying the snapshot.
+        Raises:
+            NoSnapshotError: if there is no snapshot for the given handle_path.
+        """
+        c = self._db.cursor()
+        c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,))
+        row = c.fetchone()
+        if row:
+            return pickle.loads(row[0])
+        raise NoSnapshotError(handle_path)
+
+    def drop_snapshot(self, handle_path: str):
+        """Part of the Storage API, remove a snapshot that was previously saved.
+
+        Dropping a snapshot that doesn't exist is treated as a no-op.
+        """
+        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))
+
+    def list_snapshots(self) -> typing.Generator[str, None, None]:
+        """Return the names of all snapshots that are currently saved."""
+        c = self._db.cursor()
+        c.execute("SELECT handle FROM snapshot")
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield row[0]
+
+    def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, record a notice (event and observer)."""
+        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
+                         (event_path, observer_path, method_name))
+
+    def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, remove a notice that was previously recorded."""
+        self._db.execute('''
+            DELETE FROM notice
+            WHERE event_path=?
+            AND observer_path=?
+            AND method_name=?
+            ''', (event_path, observer_path, method_name))
+
+    def notices(self, event_path: typing.Optional[str]) ->\
+            typing.Generator[typing.Tuple[str, str, str], None, None]:
+        """Part of the Storage API, return all notices that begin with event_path.
+
+        Args:
+            event_path: If supplied, will only yield events that match event_path. If not
+                supplied (or None/'') will return all events.
+        Returns:
+            Iterable of (event_path, observer_path, method_name) tuples
+        """
+        if event_path:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                WHERE event_path=?
+                ORDER BY sequence
+                ''', (event_path,))
+        else:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                ORDER BY sequence
+                ''')
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield tuple(row)
+
+
+class JujuStorage:
+    """Store the content tracked by the Framework in Juju.
+
+    This uses :class:`_JujuStorageBackend` to interact with state-get/state-set
+    as the way to store state for the framework and for components.
+ """ + + NOTICE_KEY = "#notices#" + + def __init__(self, backend: '_JujuStorageBackend' = None): + self._backend = backend + if backend is None: + self._backend = _JujuStorageBackend() + + def close(self): + return + + def commit(self): + return + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + self._backend.set(handle_path, snapshot_data) + + def load_snapshot(self, handle_path): + try: + content = self._backend.get(handle_path) + except KeyError: + raise NoSnapshotError(handle_path) + return content + + def drop_snapshot(self, handle_path): + self._backend.delete(handle_path) + + def save_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.append([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.remove([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def notices(self, event_path: str): + notice_list = self._load_notice_list() + for row in notice_list: + if row[0] != event_path: + continue + yield tuple(row) + + def _load_notice_list(self) -> typing.List[typing.Tuple[str]]: + try: + notice_list = self._backend.get(self.NOTICE_KEY) + except KeyError: + return [] + if notice_list is None: + return [] + return notice_list + + def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None: + self._backend.set(self.NOTICE_KEY, notices) + + +class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)): + """Handle a couple basic python types. + + yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one + that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just + subclass SafeLoader and add tuples back in. + """ + # Taken from the example at: + # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml + + construct_python_tuple = yaml.Loader.construct_python_tuple + + +_SimpleLoader.add_constructor( + u'tag:yaml.org,2002:python/tuple', + _SimpleLoader.construct_python_tuple) + + +class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)): + """Add types supported by 'marshal' + + YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So + we want to only support dumping out types that are safe to load. + """ + + +_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple +_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple) + + +class _JujuStorageBackend: + """Implements the interface from the Operator framework to Juju's state-get/set/etc.""" + + @staticmethod + def is_available() -> bool: + """Check if Juju state storage is available. + + This checks if there is a 'state-get' executable in PATH. + """ + p = shutil.which('state-get') + return p is not None + + def set(self, key: str, value: typing.Any) -> None: + """Set a key to a given value. + + Args: + key: The string key that will be used to find the value later + value: Arbitrary content that will be returned by get(). + Raises: + CalledProcessError: if 'state-set' returns an error code. + """ + # default_flow_style=None means that it can use Block for + # complex types (types that have nested types) but use flow + # for simple types (like an array). Not all versions of PyYAML + # have the same default style. 
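+        # Net effect (sketch): the value is YAML-dumped twice, once on its own
+        # and once wrapped in a one-entry {key: dumped-value} mapping, and the
+        # result is piped to state-set's stdin with block-style ('|') scalars.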
+        encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None)
+        content = yaml.dump(
+            {key: encoded_value}, encoding='utf-8', default_style='|',
+            default_flow_style=False,
+            Dumper=_SimpleDumper)
+        subprocess.run(["state-set", "--file", "-"], input=content, check=True)
+
+    def get(self, key: str) -> typing.Any:
+        """Get the value associated with a given key.
+
+        Args:
+            key: The string key that will be used to find the value
+        Raises:
+            CalledProcessError: if 'state-get' returns an error code.
+        """
+        # We don't capture stderr here so it can end up in debug logs.
+        p = subprocess.run(
+            ["state-get", key],
+            stdout=subprocess.PIPE,
+            check=True,
+        )
+        if p.stdout == b'' or p.stdout == b'\n':
+            raise KeyError(key)
+        return yaml.load(p.stdout, Loader=_SimpleLoader)
+
+    def delete(self, key: str) -> None:
+        """Remove a key from being tracked.
+
+        Args:
+            key: The key to stop storing
+        Raises:
+            CalledProcessError: if 'state-delete' returns an error code.
+        """
+        subprocess.run(["state-delete", key], check=True)
+
+
+class NoSnapshotError(Exception):
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return 'no snapshot data found for {} object'.format(self.handle_path)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/testing.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/testing.py
new file mode 100755
index 0000000000000000000000000000000000000000..b4b3fe071216238007c9f3847ca9556be626bf6b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/testing.py
@@ -0,0 +1,586 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import pathlib
+from textwrap import dedent
+import tempfile
+import typing
+import yaml
+import weakref
+
+from ops import (
+    charm,
+    framework,
+    model,
+    storage,
+)
+
+
+# OptionalYAML is something like metadata.yaml or actions.yaml. You can
+# pass in a file-like object or the string directly.
+OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+    """This class represents a way to build up the model that will drive a test suite.
+
+    The model that is created is from the viewpoint of the charm that you are testing.
+
+    Example::
+
+        harness = Harness(MyCharm)
+        # Do initial setup here
+        relation_id = harness.add_relation('db', 'postgresql')
+        # Now instantiate the charm to see events as the model changes
+        harness.begin()
+        harness.add_relation_unit(relation_id, 'postgresql/0')
+        harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+        # Check that charm has properly handled the relation_joined event for postgresql/0
+        self.assertEqual(harness.charm. ...)
+
+    Args:
+        charm_cls: The Charm class that you'll be testing.
+        meta: A string or file-like object containing the contents of
+            metadata.yaml.
+            parent directory of the Charm, and if not found fall back to a trivial
+            'name: test-charm' metadata.
+        actions: A string or file-like object containing the contents of
+            actions.yaml. If not supplied, we will look for an 'actions.yaml' file in the
+            parent directory of the Charm.
+    """
+
+    def __init__(
+            self,
+            charm_cls: typing.Type[charm.CharmBase],
+            *,
+            meta: OptionalYAML = None,
+            actions: OptionalYAML = None):
+        # TODO: jam 2020-03-05 We probably want to take config as a parameter as well, since
+        #       it would define the default values of config that the charm would see.
+        self._charm_cls = charm_cls
+        self._charm = None
+        self._charm_dir = 'no-disk-path'  # this may be updated by _create_meta
+        self._lazy_resource_dir = None
+        self._meta = self._create_meta(meta, actions)
+        self._unit_name = self._meta.name + '/0'
+        self._framework = None
+        self._hooks_enabled = True
+        self._relation_id_counter = 0
+        self._backend = _TestingModelBackend(self._unit_name, self._meta)
+        self._model = model.Model(self._meta, self._backend)
+        self._storage = storage.SQLiteStorage(':memory:')
+        self._framework = framework.Framework(
+            self._storage, self._charm_dir, self._meta, self._model)
+
+    @property
+    def charm(self) -> charm.CharmBase:
+        """Return the instance of the charm class that was passed to __init__.
+
+        Note that the Charm is not instantiated until you have called
+        :meth:`.begin()`.
+        """
+        return self._charm
+
+    @property
+    def model(self) -> model.Model:
+        """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
+        return self._model
+
+    @property
+    def framework(self) -> framework.Framework:
+        """Return the Framework that is being driven by this Harness."""
+        return self._framework
+
+    @property
+    def _resource_dir(self) -> pathlib.Path:
+        if self._lazy_resource_dir is not None:
+            return self._lazy_resource_dir
+
+        self.__resource_dir = tempfile.TemporaryDirectory()
+        self._lazy_resource_dir = pathlib.Path(self.__resource_dir.name)
+        self._finalizer = weakref.finalize(self, self.__resource_dir.cleanup)
+        return self._lazy_resource_dir
+
+    def begin(self) -> None:
+        """Instantiate the Charm and start handling events.
+
+        Before calling begin(), there is no Charm instance, so changes to the Model won't emit
+        events. You must call begin before :attr:`.charm` is valid.
+        """
+        if self._charm is not None:
+            raise RuntimeError('cannot call the begin method on the harness more than once')
+
+        # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
+        # the original class against multiple Frameworks. So create a locally defined class
+        # and register it.
+        # TODO: jam 2020-03-16 We are looking to change this to instance attributes instead of
+        #       class attributes, which should clean up this ugliness. The API can stay the same
+        class TestEvents(self._charm_cls.on.__class__):
+            pass
+
+        TestEvents.__name__ = self._charm_cls.on.__class__.__name__
+
+        class TestCharm(self._charm_cls):
+            on = TestEvents()
+
+        # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
+        # rather than TestCharm has no attribute foo.
+        TestCharm.__name__ = self._charm_cls.__name__
+        self._charm = TestCharm(self._framework)
+
+    def _create_meta(self, charm_metadata, action_metadata):
+        """Create a CharmMeta object.
+
+        Handle the cases where a user doesn't supply explicit metadata snippets.
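+
+        For example (illustrative), ``self._create_meta('name: test-charm', None)``
+        returns a CharmMeta whose ``name`` is ``test-charm``.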
+ """ + filename = inspect.getfile(self._charm_cls) + charm_dir = pathlib.Path(filename).parents[1] + + if charm_metadata is None: + metadata_path = charm_dir / 'metadata.yaml' + if metadata_path.is_file(): + charm_metadata = metadata_path.read_text() + self._charm_dir = charm_dir + else: + # The simplest of metadata that the framework can support + charm_metadata = 'name: test-charm' + elif isinstance(charm_metadata, str): + charm_metadata = dedent(charm_metadata) + + if action_metadata is None: + actions_path = charm_dir / 'actions.yaml' + if actions_path.is_file(): + action_metadata = actions_path.read_text() + self._charm_dir = charm_dir + elif isinstance(action_metadata, str): + action_metadata = dedent(action_metadata) + + return charm.CharmMeta.from_yaml(charm_metadata, action_metadata) + + def add_oci_resource(self, resource_name: str, + contents: typing.Mapping[str, str] = None) -> None: + """Add oci resources to the backend. + + This will register an oci resource and create a temporary file for processing metadata + about the resource. A default set of values will be used for all the file contents + unless a specific contents dict is provided. + + Args: + resource_name: Name of the resource to add custom contents to. + contents: Optional custom dict to write for the named resource. + """ + if not contents: + contents = {'registrypath': 'registrypath', + 'username': 'username', + 'password': 'password', + } + if resource_name not in self._meta.resources.keys(): + raise RuntimeError('Resource {} is not a defined resources'.format(resource_name)) + if self._meta.resources[resource_name].type != "oci-image": + raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name)) + resource_dir = self._resource_dir / resource_name + resource_dir.mkdir(exist_ok=True) + resource_file = resource_dir / "contents.yaml" + with resource_file.open('wt', encoding='utf8') as resource_yaml: + yaml.dump(contents, resource_yaml) + self._backend._resources_map[resource_name] = resource_file + + def populate_oci_resources(self) -> None: + """Populate all OCI resources.""" + for name, data in self._meta.resources.items(): + if data.type == "oci-image": + self.add_oci_resource(name) + + def disable_hooks(self) -> None: + """Stop emitting hook events when the model changes. + + This can be used by developers to stop changes to the model from emitting events that + the charm will react to. Call :meth:`.enable_hooks` + to re-enable them. + """ + self._hooks_enabled = False + + def enable_hooks(self) -> None: + """Re-enable hook events from charm.on when the model is changed. + + By default hook events are enabled once you call :meth:`.begin`, + but if you have used :meth:`.disable_hooks`, this can be used to + enable them again. + """ + self._hooks_enabled = True + + def _next_relation_id(self): + rel_id = self._relation_id_counter + self._relation_id_counter += 1 + return rel_id + + def add_relation(self, relation_name: str, remote_app: str) -> int: + """Declare that there is a new relation between this app and `remote_app`. + + Args: + relation_name: The relation on Charm that is being related to + remote_app: The name of the application that is being related to + + Return: + The relation_id created by this add_relation. 
+ """ + rel_id = self._next_relation_id() + self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id) + self._backend._relation_names[rel_id] = relation_name + self._backend._relation_list_map[rel_id] = [] + self._backend._relation_data[rel_id] = { + remote_app: {}, + self._backend.unit_name: {}, + self._backend.app_name: {}, + } + # Reload the relation_ids list + if self._model is not None: + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return rel_id + relation = self._model.get_relation(relation_name, rel_id) + app = self._model.get_app(remote_app) + self._charm.on[relation_name].relation_created.emit( + relation, app) + return rel_id + + def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: + """Add a new unit to a relation. + + Example:: + + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + + This will trigger a `relation_joined` event and a `relation_changed` event. + + Args: + relation_id: The integer relation identifier (as returned by add_relation). + remote_unit_name: A string representing the remote unit that is being added. + Return: + None + """ + self._backend._relation_list_map[relation_id].append(remote_unit_name) + self._backend._relation_data[relation_id][remote_unit_name] = {} + relation_name = self._backend._relation_names[relation_id] + # Make sure that the Model reloads the relation_list for this relation_id, as well as + # reloading the relation data for this unit. + if self._model is not None: + remote_unit = self._model.get_unit(remote_unit_name) + relation = self._model.get_relation(relation_name, relation_id) + unit_cache = relation.data.get(remote_unit, None) + if unit_cache is not None: + unit_cache._invalidate() + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return + self._charm.on[relation_name].relation_joined.emit( + relation, remote_unit.app, remote_unit) + + def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping: + """Get the relation data bucket for a single app or unit in a given relation. + + This ignores all of the safety checks of who can and can't see data in relations (eg, + non-leaders can't read their own application's relation data because there are no events + that keep that data up-to-date for the unit). + + Args: + relation_id: The relation whose content we want to look at. + app_or_unit: The name of the application or unit whose data we want to read + Return: + a dict containing the relation data for `app_or_unit` or None. + Raises: + KeyError: if relation_id doesn't exist + """ + return self._backend._relation_data[relation_id].get(app_or_unit, None) + + def get_workload_version(self) -> str: + """Read the workload version that was set by the unit.""" + return self._backend._workload_version + + def set_model_name(self, name: str) -> None: + """Set the name of the Model that this is representing. + + This cannot be called once begin() has been called. But it lets you set the value that + will be returned by Model.name. + """ + if self._charm is not None: + raise RuntimeError('cannot set the Model name after begin()') + self._backend.model_name = name + + def update_relation_data( + self, + relation_id: int, + app_or_unit: str, + key_values: typing.Mapping, + ) -> None: + """Update the relation data for a given unit or application in a given relation. 
+ + This also triggers the `relation_changed` event for this relation_id. + + Args: + relation_id: The integer relation_id representing this relation. + app_or_unit: The unit or application name that is being updated. + This can be the local or remote application. + key_values: Each key/value will be updated in the relation data. + """ + relation_name = self._backend._relation_names[relation_id] + relation = self._model.get_relation(relation_name, relation_id) + if '/' in app_or_unit: + entity = self._model.get_unit(app_or_unit) + else: + entity = self._model.get_app(app_or_unit) + rel_data = relation.data.get(entity, None) + if rel_data is not None: + # rel_data may have cached now-stale data, so _invalidate() it. + # Note, this won't cause the data to be loaded if it wasn't already. + rel_data._invalidate() + + new_values = self._backend._relation_data[relation_id][app_or_unit].copy() + for k, v in key_values.items(): + if v == '': + new_values.pop(k, None) + else: + new_values[k] = v + self._backend._relation_data[relation_id][app_or_unit] = new_values + + if app_or_unit == self._model.unit.name: + # No events for our own unit + return + if app_or_unit == self._model.app.name: + # updating our own app only generates an event if it is a peer relation and we + # aren't the leader + is_peer = self._meta.relations[relation_name].role.is_peer() + if not is_peer: + return + if self._model.unit.is_leader(): + return + self._emit_relation_changed(relation_id, app_or_unit) + + def _emit_relation_changed(self, relation_id, app_or_unit): + if self._charm is None or not self._hooks_enabled: + return + rel_name = self._backend._relation_names[relation_id] + relation = self.model.get_relation(rel_name, relation_id) + if '/' in app_or_unit: + app_name = app_or_unit.split('/')[0] + unit_name = app_or_unit + app = self.model.get_app(app_name) + unit = self.model.get_unit(unit_name) + args = (relation, app, unit) + else: + app_name = app_or_unit + app = self.model.get_app(app_name) + args = (relation, app) + self._charm.on[rel_name].relation_changed.emit(*args) + + def update_config( + self, + key_values: typing.Mapping[str, str] = None, + unset: typing.Iterable[str] = (), + ) -> None: + """Update the config as seen by the charm. + + This will trigger a `config_changed` event. + + Args: + key_values: A Mapping of key:value pairs to update in config. + unset: An iterable of keys to remove from Config. (Note that this does + not currently reset the config values to the default defined in config.yaml.) + """ + config = self._backend._config + if key_values is not None: + for key, value in key_values.items(): + config[key] = value + for key in unset: + config.pop(key, None) + # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config + # is a LazyMapping, but its _load returns a dict and this method mutates + # the dict that Config is caching. Arguably we should be doing some sort + # of charm.framework.model.config._invalidate() + if self._charm is None or not self._hooks_enabled: + return + self._charm.on.config_changed.emit() + + def set_leader(self, is_leader: bool = True) -> None: + """Set whether this unit is the leader or not. + + If this charm becomes a leader then `leader_elected` will be triggered. + + Args: + is_leader: True/False as to whether this unit is the leader. 
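+
+        Example::
+
+            harness.set_leader(True)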
+ """ + was_leader = self._backend._is_leader + self._backend._is_leader = is_leader + # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in + # the Model objects, so this automatically gets noticed. + if is_leader and not was_leader and self._charm is not None and self._hooks_enabled: + self._charm.on.leader_elected.emit() + + def _get_backend_calls(self, reset: bool = True) -> list: + """Return the calls that we have made to the TestingModelBackend. + + This is useful mostly for testing the framework itself, so that we can assert that we + do/don't trigger extra calls. + + Args: + reset: If True, reset the calls list back to empty, if false, the call list is + preserved. + Return: + ``[(call1, args...), (call2, args...)]`` + """ + calls = self._backend._calls.copy() + if reset: + self._backend._calls.clear() + return calls + + +def _record_calls(cls): + """Replace methods on cls with methods that record that they have been called. + + Iterate all attributes of cls, and for public methods, replace them with a wrapped method + that records the method called along with the arguments and keyword arguments. + """ + for meth_name, orig_method in cls.__dict__.items(): + if meth_name.startswith('_'): + continue + + def decorator(orig_method): + def wrapped(self, *args, **kwargs): + full_args = (orig_method.__name__,) + args + if kwargs: + full_args = full_args + (kwargs,) + self._calls.append(full_args) + return orig_method(self, *args, **kwargs) + return wrapped + + setattr(cls, meth_name, decorator(orig_method)) + return cls + + +@_record_calls +class _TestingModelBackend: + """This conforms to the interface for ModelBackend but provides canned data. + + DO NOT use this class directly, it is used by `Harness`_ to drive the model. + `Harness`_ is responsible for maintaining the internal consistency of the values here, + as the only public methods of this type are for implementing ModelBackend. + """ + + def __init__(self, unit_name, meta): + self.unit_name = unit_name + self.app_name = self.unit_name.split('/')[0] + self.model_name = None + self._calls = [] + self._meta = meta + self._is_leader = None + self._relation_ids_map = {} # relation name to [relation_ids,...] + self._relation_names = {} # reverse map from relation_id to relation_name + self._relation_list_map = {} # relation_id: [unit_name,...] 
+ self._relation_data = {} # {relation_id: {name: data}} + self._config = {} + self._is_leader = False + self._resources_map = {} + self._pod_spec = None + self._app_status = {'status': 'unknown', 'message': ''} + self._unit_status = {'status': 'maintenance', 'message': ''} + self._workload_version = None + + def relation_ids(self, relation_name): + try: + return self._relation_ids_map[relation_name] + except KeyError as e: + if relation_name not in self._meta.relations: + raise model.ModelError('{} is not a known relation'.format(relation_name)) from e + return [] + + def relation_list(self, relation_id): + try: + return self._relation_list_map[relation_id] + except KeyError as e: + raise model.RelationNotFoundError from e + + def relation_get(self, relation_id, member_name, is_app): + if is_app and '/' in member_name: + member_name = member_name.split('/')[0] + if relation_id not in self._relation_data: + raise model.RelationNotFoundError() + return self._relation_data[relation_id][member_name].copy() + + def relation_set(self, relation_id, key, value, is_app): + relation = self._relation_data[relation_id] + if is_app: + bucket_key = self.app_name + else: + bucket_key = self.unit_name + if bucket_key not in relation: + relation[bucket_key] = {} + bucket = relation[bucket_key] + if value == '': + bucket.pop(key, None) + else: + bucket[key] = value + + def config_get(self): + return self._config + + def is_leader(self): + return self._is_leader + + def application_version_set(self, version): + self._workload_version = version + + def resource_get(self, resource_name): + return self._resources_map[resource_name] + + def pod_spec_set(self, spec, k8s_resources): + self._pod_spec = (spec, k8s_resources) + + def status_get(self, *, is_app=False): + if is_app: + return self._app_status + else: + return self._unit_status + + def status_set(self, status, message='', *, is_app=False): + if is_app: + self._app_status = {'status': status, 'message': message} + else: + self._unit_status = {'status': status, 'message': message} + + def storage_list(self, name): + raise NotImplementedError(self.storage_list) + + def storage_get(self, storage_name_id, attribute): + raise NotImplementedError(self.storage_get) + + def storage_add(self, name, count=1): + raise NotImplementedError(self.storage_add) + + def action_get(self): + raise NotImplementedError(self.action_get) + + def action_set(self, results): + raise NotImplementedError(self.action_set) + + def action_log(self, message): + raise NotImplementedError(self.action_log) + + def action_fail(self, message=''): + raise NotImplementedError(self.action_fail) + + def network_get(self, endpoint_name, relation_id=None): + raise NotImplementedError(self.network_get) diff --git a/hackfest_firewall_pnf/charms/vyos-config/lib/ops/version.py b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/version.py new file mode 100644 index 0000000000000000000000000000000000000000..15e5478555ee0fa948bfb0ad57cc79ba7cef3721 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/lib/ops/version.py @@ -0,0 +1,50 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+from pathlib import Path
+
+__all__ = ('version',)
+
+_FALLBACK = '0.8'  # this gets bumped after release
+
+
+def _get_version():
+    version = _FALLBACK + ".dev0+unknown"
+
+    p = Path(__file__).parent
+    if (p.parent / '.git').exists():
+        try:
+            proc = subprocess.run(
+                ['git', 'describe', '--tags', '--dirty'],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.DEVNULL,
+                cwd=p,
+                check=True)
+        except Exception:
+            pass
+        else:
+            version = proc.stdout.strip().decode('utf8')
+            if '-' in version:
+                # version will look like <tag>-<#commits>-g<hash>[-dirty]
+                # in terms of PEP 440, the tag we'll make sure is a 'public version identifier';
+                # everything after the first - needs to be a 'local version'
+                public, local = version.split('-', 1)
+                version = public + '+' + local.replace('-', '.')
+                # version now <tag>+<#commits>.g<hash>[.dirty]
+                # which is PEP440-compliant (as long as <tag> is :-)
+    return version
+
+
+version = _get_version()
diff --git a/hackfest_firewall_pnf/charms/vyos-config/metadata.yaml b/hackfest_firewall_pnf/charms/vyos-config/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c331b60d88dcfc3f1c675e24282dacdf8ec4ffdb
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/metadata.yaml
@@ -0,0 +1,11 @@
+name: vyos-config
+summary: A proxy charm to configure VyOS Router
+maintainer: David García
+description: |
+  Charm to configure VyOS PNF
+series:
+  - xenial
+  - bionic
+peers:
+  proxypeer:
+    interface: proxypeer
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/.bzrignore b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/.bzrignore
new file mode 100644
index 0000000000000000000000000000000000000000..398f08fcbfae019fbf1fe40e9b1d49f8384393d9
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/.bzrignore
@@ -0,0 +1,17 @@
+*.pyc
+__pycache__/
+dist/
+build/
+MANIFEST
+charmhelpers.egg-info/
+charmhelpers/version.py
+.coverage
+.env/
+coverage.xml
+docs/_build
+.idea
+.project
+.pydevproject
+.settings
+.venv
+.venv3
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/.gitignore b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..9b13d46d0a42cd97ff428ffa4f4ba0a9d9946806
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/.gitignore
@@ -0,0 +1,125 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv* +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +*.pyc +__pycache__/ +dist/ +build/ +MANIFEST +charmhelpers.egg-info/ +charmhelpers/version.py +.coverage +.env/ +coverage.xml +docs/_build +.idea +.project +.pydevproject +.settings +.venv +.venv3 +.bzr +.unit-state.db + +AUTHORS +ChangeLog diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/.travis.yml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/.travis.yml new file mode 100644 index 0000000000000000000000000000000000000000..7e67f092d935333e2e9324bc612b069761322bd6 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/.travis.yml @@ -0,0 +1,32 @@ +dist: xenial +sudo: required +language: python +addons: + snaps: + - name: juju + classic: true + channel: stable +before_install: + - sudo apt -qq update + - sudo apt install -y libapt-pkg-dev # For python-apt wheel build +# NOTE(beisner): Avoid test pollution by not enabling system site packages. +virtualenv: + system_site_packages: false +install: + - pip install tox +matrix: + include: + - python: 2.7 + env: ENV=pep8,py2 + - python: 3.4 + env: ENV=pep8,py3 + - python: 3.5 + env: ENV=pep8,py3 + - python: 3.6 + env: ENV=pep8,py3 + - python: 3.7 + env: ENV=pep8,py3 + - python: 3.8 + env: ENV=pep8,py3 +script: + - tox -c tox.ini -e $ENV diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/HACKING.md b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/HACKING.md new file mode 100644 index 0000000000000000000000000000000000000000..d9d5996d0fc12622e63039db3b038ddfb5272362 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/HACKING.md @@ -0,0 +1,102 @@ +# Hacking on charmhelpers + +## Run testsuite (tox method) + +CAUTION: the charm-helpers library has some unit tests which do unsavory things +such as making real, unmocked calls out to sudo foo, juju binaries, and perhaps +other things. This is not ideal for a number of reasons. One of those reasons +is that it pollutes the test runner (your) system. + +The current recommendation for testing locally is to do so in a fresh Xenial +(16.04) lxc container. 16.04 is selected for consistency with what is available +in the Travis CI test gates. As of this writing, 18.04 is not available there. + +The fresh Xenial lxc system container will need to have the following packages +installed in order to satisfy test runner dependencies: + + sudo apt install git bzr tox libapt-pkg-dev python-dev python3-dev build-essential juju -y + +The tests can be executed as follows: + + tox -e pep8 + tox -e py3 + tox -e py2 + +See also: .travis.yaml for what is happening in the test gate. + +## Run testsuite (legacy Makefile method) + + make test + +Run `make` without arguments for more options. 
+
+## Test it in a charm
+
+Use the following instructions to build a charm that uses your own development branch of
+charmhelpers.
+
+Step 1: Make sure your version of charmhelpers is recognised as the latest version
+by appending `dev0` to the version number in the `VERSION` file.
+
+Step 2: Create an override file `wheelhouse-overrides.txt` that points to your own
+charmhelpers branch. *The format of this file is the same as pip's
+[`requirements.txt`](https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format)
+file.*
+
+    # Override charmhelpers by the version found in folder
+    -e /path/to/charmhelpers
+    # Or point it to a github repo with
+    -e git+https://github.com/<user>/charm-helpers#egg=charmhelpers
+
+Step 3: Build the charm specifying the override file. *You might need to install the
+candidate channel of the charm snap.*
+
+    charm build -w wheelhouse-overrides.txt
+
+Now when you deploy your charm, it will use your own branch of charmhelpers.
+
+*Note: If you want to verify this or change the charmhelpers code on a built
+charm, get the path of the installed charmhelpers by running the following command.*
+
+    python3 -c "import charmhelpers; print(charmhelpers.__file__)"
+
+
+# Hacking on Docs
+
+Install HTML doc dependencies:
+
+```bash
+sudo apt-get install python-flake8 python-shelltoolbox python-tempita \
+python-nose python-mock python-testtools python-jinja2 python-coverage \
+python-git python-netifaces python-netaddr python-pip zip
+```
+
+To build the HTML documentation:
+
+```bash
+make docs
+```
+
+To browse the HTML documentation locally:
+
+```bash
+make docs
+cd docs/_build/html
+python -m SimpleHTTPServer 8765
+# point web browser to http://localhost:8765
+```
+
+To build and upload package and doc updates to PyPI:
+
+```bash
+make release
+# note: if the package version already exists on PyPI
+# this command will upload doc updates only
+```
+
+# PyPI Package and Docs
+
+The published package and docs currently live at:
+
+    https://pypi.python.org/pypi/charmhelpers
+    http://pythonhosted.org/charmhelpers/
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/LICENSE b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/MANIFEST.in b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..72bbf0ab78b68d6d0ebb8d354dd993e751d4be89 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/MANIFEST.in @@ -0,0 +1,7 @@ +include *.txt +include Makefile +include VERSION +include MANIFEST.in +include scripts/* +include README.rst +recursive-include debian * diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/Makefile b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..8771f31a57b1af939e73a1dfe8c7e56be195cc87 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/Makefile @@ -0,0 +1,89 @@ +PROJECT=charmhelpers +PYTHON := /usr/bin/env python +SUITE=unstable +TESTS=tests/ + +all: + @echo "make source - Create source package" + @echo "make sdeb - Create debian source package" + @echo "make deb - Create debian package" + @echo "make clean" + @echo "make userinstall - Install locally" + @echo "make docs - Build html documentation" + @echo "make release - Build and upload package and docs to PyPI" + @echo "make test" + +sdeb: source + scripts/build source + +deb: source + scripts/build + +source: setup.py + scripts/update-revno + python setup.py sdist + +clean: + -python setup.py clean + rm -rf build/ MANIFEST + find . -name '*.pyc' -delete + find . -name '__pycache__' -delete + rm -rf dist/* + rm -rf .venv + rm -rf .venv3 + (which dh_clean && dh_clean) || true + +userinstall: + scripts/update-revno + python setup.py install --user + + +.venv: + dpkg-query -W -f='$${status}' gcc python-dev python-virtualenv 2>/dev/null | grep --invert-match "not-installed" || sudo apt-get install -y python-dev python-virtualenv + virtualenv .venv --system-site-packages + .venv/bin/pip install -U pip + .venv/bin/pip install -I -r test-requirements.txt + .venv/bin/pip install bzr + +.venv3: + dpkg-query -W -f='$${status}' gcc python3-dev python-virtualenv python3-apt 2>/dev/null | grep --invert-match "not-installed" || sudo apt-get install -y python3-dev python-virtualenv python3-apt + virtualenv .venv3 --python=python3 --system-site-packages + .venv3/bin/pip install -U pip + .venv3/bin/pip install -I -r test-requirements.txt + +# Note we don't even attempt to run tests if lint isn't passing. +test: lint test2 test3 + @echo OK + +test2: + @echo Starting Py2 tests... + .venv/bin/nosetests -s --nologcapture tests/ + +test3: + @echo Starting Py3 tests... + .venv3/bin/nosetests -s --nologcapture tests/ + +ftest: lint + @echo Starting fast tests... + .venv/bin/nosetests --attr '!slow' --nologcapture tests/ + .venv3/bin/nosetests --attr '!slow' --nologcapture tests/ + +lint: .venv .venv3 + @echo Checking for Python syntax... 
+ @.venv/bin/flake8 --ignore=E402,E501,W504 $(PROJECT) $(TESTS) tools/ \ + && echo Py2 OK + @.venv3/bin/flake8 --ignore=E402,E501,W504 $(PROJECT) $(TESTS) tools/ \ + && echo Py3 OK + +docs: + - [ -z "`dpkg -l | grep python-sphinx`" ] && sudo apt-get install python-sphinx -y + - [ -z "`dpkg -l | grep python-pip`" ] && sudo apt-get install python-pip -y + - [ -z "`pip list | grep -i sphinx-pypi-upload`" ] && sudo pip install sphinx-pypi-upload + - [ -z "`pip list | grep -i sphinx_rtd_theme`" ] && sudo pip install sphinx_rtd_theme + cd docs && make html && cd - +.PHONY: docs + +release: docs + $(PYTHON) setup.py sdist upload upload_sphinx + +build: test lint docs diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/README.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/README.rst new file mode 100644 index 0000000000000000000000000000000000000000..b8fb15eaabf79cb949b703301d6b89bbb0cb6b5e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/README.rst @@ -0,0 +1,52 @@ +CharmHelpers |badge| +-------------------- + +.. |badge| image:: https://travis-ci.org/juju/charm-helpers.svg?branch=master + :target: https://travis-ci.org/juju/charm-helpers + +Overview +======== + +CharmHelpers provides an opinionated set of tools for building Juju charms. + +The full documentation is available online at: https://charm-helpers.readthedocs.io/ + +Common Usage Examples +===================== + +* interaction with charm-specific Juju unit agents via hook tools; +* processing of events and execution of decorated functions based on event names; +* handling of persistent storage between independent charm invocations; +* rendering of configuration file templates; +* modification of system configuration files; +* installation of packages; +* retrieval of machine-specific details; +* implementation of application-specific code reused in similar charms. + +Why Python? +=========== + +* Python is an extremely popular, easy to learn, and powerful language which is also common in automation tools; +* An interpreted language helps with charm portability across different CPU architectures; +* Doesn't require debugging symbols (just use pdb in-place); +* An author or a user is able to make debugging changes without recompiling a charm. + +Dev/Test +======== + +See the HACKING.md file for information about testing and development. + +License +======= + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/README b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/README new file mode 100644 index 0000000000000000000000000000000000000000..df4a50484ea9dfca98c4dc63a94531e3449c98aa --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/README @@ -0,0 +1,4 @@ +This directory contains executables for accessing charmhelpers functionality + + +Please see charmhelpers.cli for the recommended way to add scripts. 
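+
+For illustration only, a minimal subcommand registered through charmhelpers.cli
+might look like this ('greet' and its argument are hypothetical, not part of
+this repository):
+
+    from charmhelpers.cli import cmdline
+
+    @cmdline.subcommand('greet')
+    def greet(name):
+        """Print a greeting for the given name."""
+        return 'hello {}'.format(name)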
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/chlp b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/chlp new file mode 100755 index 0000000000000000000000000000000000000000..0c1c38dc15039191fd3ea04fc7ea740764ea974b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/chlp @@ -0,0 +1,8 @@ +#!/usr/bin/env python + +from charmhelpers.cli import cmdline +from charmhelpers.cli.commands import * + + +if __name__ == '__main__': + cmdline.run() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/contrib/charmsupport/charmsupport b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/contrib/charmsupport/charmsupport new file mode 100755 index 0000000000000000000000000000000000000000..7a28beb3867db4b517ca98bd048303aaa727105a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/contrib/charmsupport/charmsupport @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +import argparse +from charmhelpers.contrib.charmsupport import execd + + +def run_execd(args): + execd.execd_run(args.module, args.dir, die_on_error=True) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Perform common charm tasks') + subparsers = parser.add_subparsers(help='Commands') + + execd_parser = subparsers.add_parser('execd', + help='Execute a directory of commands') + execd_parser.add_argument('--module', default='charm-pre-install', + help='module to run (default: charm-pre-install)') + execd_parser.add_argument('--dir', + help="Override the exec.d directory path") + execd_parser.set_defaults(func=run_execd) + + return parser.parse_args() + + +def main(): + arguments = parse_args() + arguments.func(arguments) + +if __name__ == '__main__': + exit(main()) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/contrib/saltstack/salt-call b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/contrib/saltstack/salt-call new file mode 100755 index 0000000000000000000000000000000000000000..5b8a8f39355447bd23c569c32c2987a9c494fe73 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/bin/contrib/saltstack/salt-call @@ -0,0 +1,11 @@ +#!/usr/bin/env python +''' +Directly call a salt command in the modules, does not require a running salt +minion to run. +''' + +from salt.scripts import salt_call + + +if __name__ == '__main__': + salt_call() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..61ef90719b5d5d759de1a6b80a1ea748d8bb0911 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/__init__.py @@ -0,0 +1,97 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. 
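+#
+# Concretely: if 'six' or 'yaml' cannot be imported, the try/except blocks
+# below shell out to apt-get to install the matching python-six/python3-six
+# or python-yaml/python3-yaml package for the running interpreter and then
+# retry the import.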
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
+import subprocess
+import sys
+
+try:
+    import six  # NOQA:F401
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # NOQA:F401
+
+try:
+    import yaml  # NOQA:F401
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # NOQA:F401
+
+
+# Holds a mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below. This is so that the warning is only
+# printed once for each function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+
+    The date, which is a string in semi-ISO8601 format, indicates the
+    year-month in which the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format, to indicate when the
+      function will definitely (probably) be removed.
+    :param log: The log function to call in order to log. If not provided,
+      logs to stdout
+    """
+    def wrap(f):
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            try:
+                module = inspect.getmodule(f)
+                file = inspect.getsourcefile(f)
+                lines = inspect.getsourcelines(f)
+                f_name = "{}-{}-{}..{}-{}".format(
+                    module.__name__, file, lines[0], lines[-1], f.__name__)
+            except (IOError, TypeError):
+                # assume it was local, so just use the name of the function
+                f_name = f.__name__
+            if f_name not in __deprecated_functions:
+                __deprecated_functions[f_name] = True
+                s = "DEPRECATION WARNING: Function {} is being removed".format(
+                    f.__name__)
+                if date:
+                    s = "{} on/around {}".format(s, date)
+                if warning:
+                    s = "{} : {}".format(s, warning)
+                if log:
+                    log(s)
+                else:
+                    print(s)
+            return f(*args, **kwargs)
+        return wrapped_f
+    return wrap
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/README.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f7901c09c79c268d353825776a74072a7ee4dee7
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/README.rst
@@ -0,0 +1,57 @@
+==========
+Commandant
+==========
+
+-----------------------------------------------------
+Automatic command-line interfaces to Python functions
+-----------------------------------------------------
+
+One of the benefits of ``libvirt`` is the uniformity of the interface: the C API (as well as the bindings in other languages) is a set of functions that accept parameters that are nearly identical to the command-line arguments. If you run ``virsh``, you get an interactive command prompt that supports all of the same commands that your shell scripts use as ``virsh`` subcommands.
+
+Command execution and stdio manipulation are the greatest common factor across all development systems in the POSIX environment. By exposing your functions as commands that manipulate streams of text, you can make life easier for all the Ruby and Erlang and Go programmers in your life.
+
+Goals
+=====
+
+* Single decorator to expose a function as a command.
+  * now two decorators - one "automatic" and one that allows authors to manipulate the arguments for fine-grained control. (MW)
+* Automatic analysis of function signature through ``inspect.getargspec()``
+* Command argument parser built automatically with ``argparse``
+* Interactive interpreter loop object made with ``Cmd``
+* Options to output structured return value data via ``pprint``, ``yaml`` or ``json`` dumps.
+
+Other Important Features that need writing
+------------------------------------------
+
+* Help and Usage documentation can be automatically generated, but it will be important to let users override this behaviour
+* The decorator should allow specifying further parameters to the parser's add_argument() calls, to specify types or to make arguments behave as boolean flags, etc.
+  - Filename arguments are important, as good practice is for functions to accept file objects as parameters.
+  - choices arguments help to limit bad input before the function is called
+* Some automatic behaviour could make for better defaults, once the user can override them.
+  - We could automatically detect arguments that default to False or True, and automatically support --no-foo for foo=True.
+  - We could automatically support hyphens as alternates for underscores
+  - Arguments defaulting to sequence types could support the ``append`` action.
+
+
+-----------------------------------------------------
+Implementing subcommands
+-----------------------------------------------------
+
+(WIP)
+
+So as to avoid dependencies on the cli module, subcommands should be defined separately from their implementations. The recommendation would be to place definitions into separate modules near the implementations which they expose.
+
+Some examples::
+
+    from charmhelpers.cli import CommandLine
+    from charmhelpers.payload import execd
+    from charmhelpers.foo import bar
+
+    cli = CommandLine()
+
+    cli.subcommand(execd.execd_run)
+
+    @cli.subcommand_builder("bar", description="Bar baz qux")
+    def barcmd_builder(subparser):
+        subparser.add_argument('argument1', help="yackety")
+        return bar
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..389b490f4eec27f18118ab6a5f3f529dbf2e9ecc
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/__init__.py
@@ -0,0 +1,189 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
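+
+# This module implements the machinery behind the `chlp` entry point: an
+# OutputFormatter that renders a command's return value as raw, json, py,
+# yaml, csv or tab output, and a CommandLine whose decorators register
+# functions as argparse subcommands.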
+ +import inspect +import argparse +import sys + +from six.moves import zip + +import charmhelpers.core.unitdata + + +class OutputFormatter(object): + def __init__(self, outfile=sys.stdout): + self.formats = ( + "raw", + "json", + "py", + "yaml", + "csv", + "tab", + ) + self.outfile = outfile + + def add_arguments(self, argument_parser): + formatgroup = argument_parser.add_mutually_exclusive_group() + choices = self.supported_formats + formatgroup.add_argument("--format", metavar='FMT', + help="Select output format for returned data, " + "where FMT is one of: {}".format(choices), + choices=choices, default='raw') + for fmt in self.formats: + fmtfunc = getattr(self, fmt) + formatgroup.add_argument("-{}".format(fmt[0]), + "--{}".format(fmt), action='store_const', + const=fmt, dest='format', + help=fmtfunc.__doc__) + + @property + def supported_formats(self): + return self.formats + + def raw(self, output): + """Output data as raw string (default)""" + if isinstance(output, (list, tuple)): + output = '\n'.join(map(str, output)) + self.outfile.write(str(output)) + + def py(self, output): + """Output data as a nicely-formatted python data structure""" + import pprint + pprint.pprint(output, stream=self.outfile) + + def json(self, output): + """Output data in JSON format""" + import json + json.dump(output, self.outfile) + + def yaml(self, output): + """Output data in YAML format""" + import yaml + yaml.safe_dump(output, self.outfile) + + def csv(self, output): + """Output data as excel-compatible CSV""" + import csv + csvwriter = csv.writer(self.outfile) + csvwriter.writerows(output) + + def tab(self, output): + """Output data in excel-compatible tab-delimited format""" + import csv + csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) + csvwriter.writerows(output) + + def format_output(self, output, fmt='raw'): + fmtfunc = getattr(self, fmt) + fmtfunc(output) + + +class CommandLine(object): + argument_parser = None + subparsers = None + formatter = None + exit_code = 0 + + def __init__(self): + if not self.argument_parser: + self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') + if not self.formatter: + self.formatter = OutputFormatter() + self.formatter.add_arguments(self.argument_parser) + if not self.subparsers: + self.subparsers = self.argument_parser.add_subparsers(help='Commands') + + def subcommand(self, command_name=None): + """ + Decorate a function as a subcommand. Use its arguments as the + command-line arguments""" + def wrapper(decorated): + cmd_name = command_name or decorated.__name__ + subparser = self.subparsers.add_parser(cmd_name, + description=decorated.__doc__) + for args, kwargs in describe_arguments(decorated): + subparser.add_argument(*args, **kwargs) + subparser.set_defaults(func=decorated) + return decorated + return wrapper + + def test_command(self, decorated): + """ + Subcommand is a boolean test function, so bool return values should be + converted to a 0/1 exit code. + """ + decorated._cli_test_command = True + return decorated + + def no_output(self, decorated): + """ + Subcommand is not expected to return a value, so don't print a spurious None. + """ + decorated._cli_no_output = True + return decorated + + def subcommand_builder(self, command_name, description=None): + """ + Decorate a function that builds a subcommand. 
Builders should accept a + single argument (the subparser instance) and return the function to be + run as the command.""" + def wrapper(decorated): + subparser = self.subparsers.add_parser(command_name) + func = decorated(subparser) + subparser.set_defaults(func=func) + subparser.description = description or func.__doc__ + return wrapper + + def run(self): + "Run cli, processing arguments and executing subcommands." + arguments = self.argument_parser.parse_args() + argspec = inspect.getargspec(arguments.func) + vargs = [] + for arg in argspec.args: + vargs.append(getattr(arguments, arg)) + if argspec.varargs: + vargs.extend(getattr(arguments, argspec.varargs)) + output = arguments.func(*vargs) + if getattr(arguments.func, '_cli_test_command', False): + self.exit_code = 0 if output else 1 + output = '' + if getattr(arguments.func, '_cli_no_output', False): + output = '' + self.formatter.format_output(output, arguments.format) + if charmhelpers.core.unitdata._KV: + charmhelpers.core.unitdata._KV.flush() + + +cmdline = CommandLine() + + +def describe_arguments(func): + """ + Analyze a function's signature and return a data structure suitable for + passing in as arguments to an argparse parser's add_argument() method.""" + + argspec = inspect.getargspec(func) + # we should probably raise an exception somewhere if func includes **kwargs + if argspec.defaults: + positional_args = argspec.args[:-len(argspec.defaults)] + keyword_names = argspec.args[-len(argspec.defaults):] + for arg, default in zip(keyword_names, argspec.defaults): + yield ('--{}'.format(arg),), {'default': default} + else: + positional_args = argspec.args + + for arg in positional_args: + yield (arg,), {} + if argspec.varargs: + yield (argspec.varargs,), {'nargs': '*'} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/benchmark.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..303af14b607d31e338aefff0df593609b7b45feb --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/benchmark.py @@ -0,0 +1,34 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . 
import cmdline +from charmhelpers.contrib.benchmark import Benchmark + + +@cmdline.subcommand(command_name='benchmark-start') +def start(): + Benchmark.start() + + +@cmdline.subcommand(command_name='benchmark-finish') +def finish(): + Benchmark.finish() + + +@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") +def service(subparser): + subparser.add_argument("value", help="The composite score.") + subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") + subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") + return Benchmark.set_composite_score diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/commands.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/commands.py new file mode 100644 index 0000000000000000000000000000000000000000..b93105650be8226aa390fda46aa08afb23ebc7bc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/commands.py @@ -0,0 +1,30 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module loads sub-modules into the python runtime so they can be +discovered via the inspect module. In order to prevent flake8 from (rightfully) +telling us these are unused modules, throw a ' # noqa' at the end of each import +so that the warning is suppressed. +""" + +from . import CommandLine # noqa + +""" +Import the sub-modules which have decorated subcommands to register with chlp. +""" +from . import host # noqa +from . import benchmark # noqa +from . import unitdata # noqa +from . import hookenv # noqa diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/hookenv.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/hookenv.py new file mode 100644 index 0000000000000000000000000000000000000000..bd72f448bf0092251a454ff6dd3145f09048ae72 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/hookenv.py @@ -0,0 +1,21 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . 
import cmdline +from charmhelpers.core import hookenv + + +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) +cmdline.subcommand('service-name')(hookenv.service_name) +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/host.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/host.py new file mode 100644 index 0000000000000000000000000000000000000000..40396849907976fd077cc9c53a0852c4380c6266 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/host.py @@ -0,0 +1,29 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import cmdline +from charmhelpers.core import host + + +@cmdline.subcommand() +def mounts(): + "List mounts" + return host.mounts() + + +@cmdline.subcommand_builder('service', description="Control system services") +def service(subparser): + subparser.add_argument("action", help="The action to perform (start, stop, etc...)") + subparser.add_argument("service_name", help="Name of the service to control") + return host.service diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/unitdata.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/unitdata.py new file mode 100644 index 0000000000000000000000000000000000000000..acce846f84ef32ed0b5829cf08e67ad33f0eb5d1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/cli/unitdata.py @@ -0,0 +1,46 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . 
import cmdline
+from charmhelpers.core import unitdata
+
+
+@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
+def unitdata_cmd(subparser):
+    nested = subparser.add_subparsers()
+
+    get_cmd = nested.add_parser('get', help='Retrieve data')
+    get_cmd.add_argument('key', help='Key to retrieve the value of')
+    get_cmd.set_defaults(action='get', value=None)
+
+    getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
+    getrange_cmd.add_argument('key', metavar='prefix',
+                              help='Prefix of the keys to retrieve')
+    getrange_cmd.set_defaults(action='getrange', value=None)
+
+    set_cmd = nested.add_parser('set', help='Store data')
+    set_cmd.add_argument('key', help='Key to set')
+    set_cmd.add_argument('value', help='Value to store')
+    set_cmd.set_defaults(action='set')
+
+    def _unitdata_cmd(action, key, value):
+        if action == 'get':
+            return unitdata.kv().get(key)
+        elif action == 'getrange':
+            return unitdata.kv().getrange(key)
+        elif action == 'set':
+            unitdata.kv().set(key, value)
+            unitdata.kv().flush()
+            return ''
+    return _unitdata_cmd
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/context.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..01864740e89633f6c65f43c32ed89ef9d5e857c9
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/context.py
@@ -0,0 +1,205 @@
+# Copyright 2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+A Pythonic API to interact with the charm hook environment.
+
+:author: Stuart Bishop
+'''
+
+import six
+
+from charmhelpers.core import hookenv
+
+from collections import OrderedDict
+if six.PY3:
+    from collections import UserDict  # pragma: nocover
+else:
+    from UserDict import IterableUserDict as UserDict  # pragma: nocover
+
+
+class Relations(OrderedDict):
+    '''Mapping relation name -> relation id -> Relation.
+
+    >>> rels = Relations()
+    >>> rels['sprog']['sprog:12']['client/6']['widget']
+    'remote widget'
+    >>> rels['sprog']['sprog:12'].local['widget'] = 'local widget'
+    >>> rels['sprog']['sprog:12'].local['widget']
+    'local widget'
+    >>> rels.peer.local['widget']
+    'local widget on the peer relation'
+    '''
+    def __init__(self):
+        super(Relations, self).__init__()
+        for relname in sorted(hookenv.relation_types()):
+            self[relname] = OrderedDict()
+            relids = hookenv.relation_ids(relname)
+            relids.sort(key=lambda x: int(x.split(':', 1)[-1]))
+            for relid in relids:
+                self[relname][relid] = Relation(relid)
+
+    @property
+    def peer(self):
+        peer_relid = hookenv.peer_relation_id()
+        for rels in self.values():
+            if peer_relid in rels:
+                return rels[peer_relid]
+
+
+class Relation(OrderedDict):
+    '''Mapping of unit -> remote RelationInfo for a relation.
+
+    This is an OrderedDict mapping, ordered numerically by unit number.
+
+    Also provides access to the local RelationInfo, and peer RelationInfo
+    instances by the 'local' and 'peers' attributes.
+
+    >>> r = Relation('sprog:12')
+    >>> r.keys()
+    ['client/9', 'client/10']    # Ordered numerically
+    >>> r['client/10']['widget']  # A remote RelationInfo setting
+    'remote widget'
+    >>> r.local['widget']  # The local RelationInfo setting
+    'local widget'
+    '''
+    relid = None  # The relation id.
+    relname = None  # The relation name (also known as relation type).
+    service = None  # The remote service name, if known.
+    local = None  # The local end's RelationInfo.
+    peers = None  # Map of peer -> RelationInfo. None if no peer relation.
+
+    def __init__(self, relid):
+        remote_units = hookenv.related_units(relid)
+        remote_units.sort(key=lambda u: int(u.split('/', 1)[-1]))
+        super(Relation, self).__init__((unit, RelationInfo(relid, unit))
+                                       for unit in remote_units)
+
+        self.relname = relid.split(':', 1)[0]
+        self.relid = relid
+        self.local = RelationInfo(relid, hookenv.local_unit())
+
+        for relinfo in self.values():
+            self.service = relinfo.service
+            break
+
+        # If we have peers, and they have joined both the provided peer
+        # relation and this relation, we can peek at their data too.
+        # This is useful for creating consensus without leadership.
+        peer_relid = hookenv.peer_relation_id()
+        if peer_relid and peer_relid != relid:
+            peers = hookenv.related_units(peer_relid)
+            if peers:
+                peers.sort(key=lambda u: int(u.split('/', 1)[-1]))
+                self.peers = OrderedDict((peer, RelationInfo(relid, peer))
+                                         for peer in peers)
+            else:
+                self.peers = OrderedDict()
+        else:
+            self.peers = None
+
+    def __str__(self):
+        return '{} ({})'.format(self.relid, self.service)
+
+
+class RelationInfo(UserDict):
+    '''The bag of data at an end of a relation.
+
+    Every unit participating in a relation has a single bag of
+    data associated with that relation. This is that bag.
+
+    The bag of data for the local unit may be updated. Remote data
+    is immutable and will remain static for the duration of the hook.
+
+    Changes made to the local unit's relation data only become visible
+    to other units after the hook completes successfully. If the hook
+    does not complete successfully, the changes are rolled back.
+
+    Unlike standard Python mappings, setting an item to None is the
+    same as deleting it.
+
+    >>> relinfo = RelationInfo('db:12')  # Default is the local unit.
+    >>> relinfo['user'] = 'fred'
+    >>> relinfo['user']
+    'fred'
+    >>> relinfo['user'] = None
+    >>> 'fred' in relinfo
+    False
+
+    This class wraps hookenv.relation_get and hookenv.relation_set.
+    All caching is left up to these two methods to avoid synchronization
+    issues. Data is only loaded on demand.
+    '''
+    relid = None  # The relation id.
+    relname = None  # The relation name (also known as the relation type).
+    unit = None  # The unit id.
+    number = None  # The unit number (integer).
+    service = None  # The service name.
+
+    def __init__(self, relid, unit):
+        self.relname = relid.split(':', 1)[0]
+        self.relid = relid
+        self.unit = unit
+        self.service, num = self.unit.split('/', 1)
+        self.number = int(num)
+
+    def __str__(self):
+        return '{} ({})'.format(self.relid, self.unit)
+
+    @property
+    def data(self):
+        return hookenv.relation_get(rid=self.relid, unit=self.unit)
+
+    def __setitem__(self, key, value):
+        if self.unit != hookenv.local_unit():
+            raise TypeError('Attempting to set {} on remote unit {}'
+                            ''.format(key, self.unit))
+        if value is not None and not isinstance(value, six.string_types):
+            # We don't do implicit casting.
This would cause simple + # types like integers to be read back as strings in subsequent + # hooks, and mutable types would require a lot of wrapping + # to ensure relation-set gets called when they are mutated. + raise ValueError('Only string values allowed') + hookenv.relation_set(self.relid, {key: value}) + + def __delitem__(self, key): + # Deleting a key and setting it to null is the same thing in + # Juju relations. + self[key] = None + + +class Leader(UserDict): + def __init__(self): + pass # Don't call superclass initializer, as it will nuke self.data + + @property + def data(self): + return hookenv.leader_get() + + def __setitem__(self, key, value): + if not hookenv.is_leader(): + raise TypeError('Not the leader. Cannot change leader settings.') + if value is not None and not isinstance(value, six.string_types): + # We don't do implicit casting. This would cause simple + # types like integers to be read back as strings in subsequent + # hooks, and mutable types would require a lot of wrapping + # to ensure leader-set gets called when they are mutated. + raise ValueError('Only string values allowed') + hookenv.leader_set({key: value}) + + def __delitem__(self, key): + # Deleting a key and setting it to null is the same thing in + # Juju leadership settings. + self[key] = None diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/amulet/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
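A minimal sketch of how a hook might use the classes defined in
``charmhelpers/context.py`` above (the ``db`` relation name and the
``hostname`` key are illustrative assumptions, not part of the module)::

    from charmhelpers import context
    from charmhelpers.core import hookenv

    rels = context.Relations()
    for relid, rel in rels.get('db', {}).items():
        # Publish our end of the relation; values must be strings.
        rel.local['hostname'] = hookenv.unit_private_ip()
        for unit, relinfo in rel.items():
            hookenv.log('{} sent {}'.format(unit, relinfo.get('hostname')))

    if hookenv.is_leader():
        context.Leader()['sequence'] = '42'  # only the leader may write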
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/amulet/deployment.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/amulet/deployment.py
new file mode 100644
index 0000000000000000000000000000000000000000..d21d01d8ffe242d686283b0ed977b88be6bfc74e
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/amulet/deployment.py
@@ -0,0 +1,99 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import amulet
+import os
+import six
+
+
+class AmuletDeployment(object):
+    """Amulet deployment.
+
+    This class provides generic Amulet deployment and test runner
+    methods.
+    """
+
+    def __init__(self, series=None):
+        """Initialize the deployment environment."""
+        self.series = None
+
+        if series:
+            self.series = series
+            self.d = amulet.Deployment(series=self.series)
+        else:
+            self.d = amulet.Deployment()
+
+    def _add_services(self, this_service, other_services):
+        """Add services.
+
+        Add services to the deployment where this_service is the local charm
+        that we're testing and other_services are the other services that
+        are being used in the local amulet tests.
+        """
+        if this_service['name'] != os.path.basename(os.getcwd()):
+            s = this_service['name']
+            msg = "The charm's root directory name needs to be {}".format(s)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        if 'units' not in this_service:
+            this_service['units'] = 1
+
+        self.d.add(this_service['name'], units=this_service['units'],
+                   constraints=this_service.get('constraints'),
+                   storage=this_service.get('storage'))
+
+        for svc in other_services:
+            if 'location' in svc:
+                branch_location = svc['location']
+            elif self.series:
+                branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
+            else:
+                branch_location = None
+
+            if 'units' not in svc:
+                svc['units'] = 1
+
+            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+                       constraints=svc.get('constraints'),
+                       storage=svc.get('storage'))
+
+    def _add_relations(self, relations):
+        """Add all of the relations for the services."""
+        for k, v in six.iteritems(relations):
+            self.d.relate(k, v)
+
+    def _configure_services(self, configs):
+        """Configure all of the services."""
+        for service, config in six.iteritems(configs):
+            self.d.configure(service, config)
+
+    def _deploy(self):
+        """Deploy environment and wait for all hooks to finish executing."""
+        timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
+        try:
+            self.d.setup(timeout=timeout)
+            self.d.sentry.wait(timeout=timeout)
+        except amulet.helpers.TimeoutError:
+            amulet.raise_status(
+                amulet.FAIL,
+                msg="Deployment timed out ({}s)".format(timeout)
+            )
+        except Exception:
+            raise
+
+    def run_tests(self):
+        """Run all of the methods that are prefixed with 'test_'."""
+        for test in dir(self):
+            if test.startswith('test_'):
+                getattr(self, test)()
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/amulet/utils.py
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..54283088e1ddd53bc85008bc7446620e87b42278 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,820 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import json +import logging +import os +import re +import socket +import subprocess +import sys +import time +import uuid + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. + """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. + + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.ssh(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg + + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding + service units. 
+ + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. + self.log.warn('DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + + for k, v in six.iteritems(commands): + for cmd in v: + output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. + + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name in ['rabbitmq-server', 'apache2', + 'memcached']): + # init is systemd (or regular sysv) + cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output + + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. + # https://bugs.python.org/issue7005 + config = configparser.ConfigParser(allow_no_value=True) + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. 
+ """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. + """ + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + + for k, v in six.iteritems(expected): + if k in actual: + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. 
+
+        :sentry_unit: The sentry unit to check for the service on
+        :service: service name to look for in process table
+        :pgrep_full: [Deprecated] Use full command line search mode with pgrep
+        :returns: epoch time of service process start
+        """
+        pid_list = self.get_process_id_list(
+            sentry_unit, service, pgrep_full=pgrep_full)
+        pid = pid_list[0]
+        proc_dir = '/proc/{}'.format(pid)
+        self.log.debug('Pid for {} on {}: {}'.format(
+            service, sentry_unit.info['unit_name'], pid))
+
+        return self._get_dir_mtime(sentry_unit, proc_dir)
+
+    def service_restarted(self, sentry_unit, service, filename,
+                          pgrep_full=None, sleep_time=20):
+        """Check if service was restarted.
+
+        Compare a service's start time vs a file's last modification time
+        (such as a config file for that service) to determine if the service
+        has been restarted.
+        """
+        # /!\ DEPRECATION WARNING (beisner):
+        # This method is prone to races in that no before-time is known.
+        # Use validate_service_config_changed instead.
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+        self.log.warn('DEPRECATION WARNING: use '
+                      'validate_service_config_changed instead of '
+                      'service_restarted due to known races.')
+
+        time.sleep(sleep_time)
+        if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
+                self._get_file_mtime(sentry_unit, filename)):
+            return True
+        else:
+            return False
+
+    def service_restarted_since(self, sentry_unit, mtime, service,
+                                pgrep_full=None, sleep_time=20,
+                                retry_count=30, retry_sleep_time=10):
+        """Check if service has been started after a given time.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check for the service on
+          mtime (float): The epoch time to check against
+          service (string): service name to look for in process table
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
+
+        Returns:
+          bool: True if service found and its start time is newer than mtime,
+                False if service is older than mtime or if service was
+                not found.
+        """
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s service restarted since %s on '
+                       '%s' % (service, mtime, unit_name))
+        time.sleep(sleep_time)
+        proc_start_time = None
+        tries = 0
+        while tries <= retry_count and not proc_start_time:
+            try:
+                proc_start_time = self._get_proc_start_time(sentry_unit,
+                                                            service,
+                                                            pgrep_full)
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'OK'.format(tries, service, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, proc may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'failed\n{}'.format(tries, service,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not proc_start_time:
+            self.log.warn('No proc start time found, assuming service did '
+                          'not start')
+            return False
+        if proc_start_time >= mtime:
+            self.log.debug('Proc start time is newer than provided mtime '
+                           '(%s >= %s) on %s (OK)' % (proc_start_time,
+                                                      mtime, unit_name))
+            return True
+        else:
+            self.log.warn('Proc start time (%s) is older than provided mtime '
+                          '(%s) on %s, service did not '
+                          'restart' % (proc_start_time, mtime, unit_name))
+            return False
+
+    def config_updated_since(self, sentry_unit, filename, mtime,
+                             sleep_time=20, retry_count=30,
+                             retry_sleep_time=10):
+        """Check if file was modified after a given time.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check the file mtime on
+          filename (string): The file to check mtime of
+          mtime (float): The epoch time to check against
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
+
+        Returns:
+          bool: True if file was modified more recently than mtime, False if
+                file was modified before mtime, or if file not found.
+        """
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s updated since %s on '
+                       '%s' % (filename, mtime, unit_name))
+        time.sleep(sleep_time)
+        file_mtime = None
+        tries = 0
+        while tries <= retry_count and not file_mtime:
+            try:
+                file_mtime = self._get_file_mtime(sentry_unit, filename)
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'OK'.format(tries, filename, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, file may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'failed\n{}'.format(tries, filename,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not file_mtime:
+            self.log.warn('Could not determine file mtime, assuming '
+                          'file does not exist')
+            return False
+
+        if file_mtime >= mtime:
+            self.log.debug('File mtime is newer than provided mtime '
+                           '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                      mtime, unit_name))
+            return True
+        else:
+            self.log.warn('File mtime is older than provided mtime '
+                          '(%s < %s) on %s' % (file_mtime,
+                                               mtime, unit_name))
+            return False
+
+    def validate_service_config_changed(self, sentry_unit, mtime, service,
+                                        filename, pgrep_full=None,
+                                        sleep_time=20, retry_count=30,
+                                        retry_sleep_time=10):
+        """Check service and file were updated after mtime.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check for the service on
+          mtime (float): The epoch time to check against
+          service (string): service name to look for in process table
+          filename (string): The file to check mtime of
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep in seconds to pass to test helpers
+          retry_count (int): If service is not found, how many times to retry
+          retry_sleep_time (int): Time in seconds to wait between retries
+
+        Typical Usage:
+            u = OpenStackAmuletUtils(ERROR)
+            ...
+            mtime = u.get_sentry_time(self.cinder_sentry)
+            self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
+            if not u.validate_service_config_changed(self.cinder_sentry,
+                                                     mtime,
+                                                     'cinder-api',
+                                                     '/etc/cinder/cinder.conf'):
+                amulet.raise_status(amulet.FAIL, msg='update failed')
+        Returns:
+          bool: True if both service and file were updated/restarted after
+                mtime, False if service is older than mtime or if service was
+                not found or if filename was modified before mtime.
+        """
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+
+        service_restart = self.service_restarted_since(
+            sentry_unit, mtime,
+            service,
+            pgrep_full=pgrep_full,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        config_update = self.config_updated_since(
+            sentry_unit,
+            filename,
+            mtime,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        return service_restart and config_update
+
+    def get_sentry_time(self, sentry_unit):
+        """Return current epoch time on a sentry"""
+        cmd = "date +'%s'"
+        return float(sentry_unit.run(cmd)[0])
+
+    def relation_error(self, name, data):
+        return 'unexpected relation data in {} - {}'.format(name, data)
+
+    def endpoint_error(self, name, data):
+        return 'unexpected endpoint data in {} - {}'.format(name, data)
+
+    def get_ubuntu_releases(self):
+        """Return a list of all Ubuntu releases in order of release."""
+        _d = distro_info.UbuntuDistroInfo()
+        _release_list = _d.all
+        return _release_list
+
+    def file_to_url(self, file_rel_path):
+        """Convert a relative file path to a file URL."""
+        _abs_path = os.path.abspath(file_rel_path)
+        return urlparse.urlparse(_abs_path, scheme='file').geturl()
+
+    def check_commands_on_units(self, commands, sentry_units):
+        """Check that all commands in a list exit zero on all
+        sentry units in a list.
+
+        :param commands: list of bash commands
+        :param sentry_units: list of sentry unit pointers
+        :returns: None if successful; Failure message otherwise
+        """
+        self.log.debug('Checking exit codes for {} commands on {} '
+                       'sentry units...'.format(len(commands),
+                                                len(sentry_units)))
+        for sentry_unit in sentry_units:
+            for cmd in commands:
+                output, code = sentry_unit.run(cmd)
+                if code == 0:
+                    self.log.debug('{} `{}` returned {} '
+                                   '(OK)'.format(sentry_unit.info['unit_name'],
+                                                 cmd, code))
+                else:
+                    return ('{} `{}` returned {} '
+                            '{}'.format(sentry_unit.info['unit_name'],
+                                        cmd, code, output))
+        return None
+
+    def get_process_id_list(self, sentry_unit, process_name,
+                            expect_success=True, pgrep_full=False):
+        """Get a list of process ID(s) from a single sentry juju unit
+        for a single process name.
+
+        :param sentry_unit: Amulet sentry instance (juju unit)
+        :param process_name: Process name
+        :param expect_success: If False, expect the PID to be missing,
+            raise if it is present.
+ :returns: List of process IDs + """ + if pgrep_full: + cmd = 'pgrep -f "{}"'.format(process_name) + else: + cmd = 'pidof -x "{}"'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output).split() + + def get_unit_process_ids( + self, unit_processes, expect_success=True, pgrep_full=False): + """Construct a dict containing unit sentries, process names, and + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. + """ + pid_dict = {} + for sentry_unit, process_list in six.iteritems(unit_processes): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success, + pgrep_full=pgrep_full) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + + for (e_sentry, e_proc_names) in six.iteritems(expected): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + return ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + return ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + + a_pids_length = len(a_pids) + fail_msg = ('PID count mismatch. 
{} ({}) expected, actual: ' + '{}, {} ({})'.format(e_sentry_name, e_proc_name, + e_pids, a_pids_length, + a_pids)) + + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: + return fail_msg + else: + self.log.debug('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, e_proc_name, + e_pids, a_pids)) + return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + self.log.debug('Dicts within list are identical') + else: + return 'Dicts within list are not identical' + + return None + + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. 
Optionally raise if fatal is True."""
+        unit_name = sentry_unit.info['unit_name']
+        file_contents = False
+        tries = 0
+        while not file_contents and tries < (max_wait / 4):
+            try:
+                file_contents = sentry_unit.file_contents(file_name)
+            except IOError:
+                self.log.debug('Attempt {} to open file {} from {} '
+                               'failed'.format(tries, file_name,
+                                               unit_name))
+                time.sleep(4)
+                tries += 1
+
+        if file_contents:
+            return file_contents
+        elif not fatal:
+            return None
+        elif fatal:
+            msg = 'Failed to get file contents from unit.'
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
+        """Open a TCP socket to check for a listening service on a host.
+
+        :param host: host name or IP address, default to localhost
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :returns: True if successful, False if connect failed
+        """
+
+        # Resolve host name if possible
+        try:
+            connect_host = socket.gethostbyname(host)
+            host_human = "{} ({})".format(connect_host, host)
+        except socket.error as e:
+            self.log.warn('Unable to resolve address: '
+                          '{} ({}) Trying anyway!'.format(host, e))
+            connect_host = host
+            host_human = connect_host
+
+        # Attempt socket connection
+        try:
+            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            knock.settimeout(timeout)
+            knock.connect((connect_host, port))
+            knock.close()
+            self.log.debug('Socket connect OK for host '
+                           '{} on port {}.'.format(host_human, port))
+            return True
+        except socket.error as e:
+            self.log.debug('Socket connect FAIL for'
+                           ' {} port {} ({})'.format(host_human, port, e))
+            return False
+
+    def port_knock_units(self, sentry_units, port=22,
+                         timeout=15, expect_success=True):
+        """Open a TCP socket to check for a listening service on each
+        listed juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :expect_success: True by default, set False to invert logic
+        :returns: None if successful, Failure message otherwise
+        """
+        for unit in sentry_units:
+            host = unit.info['public-address']
+            connected = self.port_knock_tcp(host, port, timeout)
+            if not connected and expect_success:
+                return 'Socket connect failed.'
+            elif connected and not expect_success:
+                return 'Socket connected unexpectedly.'
+
+    def get_uuid_epoch_stamp(self):
+        """Returns a stamp string based on uuid4 and epoch time. Useful in
+        generating test messages which need to be unique-ish."""
+        return '[{}-{}]'.format(uuid.uuid4(), time.time())
+
+    # amulet juju action helpers:
+    def run_action(self, unit_sentry, action,
+                   _check_output=subprocess.check_output,
+                   params=None):
+        """Translate to amulet's built-in run_action(). Deprecated.
+
+        Run the named action on a given unit sentry.
+
+        params a dict of parameters to use
+        _check_output parameter is no longer used
+
+        @return action_id.
+        """
+        self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been '
+                      'deprecated for amulet.run_action')
+        return unit_sentry.run_action(action, action_args=params)
+
+    def wait_on_action(self, action_id, _check_output=subprocess.check_output):
+        """Wait for a given action, returning if it completed or not.
+ + action_id a string action uuid + _check_output parameter is no longer used + """ + data = amulet.actions.get_action_output(action_id, full_output=True) + return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/ansible/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/ansible/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..824780aca93c406f7331ec3f0da8f22bee1272ec --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/ansible/__init__.py @@ -0,0 +1,303 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers +""" +The ansible package enables you to easily use the configuration management +tool `Ansible`_ to setup and configure your charm. All of your charm +configuration options and relation-data are available as regular Ansible +variables which can be used in your playbooks and templates. + +.. _Ansible: https://www.ansible.com/ + +Usage +===== + +Here is an example directory structure for a charm to get you started:: + + charm-ansible-example/ + |-- ansible + | |-- playbook.yaml + | `-- templates + | `-- example.j2 + |-- config.yaml + |-- copyright + |-- icon.svg + |-- layer.yaml + |-- metadata.yaml + |-- reactive + | `-- example.py + |-- README.md + +Running a playbook called ``playbook.yaml`` when the ``install`` hook is run +can be as simple as:: + + from charmhelpers.contrib import ansible + from charms.reactive import hook + + @hook('install') + def install(): + ansible.install_ansible_support() + ansible.apply_playbook('ansible/playbook.yaml') + +Here is an example playbook that uses the ``template`` module to template the +file ``example.j2`` to the charm host and then uses the ``debug`` module to +print out all the host and Juju variables that you can use in your playbooks. +Note that you must target ``localhost`` as the playbook is run locally on the +charm host:: + + --- + - hosts: localhost + tasks: + - name: Template a file + template: + src: templates/example.j2 + dest: /tmp/example.j2 + + - name: Print all variables available to Ansible + debug: + var: vars + +Read more online about `playbooks`_ and standard Ansible `modules`_. + +.. _playbooks: https://docs.ansible.com/ansible/latest/user_guide/playbooks.html +.. _modules: https://docs.ansible.com/ansible/latest/user_guide/modules.html + +A further feature of the Ansible hooks is to provide a light weight "action" +scripting tool. 
This is a decorator that you apply to a function, and that +function can now receive cli args, and can pass extra args to the playbook:: + + @hooks.action() + def some_action(amount, force="False"): + "Usage: some-action AMOUNT [force=True]" # <-- shown on error + # process the arguments + # do some calls + # return extra-vars to be passed to ansible-playbook + return { + 'amount': int(amount), + 'type': force, + } + +You can now create a symlink to hooks.py that can be invoked like a hook, but +with cli params:: + + # link actions/some-action to hooks/hooks.py + + actions/some-action amount=10 force=true + +Install Ansible via pip +======================= + +If you want to install a specific version of Ansible via pip instead of +``install_ansible_support`` which uses APT, consider using the layer options +of `layer-basic`_ to install Ansible in a virtualenv:: + + options: + basic: + python_packages: ['ansible==2.9.0'] + include_system_packages: true + use_venv: true + +.. _layer-basic: https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#layer-configuration + +""" +import os +import json +import stat +import subprocess +import functools + +import charmhelpers.contrib.templating.contexts +import charmhelpers.core.host +import charmhelpers.core.hookenv +import charmhelpers.fetch + + +charm_dir = os.environ.get('CHARM_DIR', '') +ansible_hosts_path = '/etc/ansible/hosts' +# Ansible will automatically include any vars in the following +# file in its inventory when run locally. +ansible_vars_path = '/etc/ansible/host_vars/localhost' + + +def install_ansible_support(from_ppa=True, ppa_location='ppa:ansible/ansible'): + """Installs Ansible via APT. + + By default this installs Ansible from the `PPA`_ linked from + the Ansible `website`_ or from a PPA set in ``ppa_location``. + + .. _PPA: https://launchpad.net/~ansible/+archive/ubuntu/ansible + .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu + + If ``from_ppa`` is ``False``, then Ansible will be installed from + Ubuntu's Universe repositories. + """ + if from_ppa: + charmhelpers.fetch.add_source(ppa_location) + charmhelpers.fetch.apt_update(fatal=True) + charmhelpers.fetch.apt_install('ansible') + with open(ansible_hosts_path, 'w+') as hosts_file: + hosts_file.write('localhost ansible_connection=local ansible_remote_tmp=/root/.ansible/tmp') + + +def apply_playbook(playbook, tags=None, extra_vars=None): + """Run a playbook. + + This helper runs a playbook with juju state variables as context, + therefore variables set in application config can be used directly. + List of tags (--tags) and dictionary with extra_vars (--extra-vars) + can be passed as additional parameters. + + Read more about playbook `_variables`_ online. + + .. 
_variables: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html + + Example:: + + # Run ansible/playbook.yaml with tag install and pass extra + # variables var_a and var_b + apply_playbook( + playbook='ansible/playbook.yaml', + tags=['install'], + extra_vars={'var_a': 'val_a', 'var_b': 'val_b'} + ) + + # Run ansible/playbook.yaml with tag config and extra variable nested, + # which is passed as json and can be used as dictionary in playbook + apply_playbook( + playbook='ansible/playbook.yaml', + tags=['config'], + extra_vars={'nested': {'a': 'value1', 'b': 'value2'}} + ) + + # Custom config file can be passed within extra_vars + apply_playbook( + playbook='ansible/playbook.yaml', + extra_vars="@some_file.json" + ) + + """ + tags = tags or [] + tags = ",".join(tags) + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + ansible_vars_path, namespace_separator='__', + allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR)) + + # we want ansible's log output to be unbuffered + env = os.environ.copy() + env['PYTHONUNBUFFERED'] = "1" + call = [ + 'ansible-playbook', + '-c', + 'local', + playbook, + ] + if tags: + call.extend(['--tags', '{}'.format(tags)]) + if extra_vars: + call.extend(['--extra-vars', json.dumps(extra_vars)]) + subprocess.check_call(call, env=env) + + +class AnsibleHooks(charmhelpers.core.hookenv.Hooks): + """Run a playbook with the hook-name as the tag. + + This helper builds on the standard hookenv.Hooks helper, + but additionally runs the playbook with the hook-name specified + using --tags (ie. running all the tasks tagged with the hook-name). + + Example:: + + hooks = AnsibleHooks(playbook_path='ansible/my_machine_state.yaml') + + # All the tasks within my_machine_state.yaml tagged with 'install' + # will be run automatically after do_custom_work() + @hooks.hook() + def install(): + do_custom_work() + + # For most of your hooks, you won't need to do anything other + # than run the tagged tasks for the hook: + @hooks.hook('config-changed', 'start', 'stop') + def just_use_playbook(): + pass + + # As a convenience, you can avoid the above noop function by specifying + # the hooks which are handled by ansible-only and they'll be registered + # for you: + # hooks = AnsibleHooks( + # 'ansible/my_machine_state.yaml', + # default_hooks=['config-changed', 'start', 'stop']) + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, playbook_path, default_hooks=None): + """Register any hooks handled by ansible.""" + super(AnsibleHooks, self).__init__() + + self._actions = {} + self.playbook_path = playbook_path + + default_hooks = default_hooks or [] + + def noop(*args, **kwargs): + pass + + for hook in default_hooks: + self.register(hook, noop) + + def register_action(self, name, function): + """Register a hook""" + self._actions[name] = function + + def execute(self, args): + """Execute the hook followed by the playbook using the hook as tag.""" + hook_name = os.path.basename(args[0]) + extra_vars = None + if hook_name in self._actions: + extra_vars = self._actions[hook_name](args[1:]) + else: + super(AnsibleHooks, self).execute(args) + + charmhelpers.contrib.ansible.apply_playbook( + self.playbook_path, tags=[hook_name], extra_vars=extra_vars) + + def action(self, *action_names): + """Decorator, registering them as actions""" + def action_wrapper(decorated): + + @functools.wraps(decorated) + def wrapper(argv): + kwargs = dict(arg.split('=') for arg in 
argv) + try: + return decorated(**kwargs) + except TypeError as e: + if decorated.__doc__: + e.args += (decorated.__doc__,) + raise + + self.register_action(decorated.__name__, wrapper) + if '_' in decorated.__name__: + self.register_action( + decorated.__name__.replace('_', '-'), wrapper) + + return wrapper + + return action_wrapper diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/benchmark/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/benchmark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c35f7fe785a29519a257f1ed5aa33fdfa19129c7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/benchmark/__init__.py @@ -0,0 +1,124 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +import time +import os +from distutils.spawn import find_executable + +from charmhelpers.core.hookenv import ( + in_relation_hook, + relation_ids, + relation_set, + relation_get, +) + + +def action_set(key, val): + if find_executable('action-set'): + action_cmd = ['action-set'] + + if isinstance(val, dict): + for k, v in iter(val.items()): + action_set('%s.%s' % (key, k), v) + return True + + action_cmd.append('%s=%s' % (key, val)) + subprocess.check_call(action_cmd) + return True + return False + + +class Benchmark(): + """ + Helper class for the `benchmark` interface. + + :param list actions: Define the actions that are also benchmarks + + From inside the benchmark-relation-changed hook, you would + Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom']) + + Examples: + + siege = Benchmark(['siege']) + siege.start() + [... run siege ...] + # The higher the score, the better the benchmark + siege.set_composite_score(16.70, 'trans/sec', 'desc') + siege.finish() + + + """ + + BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing + + required_keys = [ + 'hostname', + 'port', + 'graphite_port', + 'graphite_endpoint', + 'api_port' + ] + + def __init__(self, benchmarks=None): + if in_relation_hook(): + if benchmarks is not None: + for rid in sorted(relation_ids('benchmark')): + relation_set(relation_id=rid, relation_settings={ + 'benchmarks': ",".join(benchmarks) + }) + + # Check the relation data + config = {} + for key in self.required_keys: + val = relation_get(key) + if val is not None: + config[key] = val + else: + # We don't have all of the required keys + config = {} + break + + if len(config): + with open(self.BENCHMARK_CONF, 'w') as f: + for key, val in iter(config.items()): + f.write("%s=%s\n" % (key, val)) + + @staticmethod + def start(): + action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + """ + If the collectd charm is also installed, tell it to send a snapshot + of the current profile data. 
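# A minimal sketch of how the AnsibleHooks helper above might be wired up in
# a charm's hooks file. The playbook path, hook list and action name are
# hypothetical; the mechanism is the one implemented above: execute() runs
# the registered function (if any), then apply_playbook() invokes
# `ansible-playbook -c local` with the hook or action name as the tag.

import sys
from charmhelpers.contrib.ansible import AnsibleHooks

hooks = AnsibleHooks(
    playbook_path='ansible/site.yaml',          # hypothetical playbook
    default_hooks=['config-changed', 'start'],  # handled by ansible only
)

@hooks.hook('install')
def install():
    # custom work runs first, then tasks tagged 'install' in the playbook
    pass

@hooks.action()
def set_magma_ip(magmaIP='0.0.0.0'):
    # hypothetical action: its return value is passed as --extra-vars
    return {'MAGMA_AGW_IP': magmaIP}

if __name__ == '__main__':
    hooks.execute(sys.argv)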
+ """ + COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' + if os.path.exists(COLLECT_PROFILE_DATA): + subprocess.check_output([COLLECT_PROFILE_DATA]) + + @staticmethod + def finish(): + action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + @staticmethod + def set_composite_score(value, units, direction='asc'): + """ + Set the composite score for a benchmark run. This is a single number + representative of the benchmark results. This could be the most + important metric, or an amalgamation of metric scores. + """ + return action_set( + "meta.composite", + {'value': value, 'units': units, 'direction': direction} + ) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmhelpers/IMPORT b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmhelpers/IMPORT new file mode 100644 index 0000000000000000000000000000000000000000..d41cb041f45cb19b87aee7d3f5fd4d47a3d3fb2d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmhelpers/IMPORT @@ -0,0 +1,4 @@ +Source lp:charm-tools/trunk + +charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py +charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmhelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmhelpers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ed63e8121e23a8e01ccb8ddcbf9aea34543d3666 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmhelpers/__init__.py @@ -0,0 +1,203 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import warnings +warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa + +import operator +import tempfile +import time +import yaml +import subprocess + +import six +if six.PY3: + from urllib.request import urlopen + from urllib.error import (HTTPError, URLError) +else: + from urllib2 import (urlopen, HTTPError, URLError) + +"""Helper functions for writing Juju charms in Python.""" + +__metaclass__ = type +__all__ = [ + # 'get_config', # core.hookenv.config() + # 'log', # core.hookenv.log() + # 'log_entry', # core.hookenv.log() + # 'log_exit', # core.hookenv.log() + # 'relation_get', # core.hookenv.relation_get() + # 'relation_set', # core.hookenv.relation_set() + # 'relation_ids', # core.hookenv.relation_ids() + # 'relation_list', # core.hookenv.relation_units() + # 'config_get', # core.hookenv.config() + # 'unit_get', # core.hookenv.unit_get() + # 'open_port', # core.hookenv.open_port() + # 'close_port', # core.hookenv.close_port() + # 'service_control', # core.host.service() + 'unit_info', # client-side, NOT IMPLEMENTED + 'wait_for_machine', # client-side, NOT IMPLEMENTED + 'wait_for_page_contents', # client-side, NOT IMPLEMENTED + 'wait_for_relation', # client-side, NOT IMPLEMENTED + 'wait_for_unit', # client-side, NOT IMPLEMENTED +] + + +SLEEP_AMOUNT = 0.1 + + +# We create a juju_status Command here because it makes testing much, +# much easier. +def juju_status(): + subprocess.check_call(['juju', 'status']) + +# re-implemented as charmhelpers.fetch.configure_sources() +# def configure_source(update=False): +# source = config_get('source') +# if ((source.startswith('ppa:') or +# source.startswith('cloud:') or +# source.startswith('http:'))): +# run('add-apt-repository', source) +# if source.startswith("http:"): +# run('apt-key', 'import', config_get('key')) +# if update: +# run('apt-get', 'update') + + +# DEPRECATED: client-side only +def make_charm_config_file(charm_config): + charm_config_file = tempfile.NamedTemporaryFile(mode='w+') + charm_config_file.write(yaml.dump(charm_config)) + charm_config_file.flush() + # The NamedTemporaryFile instance is returned instead of just the name + # because we want to take advantage of garbage collection-triggered + # deletion of the temp file when it goes out of scope in the caller. + return charm_config_file + + +# DEPRECATED: client-side only +def unit_info(service_name, item_name, data=None, unit=None): + if data is None: + data = yaml.safe_load(juju_status()) + service = data['services'].get(service_name) + if service is None: + # XXX 2012-02-08 gmb: + # This allows us to cope with the race condition that we + # have between deploying a service and having it come up in + # `juju status`. We could probably do with cleaning it up so + # that it fails a bit more noisily after a while. + return '' + units = service['units'] + if unit is not None: + item = units[unit][item_name] + else: + # It might seem odd to sort the units here, but we do it to + # ensure that when no unit is specified, the first unit for the + # service (or at least the one with the lowest number) is the + # one whose data gets returned. + sorted_unit_names = sorted(units.keys()) + item = units[sorted_unit_names[0]][item_name] + return item + + +# DEPRECATED: client-side only +def get_machine_data(): + return yaml.safe_load(juju_status())['machines'] + + +# DEPRECATED: client-side only +def wait_for_machine(num_machines=1, timeout=300): + """Wait `timeout` seconds for `num_machines` machines to come up. + + This wait_for... 
function can be called by other wait_for functions + whose timeouts might be too short in situations where only a bare + Juju setup has been bootstrapped. + + :return: A tuple of (num_machines, time_taken). This is used for + testing. + """ + # You may think this is a hack, and you'd be right. The easiest way + # to tell what environment we're working in (LXC vs EC2) is to check + # the dns-name of the first machine. If it's localhost we're in LXC + # and we can just return here. + if get_machine_data()[0]['dns-name'] == 'localhost': + return 1, 0 + start_time = time.time() + while True: + # Drop the first machine, since it's the Zookeeper and that's + # not a machine that we need to wait for. This will only work + # for EC2 environments, which is why we return early above if + # we're in LXC. + machine_data = get_machine_data() + non_zookeeper_machines = [ + machine_data[key] for key in list(machine_data.keys())[1:]] + if len(non_zookeeper_machines) >= num_machines: + all_machines_running = True + for machine in non_zookeeper_machines: + if machine.get('instance-state') != 'running': + all_machines_running = False + break + if all_machines_running: + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for service to start') + time.sleep(SLEEP_AMOUNT) + return num_machines, time.time() - start_time + + +# DEPRECATED: client-side only +def wait_for_unit(service_name, timeout=480): + """Wait `timeout` seconds for a given service name to come up.""" + wait_for_machine(num_machines=1) + start_time = time.time() + while True: + state = unit_info(service_name, 'agent-state') + if 'error' in state or state == 'started': + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for service to start') + time.sleep(SLEEP_AMOUNT) + if state != 'started': + raise RuntimeError('unit did not start, agent-state: ' + state) + + +# DEPRECATED: client-side only +def wait_for_relation(service_name, relation_name, timeout=120): + """Wait `timeout` seconds for a given relation to come up.""" + start_time = time.time() + while True: + relation = unit_info(service_name, 'relations').get(relation_name) + if relation is not None and relation['state'] == 'up': + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for relation to be up') + time.sleep(SLEEP_AMOUNT) + + +# DEPRECATED: client-side only +def wait_for_page_contents(url, contents, timeout=120, validate=None): + if validate is None: + validate = operator.contains + start_time = time.time() + while True: + try: + stream = urlopen(url) + except (HTTPError, URLError): + pass + else: + page = stream.read() + if validate(page, contents): + return page + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for contents of ' + url) + time.sleep(SLEEP_AMOUNT) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/IMPORT b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/IMPORT new file mode 100644 index 0000000000000000000000000000000000000000..554fddda9f48d6d17abaff942834493b2cca2dbe --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/IMPORT @@ -0,0 +1,14 @@ +Source: lp:charmsupport/trunk + +charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py +charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py 
+charmsupport/charmsupport/host.py -> charm-helpers/charmhelpers/contrib/charmsupport/host.py +charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py +charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py + +charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py +charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py +charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py +charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py + +charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py new file mode 100644 index 0000000000000000000000000000000000000000..d775861b0868a174a3f0f4a8d4a42320272a4cb0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py @@ -0,0 +1,500 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Compatibility with the nrpe-external-master charm""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Matthew Wedgwood + +import subprocess +import pwd +import grp +import os +import glob +import shutil +import re +import shlex +import yaml + +from charmhelpers.core.hookenv import ( + config, + hook_name, + local_unit, + log, + relation_get, + relation_ids, + relation_set, + relations_of_type, +) + +from charmhelpers.core.host import service +from charmhelpers.core import host + +# This module adds compatibility with the nrpe-external-master and plain nrpe +# subordinate charms. To use it in your charm: +# +# 1. Update metadata.yaml +# +# provides: +# (...) 
+# nrpe-external-master: +# interface: nrpe-external-master +# scope: container +# +# and/or +# +# provides: +# (...) +# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. +# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. +# If left empty, the nagios_context will be used as the servicegroup +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 4.a If your charm is a subordinate charm set primary=False +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE(primary=False) +# +# 5. ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_.@]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _get_check_filename(self): + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) + + def _get_service_filename(self, hostname): + return os.path.join(NRPE.nagios_exportdir, + 'service__{}_{}.cfg'.format(hostname, self.command)) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return 
command + log('Check command not found: {}'.format(parts[0])) + return '' + + def _remove_service_files(self): + if not os.path.exists(NRPE.nagios_exportdir): + return + for f in os.listdir(NRPE.nagios_exportdir): + if f.endswith('_{}.cfg'.format(self.command)): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + def remove(self, hostname): + nrpe_check_file = self._get_check_filename() + if os.path.exists(nrpe_check_file): + os.remove(nrpe_check_file) + self._remove_service_files() + + def write(self, nagios_context, hostname, nagios_servicegroups): + nrpe_check_file = self._get_check_filename() + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + if nagios_servicegroups: + nrpe_check_config.write( + "# The following header was added automatically by juju\n") + nrpe_check_config.write( + "# Modifying it will affect nagios monitoring and alerting\n") + nrpe_check_config.write( + "# servicegroups: {}\n".format(nagios_servicegroups)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname, + nagios_servicegroups) + + def write_service_config(self, nagios_context, hostname, + nagios_servicegroups): + self._remove_service_files() + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_servicegroups, + 'description': self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = self._get_service_filename(hostname) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server + + def __init__(self, hostname=None, primary=True): + super(NRPE, self).__init__() + self.config = config() + self.primary = primary + self.nagios_context = self.config['nagios_context'] + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: + self.nagios_servicegroups = self.config['nagios_servicegroups'] + else: + self.nagios_servicegroups = self.nagios_context + self.unit_name = local_unit().replace('/', '-') + if hostname: + self.hostname = hostname + else: + nagios_hostname = get_nagios_hostname() + if nagios_hostname: + self.hostname = nagios_hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + # Iff in an nrpe-external-master relation hook, set primary status + relation = relation_ids('nrpe-external-master') + if relation: + log("Setting charm primary status {}".format(primary)) + for rid in relation: + relation_set(relation_id=rid, relation_settings={'primary': self.primary}) + self.remove_check_queue = set() + + def add_check(self, *args, **kwargs): + shortname = None + if kwargs.get('shortname') is None: + if len(args) > 0: + shortname = args[0] + else: + shortname = kwargs['shortname'] + + self.checks.append(Check(*args, **kwargs)) + try: + self.remove_check_queue.remove(shortname) + except KeyError: + pass + + def remove_check(self, *args, **kwargs): + if kwargs.get('shortname') is 
None: + raise ValueError('shortname of check must be specified') + + # Use sensible defaults if they're not specified - these are not + # actually used during removal, but they're required for constructing + # the Check object; check_disk is chosen because it's part of the + # nagios-plugins-basic package. + if kwargs.get('check_cmd') is None: + kwargs['check_cmd'] = 'check_disk' + if kwargs.get('description') is None: + kwargs['description'] = '' + + check = Check(*args, **kwargs) + check.remove(self.hostname) + self.remove_check_queue.add(kwargs['shortname']) + + def write(self): + try: + nagios_uid = pwd.getpwnam('nagios').pw_uid + nagios_gid = grp.getgrnam('nagios').gr_gid + except Exception: + log("Nagios user not set up, nrpe checks not updated") + return + + if not os.path.exists(NRPE.nagios_logdir): + os.mkdir(NRPE.nagios_logdir) + os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) + + nrpe_monitors = {} + monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + for nrpecheck in self.checks: + nrpecheck.write(self.nagios_context, self.hostname, + self.nagios_servicegroups) + nrpe_monitors[nrpecheck.shortname] = { + "command": nrpecheck.command, + } + + # update-status hooks are configured to firing every 5 minutes by + # default. When nagios-nrpe-server is restarted, the nagios server + # reports checks failing causing unnecessary alerts. Let's not restart + # on update-status hooks. + if not hook_name() == 'update-status': + service('restart', 'nagios-nrpe-server') + + monitor_ids = relation_ids("local-monitors") + \ + relation_ids("nrpe-external-master") + for rid in monitor_ids: + reldata = relation_get(unit=local_unit(), rid=rid) + if 'monitors' in reldata: + # update the existing set of monitors with the new data + old_monitors = yaml.safe_load(reldata['monitors']) + old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe'] + # remove keys that are in the remove_check_queue + old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items() + if k not in self.remove_check_queue} + # update/add nrpe_monitors + old_nrpe_monitors.update(nrpe_monitors) + old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors + # write back to the relation + relation_set(relation_id=rid, monitors=yaml.dump(old_monitors)) + else: + # write a brand new set of monitors, as no existing ones. 
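# Descriptive aside: the 'monitors' value exchanged on the relation is a
# YAML document of the shape
#     {'monitors': {'remote': {'nrpe': {<shortname>: {'command': ...}}}}}
# The branch above merges new checks into the existing map after dropping
# anything queued in self.remove_check_queue; the branch below publishes the
# freshly built dict when the relation carries no monitors yet.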
+                relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+        self.remove_check_queue.clear()
+
+
+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_host_context
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_host_context' in rel:
+            return rel['nagios_host_context']
+
+
+def get_nagios_hostname(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_hostname
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_hostname' in rel:
+            return rel['nagios_hostname']
+
+
+def get_nagios_unit_name(relation_name='nrpe-external-master'):
+    """
+    Return the nagios unit name prepended with host_context if needed
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    host_context = get_nagios_hostcontext(relation_name)
+    if host_context:
+        unit = "%s:%s" % (host_context, local_unit())
+    else:
+        unit = local_unit()
+    return unit
+
+
+def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
+    """
+    Add a check for each service in the list
+
+    :param NRPE nrpe: NRPE object to add check to
+    :param list services: List of services to check
+    :param str unit_name: Unit name to use in check description
+    :param bool immediate_check: For sysv init, run the service check immediately
+    """
+    for svc in services:
+        # Don't add a check for these services from neutron-gateway
+        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
+            continue
+
+        upstart_init = '/etc/init/%s.conf' % svc
+        sysv_init = '/etc/init.d/%s' % svc
+
+        if host.init_is_systemd():
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_systemd.py %s' % svc
+            )
+        elif os.path.exists(upstart_init):
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_upstart_job %s' % svc
+            )
+        elif os.path.exists(sysv_init):
+            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
+            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
+            croncmd = (
+                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
+                '-e -s /etc/init.d/%s status' % svc
+            )
+            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
+            f = open(cronpath, 'w')
+            f.write(cron_file)
+            f.close()
+            nrpe.add_check(
+                shortname=svc,
+                description='service check {%s}' % unit_name,
+                check_cmd='check_status_file.py -f %s' % checkpath,
+            )
+            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
+            # (LP: #1670223).
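# Descriptive aside: for sysv services the NRPE check reads a status file
# that the cron job above refreshes every 5 minutes; the block below primes
# that file once at registration time so the first poll does not fail while
# waiting for cron to fire.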
+            if immediate_check and os.path.isdir(nrpe.homedir):
+                f = open(checkpath, 'w')
+                subprocess.call(
+                    croncmd.split(),
+                    stdout=f,
+                    stderr=subprocess.STDOUT
+                )
+                f.close()
+                os.chmod(checkpath, 0o644)
+
+
+def copy_nrpe_checks(nrpe_files_dir=None):
+    """
+    Copy the nrpe checks into place
+    """
+    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
+    if not os.path.exists(NAGIOS_PLUGINS):
+        os.makedirs(NAGIOS_PLUGINS)
+    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
+        if os.path.isfile(fname):
+            shutil.copy2(fname,
+                         os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
+
+
+def add_haproxy_checks(nrpe, unit_name):
+    """
+    Add a check for each HAProxy service
+
+    :param NRPE nrpe: NRPE object to add check to
+    :param str unit_name: Unit name to use in check description
+    """
+    nrpe.add_check(
+        shortname='haproxy_servers',
+        description='Check HAProxy {%s}' % unit_name,
+        check_cmd='check_haproxy.sh')
+    nrpe.add_check(
+        shortname='haproxy_queue',
+        description='Check HAProxy queue depth {%s}' % unit_name,
+        check_cmd='check_haproxy_queue_depth.sh')
+
+
+def remove_deprecated_check(nrpe, deprecated_services):
+    """
+    Remove checks for deprecated services in list
+
+    :param nrpe: NRPE object to remove check from
+    :type nrpe: NRPE
+    :param deprecated_services: List of deprecated services that are removed
+    :type deprecated_services: list
+    """
+    for dep_svc in deprecated_services:
+        log('Deprecated service: {}'.format(dep_svc))
+        nrpe.remove_check(shortname=dep_svc)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/volumes.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/volumes.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ea43f0888cd92ac4344f908aa7c9d0afe7568ed
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/charmsupport/volumes.py
@@ -0,0 +1,173 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Functions for managing volumes in juju units. One volume is supported per unit.
+Subordinates may have their own storage, provided it is on its own partition.
+
+Configuration stanzas::
+
+    volume-ephemeral:
+        type: boolean
+        default: true
+        description: >
+            If false, a volume is mounted as specified in "volume-map".
+            If true, ephemeral storage will be used, meaning that log data
+            will only exist as long as the machine does. YOU HAVE BEEN WARNED.
+    volume-map:
+        type: string
+        default: {}
+        description: >
+            YAML map of units to device names, e.g.:
+            "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
+            Service units will raise a configure-error if volume-ephemeral
+            is 'true' and no volume-map value is set. Use 'juju set' to set a
+            value and 'juju resolved' to complete configuration.
+
+Usage::
+
+    from charmsupport.volumes import configure_volume, VolumeConfigurationError
+    from charmsupport.hookenv import log, ERROR
+    def pre_mount_hook():
+        stop_service('myservice')
+    def post_mount_hook():
+        start_service('myservice')
+
+    if __name__ == '__main__':
+        try:
+            configure_volume(before_change=pre_mount_hook,
+                             after_change=post_mount_hook)
+        except VolumeConfigurationError:
+            log('Storage could not be configured', ERROR)
+
+'''
+
+# XXX: Known limitations
+# - fstab is neither consulted nor updated
+
+import os
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+import yaml
+
+
+MOUNT_BASE = '/srv/juju/volumes'
+
+
+class VolumeConfigurationError(Exception):
+    '''Volume configuration data is missing or invalid'''
+    pass
+
+
+def get_config():
+    '''Gather and sanity-check volume configuration data'''
+    volume_config = {}
+    config = hookenv.config()
+
+    errors = False
+
+    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
+        volume_config['ephemeral'] = True
+    else:
+        volume_config['ephemeral'] = False
+
+    volume_map = None
+    try:
+        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
+    except yaml.YAMLError as e:
+        hookenv.log("Error parsing YAML volume-map: {}".format(e),
+                    hookenv.ERROR)
+        errors = True
+    if volume_map is None:
+        # probably an empty string, or the YAML failed to parse
+        volume_map = {}
+    elif not isinstance(volume_map, dict):
+        hookenv.log("Volume-map should be a dictionary, not {}".format(
+            type(volume_map)))
+        errors = True
+
+    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
+    if volume_config['device'] and volume_config['ephemeral']:
+        # asked for ephemeral storage but also defined a volume ID
+        hookenv.log('A volume is defined for this unit, but ephemeral '
+                    'storage was requested', hookenv.ERROR)
+        errors = True
+    elif not volume_config['device'] and not volume_config['ephemeral']:
+        # asked for permanent storage but did not define volume ID
+        hookenv.log('Persistent storage was requested, but there is no volume '
+                    'defined for this unit.', hookenv.ERROR)
+        errors = True
+
+    unit_mount_name = hookenv.local_unit().replace('/', '-')
+    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
+
+    if errors:
+        return None
+    return volume_config
+
+
+def mount_volume(config):
+    if os.path.exists(config['mountpoint']):
+        if not os.path.isdir(config['mountpoint']):
+            hookenv.log('Not a directory: {}'.format(config['mountpoint']))
+            raise VolumeConfigurationError()
+    else:
+        host.mkdir(config['mountpoint'])
+    if os.path.ismount(config['mountpoint']):
+        unmount_volume(config)
+    if not host.mount(config['device'], config['mountpoint'], persist=True):
+        raise VolumeConfigurationError()
+
+
+def unmount_volume(config):
+    if os.path.ismount(config['mountpoint']):
+        if not host.umount(config['mountpoint'], persist=True):
+            raise VolumeConfigurationError()
+
+
+def managed_mounts():
+    '''List of all mounted managed volumes'''
+    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
+
+
+def configure_volume(before_change=lambda: None, after_change=lambda: None):
+    '''Set up storage (or don't) according to the charm's volume configuration.
+ Returns the mount point or "ephemeral". before_change and after_change + are optional functions to be called if the volume configuration changes. + ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/database/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/database/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..64fac9def6101c1de83cd5dafe7fdb5213d6a955 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/database/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/database/mysql.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/database/mysql.py new file mode 100644 index 0000000000000000000000000000000000000000..c9ecce5f793eedeb963557d109deaf5e1271e65f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/database/mysql.py @@ -0,0 +1,821 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
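# A minimal sketch of typical use of the charmsupport.volumes helper defined
# above, following its module docstring. The service name is hypothetical;
# configure_volume() returns 'ephemeral' or the mount point, and raises
# VolumeConfigurationError on bad configuration.

from charmhelpers.contrib.charmsupport.volumes import (
    configure_volume,
    VolumeConfigurationError,
)
from charmhelpers.core.hookenv import log, ERROR
from charmhelpers.core.host import service_stop, service_start

def config_changed():
    try:
        mountpoint = configure_volume(
            before_change=lambda: service_stop('rsyslog'),   # hypothetical
            after_change=lambda: service_start('rsyslog'),
        )
        log('storage ready at {}'.format(mountpoint))
    except VolumeConfigurationError:
        log('Storage could not be configured', ERROR)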
+ +"""Helper for working with a MySQL database""" +import collections +import copy +import json +import re +import sys +import platform +import os +import glob +import six + +# from string import upper + +from charmhelpers.core.host import ( + CompareHostReleases, + lsb_release, + mkdir, + pwgen, + write_file +) +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + related_units, + unit_get, + log, + DEBUG, + INFO, + WARNING, + leader_get, + leader_set, + is_leader, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, + filter_installed_packages, +) +from charmhelpers.contrib.network.ip import get_host_ip + +try: + import MySQLdb +except ImportError: + apt_update(fatal=True) + if six.PY2: + apt_install(filter_installed_packages(['python-mysqldb']), fatal=True) + else: + apt_install(filter_installed_packages(['python3-mysqldb']), fatal=True) + import MySQLdb + + +class MySQLSetPasswordError(Exception): + pass + + +class MySQLHelper(object): + + def __init__(self, rpasswdf_template, upasswdf_template, host='localhost', + migrate_passwd_to_leader_storage=True, + delete_ondisk_passwd_file=True, user="root", password=None, port=None): + self.user = user + self.host = host + self.password = password + self.port = port + + # Password file path templates + self.root_passwd_file_template = rpasswdf_template + self.user_passwd_file_template = upasswdf_template + + self.migrate_passwd_to_leader_storage = migrate_passwd_to_leader_storage + # If we migrate we have the option to delete local copy of root passwd + self.delete_ondisk_passwd_file = delete_ondisk_passwd_file + self.connection = None + + def connect(self, user='root', password=None, host=None, port=None): + _connection_info = { + "user": user or self.user, + "passwd": password or self.password, + "host": host or self.host + } + # port cannot be None but we also do not want to specify it unless it + # has been explicit set. 
+        port = port or self.port
+        if port is not None:
+            _connection_info["port"] = port
+
+        log("Opening db connection for %s@%s" % (user, host), level=DEBUG)
+        self.connection = MySQLdb.connect(**_connection_info)
+
+    def database_exists(self, db_name):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("SHOW DATABASES")
+            databases = [i[0] for i in cursor.fetchall()]
+        finally:
+            cursor.close()
+
+        return db_name in databases
+
+    def create_database(self, db_name):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("CREATE DATABASE `{}` CHARACTER SET UTF8"
+                           .format(db_name))
+        finally:
+            cursor.close()
+
+    def grant_exists(self, db_name, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
+                      "TO '{}'@'{}'".format(db_name, db_user, remote_ip)
+        try:
+            cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
+                                                              remote_ip))
+            grants = [i[0] for i in cursor.fetchall()]
+        except MySQLdb.OperationalError:
+            return False
+        finally:
+            cursor.close()
+
+        # TODO: review for different grants
+        return priv_string in grants
+
+    def create_grant(self, db_name, db_user, remote_ip, password):
+        cursor = self.connection.cursor()
+        try:
+            # TODO: review for different grants
+            cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}' "
+                           "IDENTIFIED BY '{}'".format(db_name,
+                                                       db_user,
+                                                       remote_ip,
+                                                       password))
+        finally:
+            cursor.close()
+
+    def create_admin_grant(self, db_user, remote_ip, password):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
+                           "IDENTIFIED BY '{}'".format(db_user,
+                                                       remote_ip,
+                                                       password))
+        finally:
+            cursor.close()
+
+    def cleanup_grant(self, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("DELETE FROM mysql.user WHERE user='{}' "
+                           "AND HOST='{}'".format(db_user,
+                                                  remote_ip))
+        finally:
+            cursor.close()
+
+    def flush_priviledges(self):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("FLUSH PRIVILEGES")
+        finally:
+            cursor.close()
+
+    def execute(self, sql):
+        """Execute arbitrary SQL against the database."""
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute(sql)
+        finally:
+            cursor.close()
+
+    def select(self, sql):
+        """
+        Execute arbitrary SQL select query against the database
+        and return the results.
+ + :param sql: SQL select query to execute + :type sql: string + :returns: SQL select query result + :rtype: list of lists + :raises: MySQLdb.Error + """ + cursor = self.connection.cursor() + try: + cursor.execute(sql) + results = [list(i) for i in cursor.fetchall()] + finally: + cursor.close() + return results + + def migrate_passwords_to_leader_storage(self, excludes=None): + """Migrate any passwords storage on disk to leader storage.""" + if not is_leader(): + log("Skipping password migration as not the lead unit", + level=DEBUG) + return + dirname = os.path.dirname(self.root_passwd_file_template) + path = os.path.join(dirname, '*.passwd') + for f in glob.glob(path): + if excludes and f in excludes: + log("Excluding %s from leader storage migration" % (f), + level=DEBUG) + continue + + key = os.path.basename(f) + with open(f, 'r') as passwd: + _value = passwd.read().strip() + + try: + leader_set(settings={key: _value}) + + if self.delete_ondisk_passwd_file: + os.unlink(f) + except ValueError: + # NOTE cluster relation not yet ready - skip for now + pass + + def get_mysql_password_on_disk(self, username=None, password=None): + """Retrieve, generate or store a mysql password for the provided + username on disk.""" + if username: + template = self.user_passwd_file_template + passwd_file = template.format(username) + else: + passwd_file = self.root_passwd_file_template + + _password = None + if os.path.exists(passwd_file): + log("Using existing password file '%s'" % passwd_file, level=DEBUG) + with open(passwd_file, 'r') as passwd: + _password = passwd.read().strip() + else: + log("Generating new password file '%s'" % passwd_file, level=DEBUG) + if not os.path.isdir(os.path.dirname(passwd_file)): + # NOTE: need to ensure this is not mysql root dir (which needs + # to be mysql readable) + mkdir(os.path.dirname(passwd_file), owner='root', group='root', + perms=0o770) + # Force permissions - for some reason the chmod in makedirs + # fails + os.chmod(os.path.dirname(passwd_file), 0o770) + + _password = password or pwgen(length=32) + write_file(passwd_file, _password, owner='root', group='root', + perms=0o660) + + return _password + + def passwd_keys(self, username): + """Generator to return keys used to store passwords in peer store. + + NOTE: we support both legacy and new format to support mysql + charm prior to refactor. This is necessary to avoid LP 1451890. + """ + keys = [] + if username == 'mysql': + log("Bad username '%s'" % (username), level=WARNING) + + if username: + # IMPORTANT: *newer* format must be returned first + keys.append('mysql-%s.passwd' % (username)) + keys.append('%s.passwd' % (username)) + else: + keys.append('mysql.passwd') + + for key in keys: + yield key + + def get_mysql_password(self, username=None, password=None): + """Retrieve, generate or store a mysql password for the provided + username using peer relation cluster.""" + excludes = [] + + # First check peer relation. 
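# Descriptive aside: passwd_keys() yields the newer 'mysql-<username>.passwd'
# key before the legacy '<username>.passwd' key, so the lookup below prefers
# the post-refactor format while still finding passwords stored by older
# versions of the charm (see LP 1451890 above).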
+ try: + for key in self.passwd_keys(username): + _password = leader_get(key) + if _password: + break + + # If root password available don't update peer relation from local + if _password and not username: + excludes.append(self.root_passwd_file_template) + + except ValueError: + # cluster relation is not yet started; use on-disk + _password = None + + # If none available, generate new one + if not _password: + _password = self.get_mysql_password_on_disk(username, password) + + # Put on wire if required + if self.migrate_passwd_to_leader_storage: + self.migrate_passwords_to_leader_storage(excludes=excludes) + + return _password + + def get_mysql_root_password(self, password=None): + """Retrieve or generate mysql root password for service units.""" + return self.get_mysql_password(username=None, password=password) + + def set_mysql_password(self, username, password, current_password=None): + """Update a mysql password for the provided username changing the + leader settings + + To update root's password pass `None` in the username + + :param username: Username to change password of + :type username: str + :param password: New password for user. + :type password: str + :param current_password: Existing password for user. + :type current_password: str + """ + + if username is None: + username = 'root' + + # get root password via leader-get, it may be that in the past (when + # changes to root-password were not supported) the user changed the + # password, so leader-get is more reliable source than + # config.previous('root-password'). + rel_username = None if username == 'root' else username + if not current_password: + current_password = self.get_mysql_password(rel_username) + + # password that needs to be set + new_passwd = password + + # update password for all users (e.g. root@localhost, root@::1, etc) + try: + self.connect(user=username, password=current_password) + cursor = self.connection.cursor() + except MySQLdb.OperationalError as ex: + raise MySQLSetPasswordError(('Cannot connect using password in ' + 'leader settings (%s)') % ex, ex) + + try: + # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account + # fails when using SET PASSWORD so using UPDATE against the + # mysql.user table is needed, but changes to this table are not + # replicated across the cluster, so this update needs to run in + # all the nodes. 
More info at + # http://galeracluster.com/documentation-webpages/userchanges.html + release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) + if release < 'bionic': + SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = " + "PASSWORD( %s ) WHERE user = %s;") + else: + # PXC 5.7 (introduced in Bionic) uses authentication_string + SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET " + "authentication_string = " + "PASSWORD( %s ) WHERE user = %s;") + cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username)) + cursor.execute('FLUSH PRIVILEGES;') + self.connection.commit() + except MySQLdb.OperationalError as ex: + raise MySQLSetPasswordError('Cannot update password: %s' % str(ex), + ex) + finally: + cursor.close() + + # check the password was changed + try: + self.connect(user=username, password=new_passwd) + self.execute('select 1;') + except MySQLdb.OperationalError as ex: + raise MySQLSetPasswordError(('Cannot connect using new password: ' + '%s') % str(ex), ex) + + if not is_leader(): + log('Only the leader can set a new password in the relation', + level=DEBUG) + return + + for key in self.passwd_keys(rel_username): + _password = leader_get(key) + if _password: + log('Updating password for %s (%s)' % (key, rel_username), + level=DEBUG) + leader_set(settings={key: new_passwd}) + + def set_mysql_root_password(self, password, current_password=None): + """Update mysql root password changing the leader settings + + :param password: New password for user. + :type password: str + :param current_password: Existing password for user. + :type current_password: str + """ + self.set_mysql_password( + 'root', + password, + current_password=current_password) + + def normalize_address(self, hostname): + """Ensure that address returned is an IP address (i.e. not fqdn)""" + if config_get('prefer-ipv6'): + # TODO: add support for ipv6 dns + return hostname + + if hostname != unit_get('private-address'): + return get_host_ip(hostname, fallback=hostname) + + # Otherwise assume localhost + return '127.0.0.1' + + def get_allowed_units(self, database, username, relation_id=None): + """Get list of units with access grants for database with username. + + This is typically used to provide shared-db relations with a list of + which units have been granted access to the given database. 
+ """ + self.connect(password=self.get_mysql_root_password()) + allowed_units = set() + for unit in related_units(relation_id): + settings = relation_get(rid=relation_id, unit=unit) + # First check for setting with prefix, then without + for attr in ["%s_hostname" % (database), 'hostname']: + hosts = settings.get(attr, None) + if hosts: + break + + if hosts: + # hostname can be json-encoded list of hostnames + try: + hosts = json.loads(hosts) + except ValueError: + hosts = [hosts] + else: + hosts = [settings['private-address']] + + if hosts: + for host in hosts: + host = self.normalize_address(host) + if self.grant_exists(database, username, host): + log("Grant exists for host '%s' on db '%s'" % + (host, database), level=DEBUG) + if unit not in allowed_units: + allowed_units.add(unit) + else: + log("Grant does NOT exist for host '%s' on db '%s'" % + (host, database), level=DEBUG) + else: + log("No hosts found for grant check", level=INFO) + + return allowed_units + + def configure_db(self, hostname, database, username, admin=False): + """Configure access to database for username from hostname.""" + self.connect(password=self.get_mysql_root_password()) + if not self.database_exists(database): + self.create_database(database) + + remote_ip = self.normalize_address(hostname) + password = self.get_mysql_password(username) + if not self.grant_exists(database, username, remote_ip): + if not admin: + self.create_grant(database, username, remote_ip, password) + else: + self.create_admin_grant(username, remote_ip, password) + self.flush_priviledges() + + return password + + +# `_singleton_config_helper` stores the instance of the helper class that is +# being used during a hook invocation. +_singleton_config_helper = None + + +def get_mysql_config_helper(): + global _singleton_config_helper + if _singleton_config_helper is None: + _singleton_config_helper = MySQLConfigHelper() + return _singleton_config_helper + + +class MySQLConfigHelper(object): + """Base configuration helper for MySQL.""" + + # Going for the biggest page size to avoid wasted bytes. 
+ # InnoDB page size is 16MB + + DEFAULT_PAGE_SIZE = 16 * 1024 * 1024 + DEFAULT_INNODB_BUFFER_FACTOR = 0.50 + DEFAULT_INNODB_BUFFER_SIZE_MAX = 512 * 1024 * 1024 + + # Validation and lookups for InnoDB configuration + INNODB_VALID_BUFFERING_VALUES = [ + 'none', + 'inserts', + 'deletes', + 'changes', + 'purges', + 'all' + ] + INNODB_FLUSH_CONFIG_VALUES = { + 'fast': 2, + 'safest': 1, + 'unsafe': 0, + } + + def human_to_bytes(self, human): + """Convert human readable configuration options to bytes.""" + num_re = re.compile('^[0-9]+$') + if num_re.match(human): + return human + + factors = { + 'K': 1024, + 'M': 1048576, + 'G': 1073741824, + 'T': 1099511627776 + } + modifier = human[-1] + if modifier in factors: + return int(human[:-1]) * factors[modifier] + + if modifier == '%': + total_ram = self.human_to_bytes(self.get_mem_total()) + if self.is_32bit_system() and total_ram > self.sys_mem_limit(): + total_ram = self.sys_mem_limit() + factor = int(human[:-1]) * 0.01 + pctram = total_ram * factor + return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE)) + + raise ValueError("Can only convert K,M,G, or T") + + def is_32bit_system(self): + """Determine whether system is 32 or 64 bit.""" + try: + return sys.maxsize < 2 ** 32 + except OverflowError: + return False + + def sys_mem_limit(self): + """Determine the default memory limit for the current service unit.""" + if platform.machine() in ['armv7l']: + _mem_limit = self.human_to_bytes('2700M') # experimentally determined + else: + # Limit for x86 based 32bit systems + _mem_limit = self.human_to_bytes('4G') + + return _mem_limit + + def get_mem_total(self): + """Calculate the total memory in the current service unit.""" + with open('/proc/meminfo') as meminfo_file: + for line in meminfo_file: + key, mem = line.split(':', 2) + if key == 'MemTotal': + mtot, modifier = mem.strip().split(' ') + return '%s%s' % (mtot, modifier[0].upper()) + + def get_innodb_flush_log_at_trx_commit(self): + """Get value for innodb_flush_log_at_trx_commit. + + Use the innodb-flush-log-at-trx-commit or the tunning-level setting + translated by INNODB_FLUSH_CONFIG_VALUES to get the + innodb_flush_log_at_trx_commit value. + + :returns: Numeric value for innodb_flush_log_at_trx_commit + :rtype: Union[None, int] + """ + _iflatc = config_get('innodb-flush-log-at-trx-commit') + _tuning_level = config_get('tuning-level') + if _iflatc: + return _iflatc + elif _tuning_level: + return self.INNODB_FLUSH_CONFIG_VALUES.get(_tuning_level, 1) + + def get_innodb_change_buffering(self): + """Get value for innodb_change_buffering. + + Use the innodb-change-buffering validated against + INNODB_VALID_BUFFERING_VALUES to get the innodb_change_buffering value. + + :returns: String value for innodb_change_buffering. + :rtype: Union[None, str] + """ + _icb = config_get('innodb-change-buffering') + if _icb and _icb in self.INNODB_VALID_BUFFERING_VALUES: + return _icb + + def get_innodb_buffer_pool_size(self): + """Get value for innodb_buffer_pool_size. + + Return the number value of innodb-buffer-pool-size or dataset-size. If + neither is set, calculate a sane default based on total memory. + + :returns: Numeric value for innodb_buffer_pool_size. 
+ :rtype: int + """ + total_memory = self.human_to_bytes(self.get_mem_total()) + + dataset_bytes = config_get('dataset-size') + innodb_buffer_pool_size = config_get('innodb-buffer-pool-size') + + if innodb_buffer_pool_size: + innodb_buffer_pool_size = self.human_to_bytes( + innodb_buffer_pool_size) + elif dataset_bytes: + log("Option 'dataset-size' has been deprecated, please use" + "innodb_buffer_pool_size option instead", level="WARN") + innodb_buffer_pool_size = self.human_to_bytes( + dataset_bytes) + else: + # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB + # to ensure that deployments in containers + # without constraints don't try to consume + # silly amounts of memory. + innodb_buffer_pool_size = min( + int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR), + self.DEFAULT_INNODB_BUFFER_SIZE_MAX + ) + + if innodb_buffer_pool_size > total_memory: + log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format( + innodb_buffer_pool_size, + total_memory), level='WARN') + + return innodb_buffer_pool_size + + +class PerconaClusterHelper(MySQLConfigHelper): + """Percona-cluster specific configuration helper.""" + + def parse_config(self): + """Parse charm configuration and calculate values for config files.""" + config = config_get() + mysql_config = {} + if 'max-connections' in config: + mysql_config['max_connections'] = config['max-connections'] + + if 'wait-timeout' in config: + mysql_config['wait_timeout'] = config['wait-timeout'] + + if self.get_innodb_flush_log_at_trx_commit() is not None: + mysql_config['innodb_flush_log_at_trx_commit'] = \ + self.get_innodb_flush_log_at_trx_commit() + + if self.get_innodb_change_buffering() is not None: + mysql_config['innodb_change_buffering'] = config['innodb-change-buffering'] + + if 'innodb-io-capacity' in config: + mysql_config['innodb_io_capacity'] = config['innodb-io-capacity'] + + # Set a sane default key_buffer size + mysql_config['key_buffer'] = self.human_to_bytes('32M') + mysql_config['innodb_buffer_pool_size'] = self.get_innodb_buffer_pool_size() + return mysql_config + + +class MySQL8Helper(MySQLHelper): + + def grant_exists(self, db_name, db_user, remote_ip): + cursor = self.connection.cursor() + priv_string = ("GRANT ALL PRIVILEGES ON {}.* " + "TO {}@{}".format(db_name, db_user, remote_ip)) + try: + cursor.execute("SHOW GRANTS FOR '{}'@'{}'".format(db_user, + remote_ip)) + grants = [i[0] for i in cursor.fetchall()] + except MySQLdb.OperationalError: + return False + finally: + cursor.close() + + # Different versions of MySQL use ' or `. Ignore these in the check. 
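# Descriptive aside: "SHOW GRANTS" output is quoted differently across server
# versions, e.g.
#     GRANT ALL PRIVILEGES ON `db`.* TO 'user'@'10.0.0.1'
#     GRANT ALL PRIVILEGES ON `db`.* TO `user`@`10.0.0.1`
# while priv_string above is built without quoting, so both quote styles are
# stripped before the membership test below.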
+ return priv_string in [ + i.replace("'", "").replace("`", "") for i in grants] + + def create_grant(self, db_name, db_user, remote_ip, password): + if self.grant_exists(db_name, db_user, remote_ip): + return + + # Make sure the user exists + # MySQL8 must create the user before the grant + self.create_user(db_user, remote_ip, password) + + cursor = self.connection.cursor() + try: + cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}'" + .format(db_name, db_user, remote_ip)) + finally: + cursor.close() + + def create_user(self, db_user, remote_ip, password): + + SQL_USER_CREATE = ( + "CREATE USER '{db_user}'@'{remote_ip}' " + "IDENTIFIED BY '{password}'") + + cursor = self.connection.cursor() + try: + cursor.execute(SQL_USER_CREATE.format( + db_user=db_user, + remote_ip=remote_ip, + password=password) + ) + except MySQLdb._exceptions.OperationalError: + log("DB user {} already exists.".format(db_user), + "WARNING") + finally: + cursor.close() + + def create_router_grant(self, db_user, remote_ip, password): + + # Make sure the user exists + # MySQL8 must create the user before the grant + self.create_user(db_user, remote_ip, password) + + # Mysql-Router specific grants + cursor = self.connection.cursor() + try: + cursor.execute("GRANT CREATE USER ON *.* TO '{}'@'{}' WITH GRANT " + "OPTION".format(db_user, remote_ip)) + cursor.execute("GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON " + "mysql_innodb_cluster_metadata.* TO '{}'@'{}'" + .format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON mysql.user TO '{}'@'{}'" + .format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON " + "performance_schema.replication_group_members " + "TO '{}'@'{}'".format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON " + "performance_schema.replication_group_member_stats " + "TO '{}'@'{}'".format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON " + "performance_schema.global_variables " + "TO '{}'@'{}'".format(db_user, remote_ip)) + finally: + cursor.close() + + def configure_router(self, hostname, username): + + if self.connection is None: + self.connect(password=self.get_mysql_root_password()) + + remote_ip = self.normalize_address(hostname) + password = self.get_mysql_password(username) + self.create_user(username, remote_ip, password) + self.create_router_grant(username, remote_ip, password) + + return password + + +def get_prefix(requested, keys=None): + """Return existing prefix or None. + + :param requested: Request string. i.e. novacell0_username + :type requested: str + :param keys: Keys to determine prefix. Defaults set in function. + :type keys: List of str keys + :returns: String prefix i.e. novacell0 + :rtype: Union[None, str] + """ + if keys is None: + # Shared-DB default keys + keys = ["_database", "_username", "_hostname"] + for key in keys: + if requested.endswith(key): + return requested[:-len(key)] + + +def get_db_data(relation_data, unprefixed): + """Organize database requests into a collections.OrderedDict + + :param relation_data: shared-db relation data + :type relation_data: dict + :param unprefixed: Prefix to use for requests without a prefix. This should + be unique for each side of the relation to avoid + conflicts. 
+ :type unprefixed: str + :returns: Order dict of databases and users + :rtype: collections.OrderedDict + """ + # Deep copy to avoid unintentionally changing relation data + settings = copy.deepcopy(relation_data) + databases = collections.OrderedDict() + + # Clear non-db related elements + if "egress-subnets" in settings.keys(): + settings.pop("egress-subnets") + if "ingress-address" in settings.keys(): + settings.pop("ingress-address") + if "private-address" in settings.keys(): + settings.pop("private-address") + + singleset = {"database", "username", "hostname"} + if singleset.issubset(settings): + settings["{}_{}".format(unprefixed, "hostname")] = ( + settings["hostname"]) + settings.pop("hostname") + settings["{}_{}".format(unprefixed, "database")] = ( + settings["database"]) + settings.pop("database") + settings["{}_{}".format(unprefixed, "username")] = ( + settings["username"]) + settings.pop("username") + + for k, v in settings.items(): + db = k.split("_")[0] + x = "_".join(k.split("_")[1:]) + if db not in databases: + databases[db] = collections.OrderedDict() + databases[db][x] = v + + return databases diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hahelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hahelpers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hahelpers/apache.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1e371e179bb6926f227331f86cb14a38c229a2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,86 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os + +from charmhelpers.core import host +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get(ssl_cert_attr, + rid=r_id, unit=unit) + if not key: + key = relation_get(ssl_key_attr, + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = config_get('ssl_ca') + if ca_cert is None: + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in (relation_ids('identity-service') + + relation_ids('identity-credentials')): + for unit in relation_list(r_id): + if ca_cert is None: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def retrieve_ca_cert(cert_file): + cert = None + if os.path.isfile(cert_file): + with open(cert_file, 'rb') as crt: + cert = crt.read() + return cert + + +def install_ca_cert(ca_cert): + host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hahelpers/cluster.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..ba34fba0cafa21d15a6a27946544b2c99fbd3663 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,451 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# Adam Gandelman +# + +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. 
+"""
+
+import functools
+import subprocess
+import os
+import time
+
+from socket import gethostname as get_unit_hostname
+
+import six
+
+from charmhelpers.core.hookenv import (
+    log,
+    relation_ids,
+    related_units as relation_list,
+    relation_get,
+    config as config_get,
+    INFO,
+    DEBUG,
+    WARNING,
+    unit_get,
+    is_leader as juju_is_leader,
+    status_set,
+)
+from charmhelpers.core.host import (
+    modulo_distribution,
+)
+from charmhelpers.core.decorators import (
+    retry_on_exception,
+)
+from charmhelpers.core.strutils import (
+    bool_from_string,
+)
+
+DC_RESOURCE_NAME = 'DC'
+
+
+class HAIncompleteConfig(Exception):
+    pass
+
+
+class HAIncorrectConfig(Exception):
+    pass
+
+
+class CRMResourceNotFound(Exception):
+    pass
+
+
+class CRMDCNotFound(Exception):
+    pass
+
+
+def is_elected_leader(resource):
+    """
+    Returns True if the charm executing this is the elected cluster leader.
+
+    It relies on three mechanisms to determine leadership:
+    1. If juju is sufficiently new and leadership election is supported,
+       the is_leader command will be used.
+    2. If the charm is part of a corosync cluster, call corosync to
+       determine leadership.
+    3. If the charm is not part of a corosync cluster, the leader is
+       determined as being "the alive unit with the lowest unit number". In
+       other words, the oldest surviving unit.
+    """
+    try:
+        return juju_is_leader()
+    except NotImplementedError:
+        log('Juju leadership election feature not enabled'
+            ', using fallback support',
+            level=WARNING)
+
+    if is_clustered():
+        if not is_crm_leader(resource):
+            log('Deferring action to CRM leader.', level=INFO)
+            return False
+    else:
+        peers = peer_units()
+        if peers and not oldest_peer(peers):
+            log('Deferring action to oldest service unit.', level=INFO)
+            return False
+    return True
+
+
+def is_clustered():
+    for r_id in (relation_ids('ha') or []):
+        for unit in (relation_list(r_id) or []):
+            clustered = relation_get('clustered',
+                                     rid=r_id,
+                                     unit=unit)
+            if clustered:
+                return True
+    return False
+
+
+def is_crm_dc():
+    """
+    Determine leadership by querying the pacemaker Designated Controller
+    """
+    cmd = ['crm', 'status']
+    try:
+        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        if not isinstance(status, six.text_type):
+            status = six.text_type(status, "utf-8")
+    except subprocess.CalledProcessError as ex:
+        raise CRMDCNotFound(str(ex))
+
+    current_dc = ''
+    for line in status.split('\n'):
+        if line.startswith('Current DC'):
+            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
+            current_dc = line.split(':')[1].split()[0]
+    if current_dc == get_unit_hostname():
+        return True
+    elif current_dc == 'NONE':
+        raise CRMDCNotFound('Current DC: NONE')
+
+    return False
+
+
+@retry_on_exception(5, base_delay=2,
+                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
+def is_crm_leader(resource, retry=False):
+    """
+    Returns True if the charm calling this is the elected corosync leader,
+    as returned by calling the external "crm" command.
+
+    We allow this operation to be retried to avoid the possibility of getting a
+    false negative. See LP #1396246 for more info.
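+
+    For example (illustrative): is_crm_leader('res_ks_vip') returns True only
+    on the unit whose hostname appears in the output of
+    `crm resource show res_ks_vip`.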
+ """ + if resource == DC_RESOURCE_NAME: + return is_crm_dc() + cmd = ['crm', 'resource', 'show', resource] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError: + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False + + +def is_leader(resource): + log("is_leader is deprecated. Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): + peers = [] + for r_id in (relation_ids(peer_relation) or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + +def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . + returns: boolean + ''' + use_https = config_get('use-https') + if use_https and bool_from_string(use_https): + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('certificates'): + for unit in relation_list(r_id): + ca = relation_get('ca', rid=r_id, unit=unit) + if ca: + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN + rel_state = [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): + return True + return False + + +def determine_api_port(public_port, singlenode_mode=False): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the API service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_apache_port(public_port, singlenode_mode=False): + ''' + Description: Determine correct apache listening port based on public IP + + state of the cluster. 
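+
+    For example (illustrative): with one or more peers present and
+    singlenode_mode False, public_port 9696 becomes 9686 (public_port - 10).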
+
+    public_port: int: standard public port for given service
+
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
+    returns: int: the correct listening port for the HAProxy service
+    '''
+    i = 0
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
+        i += 1
+    return public_port - (i * 10)
+
+
+determine_apache_port_single = functools.partial(
+    determine_apache_port, singlenode_mode=True)
+
+
+def get_hacluster_config(exclude_keys=None):
+    '''
+    Obtains all relevant configuration from charm configuration required
+    for initiating a relation to hacluster:
+
+        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
+        os-admin-hostname, os-public-hostname, os-access-hostname
+
+    param: exclude_keys: list of setting key(s) to be excluded.
+    returns: dict: A dict containing settings keyed by setting name.
+    raises: HAIncorrectConfig or HAIncompleteConfig if settings are missing
+            or incorrect.
+    '''
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
+                'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
+    conf = {}
+    for setting in settings:
+        if exclude_keys and setting in exclude_keys:
+            continue
+
+        conf[setting] = config_get(setting)
+
+    if not valid_hacluster_config():
+        raise HAIncorrectConfig('Insufficient or incorrect config data to '
+                                'configure hacluster.')
+    return conf
+
+
+def valid_hacluster_config():
+    '''
+    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
+    must be set.
+
+    Note: ha-bindiface and ha-mcastport both have defaults and will always
+    be set. We only care that either vip or dns-ha is set.
+
+    :returns: boolean: valid config returns true.
+    raises: HAIncorrectConfig if settings conflict.
+    raises: HAIncompleteConfig if settings are missing.
+    '''
+    vip = config_get('vip')
+    dns = config_get('dns-ha')
+    if not(bool(vip) ^ bool(dns)):
+        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
+               'use high availability')
+        status_set('blocked', msg)
+        raise HAIncorrectConfig(msg)
+
+    # If dns-ha then one of os-*-hostname must be set
+    if dns:
+        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
+                        'os-public-hostname', 'os-access-hostname']
+        # At this point it is unknown if one or all of the possible
+        # network spaces are in HA. Validate at least one is set which is
+        # the minimum required.
+        for setting in dns_settings:
+            if config_get(setting):
+                log('DNS HA: At least one hostname is set {}: {}'
+                    ''.format(setting, config_get(setting)),
+                    level=DEBUG)
+                return True
+
+        msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
+               'DNS HA')
+        status_set('blocked', msg)
+        raise HAIncompleteConfig(msg)
+
+    log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
+    return True
+
+
+def canonical_url(configs, vip_setting='vip'):
+    '''
+    Returns the correct HTTP URL to this host given the state of HTTPS
+    configuration and hacluster.
+
+    :configs    : OSTemplateRenderer: A config templating object to inspect for
+                  a complete https context.
+
+    :vip_setting: str: Setting in charm config that specifies
+                  VIP address.
+    '''
+    scheme = 'http'
+    if 'https' in configs.complete_contexts():
+        scheme = 'https'
+    if is_clustered():
+        addr = config_get(vip_setting)
+    else:
+        addr = unit_get('private-address')
+    return '%s://%s' % (scheme, addr)
+
+
+def distributed_wait(modulo=None, wait=None, operation_name='operation'):
+    ''' Distribute operations by waiting based on modulo_distribution
+
+    If modulo and/or wait are not set, check config_get for those values.
+    If config values are not set, default to modulo=3 and wait=30.
+
+    :param modulo: int The modulo number creates the group distribution
+    :param wait: int The constant time wait value
+    :param operation_name: string Operation name for status message
+                           i.e. 'restart'
+    :side effect: Calls config_get()
+    :side effect: Calls log()
+    :side effect: Calls status_set()
+    :side effect: Calls time.sleep()
+    '''
+    if modulo is None:
+        modulo = config_get('modulo-nodes') or 3
+    if wait is None:
+        wait = config_get('known-wait') or 30
+    if juju_is_leader():
+        # The leader should never wait
+        calculated_wait = 0
+    else:
+        # non_zero_wait=True guarantees the non-leader who gets modulo 0
+        # will still wait
+        calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
+                                              non_zero_wait=True)
+    msg = "Waiting {} seconds for {} ...".format(calculated_wait,
+                                                 operation_name)
+    log(msg, DEBUG)
+    status_set('maintenance', msg)
+    time.sleep(calculated_wait)
+
+
+def get_managed_services_and_ports(services, external_ports,
+                                   external_services=None,
+                                   port_conv_f=determine_apache_port_single):
+    """Get the services and ports managed by this charm.
+
+    Return only the services and corresponding ports that are managed by this
+    charm. This excludes haproxy when there is a relation with hacluster. This
+    is because this charm passes responsibility for stopping and starting
+    haproxy to hacluster.
+
+    Similarly, if a relation with hacluster exists then the ports returned by
+    this method correspond to those managed by the apache server rather than
+    haproxy.
+
+    :param services: List of services.
+    :type services: List[str]
+    :param external_ports: List of ports managed by external services.
+    :type external_ports: List[int]
+    :param external_services: List of services to be removed if ha relation is
+                              present.
+    :type external_services: List[str]
+    :param port_conv_f: Function to apply to ports to calculate the ports
+                        managed by services controlled by this charm.
+    :type port_conv_f: f()
+    :returns: A tuple containing a list of services first followed by a list of
+              ports.
+    :rtype: Tuple[List[str], List[int]]
+    """
+    if external_services is None:
+        external_services = ['haproxy']
+    if relation_ids('ha'):
+        for svc in external_services:
+            try:
+                services.remove(svc)
+            except ValueError:
+                pass
+        external_ports = [port_conv_f(p) for p in external_ports]
+    return services, external_ports
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/README.hardening.md b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/README.hardening.md
new file mode 100644
index 0000000000000000000000000000000000000000..91280c03e6d7b5d75b356cd94614fc821abc2644
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/README.hardening.md
@@ -0,0 +1,38 @@
+# Juju charm-helpers hardening library
+
+## Description
+
+This library provides multiple implementations of system and application
+hardening that conform to the standards of http://hardening.io/.
+
+Current implementations include:
+
+ * OS
+ * SSH
+ * MySQL
+ * Apache
+
+## Requirements
+
+* Juju Charms
+
+## Usage
+
+1. Synchronise this library into your charm and add the harden() decorator
+   (from contrib.hardening.harden) to any functions or methods you want to use
+   to trigger hardening of your application/system.
+
+2. Add a config option called 'harden' to your charm config.yaml and set it to
+   a space-delimited list of hardening modules you want to run e.g. "os ssh"
+
+3. Override any config defaults (contrib.hardening.defaults) by adding a file
+   called hardening.yaml to your charm root containing the name(s) of the
+   modules whose settings you want to override at root level and then any
+   settings with overrides e.g.
+
+   os:
+     general:
+       desktop_enable: True
+
+4. Now just run your charm as usual and hardening will be applied each time the
+   hook runs.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..30a3e94359e94011cd247de7ade76667346e7379
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/checks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bc2ebd4760124e23c128868e098aceac610260f
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/checks/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.apache.checks import config + + +def run_apache_checks(): + log("Starting Apache hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("Apache hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/checks/config.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/checks/config.py new file mode 100644 index 0000000000000000000000000000000000000000..341da9eee10f73cbe3d7e7e5cf91b57b4d2a89b4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/checks/config.py @@ -0,0 +1,104 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +import six +import subprocess + + +from charmhelpers.core.hookenv import ( + log, + INFO, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + NoReadWriteForOther, + TemplatedFile, + DeletedFile +) +from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit +from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get Apache hardening config audits. 
+
+    :returns: List of audits
+    """
+    if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
+        log("Apache server does not appear to be installed on this node - "
+            "skipping apache hardening", level=INFO)
+        return []
+
+    context = ApacheConfContext()
+    settings = utils.get_settings('apache')
+    audits = [
+        FilePermissionAudit(paths=os.path.join(
+                            settings['common']['apache_dir'], 'apache2.conf'),
+                            user='root', group='root', mode=0o0640),
+
+        TemplatedFile(os.path.join(settings['common']['apache_dir'],
+                                   'mods-available/alias.conf'),
+                      context,
+                      TEMPLATES_DIR,
+                      mode=0o0640,
+                      user='root',
+                      service_actions=[{'service': 'apache2',
+                                        'actions': ['restart']}]),
+
+        TemplatedFile(os.path.join(settings['common']['apache_dir'],
+                                   'conf-enabled/99-hardening.conf'),
+                      context,
+                      TEMPLATES_DIR,
+                      mode=0o0640,
+                      user='root',
+                      service_actions=[{'service': 'apache2',
+                                        'actions': ['restart']}]),
+
+        DirectoryPermissionAudit(settings['common']['apache_dir'],
+                                 user='root',
+                                 group='root',
+                                 mode=0o0750),
+
+        DisabledModuleAudit(settings['hardening']['modules_to_disable']),
+
+        NoReadWriteForOther(settings['common']['apache_dir']),
+
+        DeletedFile(['/var/www/html/index.html'])
+    ]
+
+    return audits
+
+
+class ApacheConfContext(object):
+    """Defines the set of key/value pairs to set in an apache config file.
+
+    This context, when called, will return a dictionary containing the
+    key/value pairs of settings to specify in the
+    /etc/apache2/conf-enabled/99-hardening.conf file.
+    """
+    def __call__(self):
+        settings = utils.get_settings('apache')
+        ctxt = settings['hardening']
+
+        out = subprocess.check_output(['apache2', '-v'])
+        if six.PY3:
+            out = out.decode('utf-8')
+        ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
+                                           out).group(1)
+        ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
+        return ctxt
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf
new file mode 100644
index 0000000000000000000000000000000000000000..22b68041d50ff753284bbb4b41a21e8f2bd8c18a
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf
@@ -0,0 +1,32 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+
+<Location / >
+    # http://httpd.apache.org/docs/2.4/upgrading.html
+    {% if apache_version > '2.2' -%}
+    Require all granted
+    {% else -%}
+    Order Allow,Deny
+    Deny from all
+    {% endif %}
+</Location>
+
+<Directory />
+    Options -Indexes -FollowSymLinks
+    AllowOverride None
+</Directory>
+
+<Directory /var/www/>
+    Options -Indexes -FollowSymLinks
+    AllowOverride None
+</Directory>
+
+TraceEnable {{ traceenable }}
+ServerTokens {{ servertokens }}
+
+SSLHonorCipherOrder {{ honor_cipher_order }}
+SSLCipherSuite {{ cipher_suite }}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/templates/alias.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/templates/alias.conf
new file mode 100644
index 0000000000000000000000000000000000000000..e46a58a30dadbb6ccffa02d82593c63a9cbf52df
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/apache/templates/alias.conf
@@ -0,0 +1,31 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+<IfModule alias_module>
+    #
+    # Aliases: Add here as many aliases as you need (with no limit). The format is
+    # Alias fakename realname
+    #
+    # Note that if you include a trailing / on fakename then the server will
+    # require it to be present in the URL. So "/icons" isn't aliased in this
+    # example, only "/icons/". If the fakename is slash-terminated, then the
+    # realname must also be slash terminated, and if the fakename omits the
+    # trailing slash, the realname must also omit it.
+    #
+    # We include the /icons/ alias for FancyIndexed directory listings. If
+    # you do not use FancyIndexing, you may comment this out.
+    #
+    Alias /icons/ "{{ apache_icondir }}/"
+
+    <Directory "{{ apache_icondir }}">
+        Options -Indexes -MultiViews -FollowSymLinks
+        AllowOverride None
+{% if apache_version == '2.4' -%}
+        Require all granted
+{% else -%}
+        Order allow,deny
+        Allow from all
+{% endif %}
+    </Directory>
+</IfModule>
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dd5b05fec4ffcfcdb4378a06dfda4e8ac7e8371
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/__init__.py
@@ -0,0 +1,54 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class BaseAudit(object):  # NO-QA
+    """Base class for hardening checks.
+
+    The lifecycle of a hardening check is to first check to see if the system
+    is in compliance for the specified check. If it is not in compliance, the
+    comply method is invoked to bring the system into compliance.
+ """ + def __init__(self, *args, **kwargs): + self.unless = kwargs.get('unless', None) + super(BaseAudit, self).__init__() + + def ensure_compliance(self): + """Checks to see if the current hardening check is in compliance or + not. + + If the check that is performed is not in compliance, then an exception + should be raised. + """ + pass + + def _take_action(self): + """Determines whether to perform the action or not. + + Checks whether or not an action should be taken. This is determined by + the truthy value for the unless parameter. If unless is a callback + method, it will be invoked with no parameters in order to determine + whether or not the action should be taken. Otherwise, the truthy value + of the unless attribute will determine if the action should be + performed. + """ + # Do the action if there isn't an unless override. + if self.unless is None: + return True + + # Invoke the callback if there is one. + if hasattr(self.unless, '__call__'): + return not self.unless() + + return not self.unless diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/apache.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/apache.py new file mode 100644 index 0000000000000000000000000000000000000000..04825f5ada0c5b0bb9fc0955baa9a10fa199184d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/apache.py @@ -0,0 +1,100 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import subprocess + +import six + +from charmhelpers.core.hookenv import ( + log, + INFO, + ERROR, +) + +from charmhelpers.contrib.hardening.audits import BaseAudit + + +class DisabledModuleAudit(BaseAudit): + """Audits Apache2 modules. + + Determines if the apache2 modules are enabled. If the modules are enabled + then they are removed in the ensure_compliance. + """ + def __init__(self, modules): + if modules is None: + self.modules = [] + elif isinstance(modules, six.string_types): + self.modules = [modules] + else: + self.modules = modules + + def ensure_compliance(self): + """Ensures that the modules are not loaded.""" + if not self.modules: + return + + try: + loaded_modules = self._get_loaded_modules() + non_compliant_modules = [] + for module in self.modules: + if module in loaded_modules: + log("Module '%s' is enabled but should not be." % + (module), level=INFO) + non_compliant_modules.append(module) + + if len(non_compliant_modules) == 0: + return + + for module in non_compliant_modules: + self._disable_module(module) + self._restart_apache() + except subprocess.CalledProcessError as e: + log('Error occurred auditing apache module compliance. ' + 'This may have been already reported. 
' + 'Output is: %s' % e.output, level=ERROR) + + @staticmethod + def _get_loaded_modules(): + """Returns the modules which are enabled in Apache.""" + output = subprocess.check_output(['apache2ctl', '-M']) + if six.PY3: + output = output.decode('utf-8') + modules = [] + for line in output.splitlines(): + # Each line of the enabled module output looks like: + # module_name (static|shared) + # Plus a header line at the top of the output which is stripped + # out by the regex. + matcher = re.search(r'^ (\S*)_module (\S*)', line) + if matcher: + modules.append(matcher.group(1)) + return modules + + @staticmethod + def _disable_module(module): + """Disables the specified module in Apache.""" + try: + subprocess.check_call(['a2dismod', module]) + except subprocess.CalledProcessError as e: + # Note: catch error here to allow the attempt of disabling + # multiple modules in one go rather than failing after the + # first module fails. + log('Error occurred disabling module %s. ' + 'Output is: %s' % (module, e.output), level=ERROR) + + @staticmethod + def _restart_apache(): + """Restarts the apache process""" + subprocess.check_output(['service', 'apache2', 'restart']) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/apt.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/apt.py new file mode 100644 index 0000000000000000000000000000000000000000..cad7bf7376d6f22ce8feccd843b070c399887aa9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/apt.py @@ -0,0 +1,104 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
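+
+# For example (illustrative), a charm could purge known-insecure packages
+# with the RestrictedPackages audit defined below:
+#
+#     RestrictedPackages(pkgs=['telnetd', 'rsh-server']).ensure_compliance()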
+
+from __future__ import absolute_import  # required for external apt import
+from six import string_types
+
+from charmhelpers.fetch import (
+    apt_cache,
+    apt_purge
+)
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg
+
+
+class AptConfig(BaseAudit):
+
+    def __init__(self, config, **kwargs):
+        self.config = config
+
+    def verify_config(self):
+        apt_pkg.init()
+        for cfg in self.config:
+            value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
+            if value and value != cfg['expected']:
+                log("APT config '%s' has unexpected value '%s' "
+                    "(expected='%s')" %
+                    (cfg['key'], value, cfg['expected']), level=WARNING)
+
+    def ensure_compliance(self):
+        self.verify_config()
+
+
+class RestrictedPackages(BaseAudit):
+    """Class used to audit restricted packages on the system."""
+
+    def __init__(self, pkgs, **kwargs):
+        super(RestrictedPackages, self).__init__(**kwargs)
+        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
+            self.pkgs = pkgs.split()
+        else:
+            self.pkgs = pkgs
+
+    def ensure_compliance(self):
+        cache = apt_cache()
+
+        for p in self.pkgs:
+            if p not in cache:
+                continue
+
+            pkg = cache[p]
+            if not self.is_virtual_package(pkg):
+                if not pkg.current_ver:
+                    log("Package '%s' is not installed." % pkg.name,
+                        level=DEBUG)
+                    continue
+                else:
+                    log("Restricted package '%s' is installed" % pkg.name,
+                        level=WARNING)
+                    self.delete_package(cache, pkg)
+            else:
+                log("Checking restricted virtual package '%s' provides" %
+                    pkg.name, level=DEBUG)
+                self.delete_package(cache, pkg)
+
+    def delete_package(self, cache, pkg):
+        """Deletes the package from the system.
+
+        Deletes the package from the system, properly handling virtual
+        packages.
+
+        :param cache: the apt cache
+        :param pkg: the package to remove
+        """
+        if self.is_virtual_package(pkg):
+            log("Package '%s' appears to be virtual - purging provides" %
+                pkg.name, level=DEBUG)
+            for _p in pkg.provides_list:
+                self.delete_package(cache, _p[2].parent_pkg)
+        elif not pkg.current_ver:
+            log("Package '%s' not installed" % pkg.name, level=DEBUG)
+            return
+        else:
+            log("Purging package '%s'" % pkg.name, level=DEBUG)
+            apt_purge(pkg.name)
+
+    def is_virtual_package(self, pkg):
+        return (pkg.get('has_provides', False) and
+                not pkg.get('has_versions', False))
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/file.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/file.py
new file mode 100644
index 0000000000000000000000000000000000000000..257c6351a0b0d244273013faef913f52349f2486
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/audits/file.py
@@ -0,0 +1,550 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
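+
+# The audits below follow the lifecycle defined by BaseFileAudit:
+# is_compliant(path) reports on a path and comply(path) enforces it.
+# For example (illustrative):
+#
+#     ReadOnly(['/etc/ssh']).ensure_compliance()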
+
+import grp
+import os
+import pwd
+import re
+
+from subprocess import (
+    CalledProcessError,
+    check_output,
+    check_call,
+)
+from traceback import format_exc
+from six import string_types
+from stat import (
+    S_ISGID,
+    S_ISUID
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import file_hash
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.contrib.hardening.templating import (
+    get_template_path,
+    render_and_write,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+class BaseFileAudit(BaseAudit):
+    """Base class for file audits.
+
+    Provides API stubs for the compliance check flow that must be implemented
+    by any class that inherits from this one.
+    """
+
+    def __init__(self, paths, always_comply=False, *args, **kwargs):
+        """
+        :param paths: string path or list of paths of files we want to apply
+                      compliance checks and criteria to.
+        :param always_comply: if true compliance criteria is always applied
+                              else compliance is skipped for non-existent
+                              paths.
+        """
+        super(BaseFileAudit, self).__init__(*args, **kwargs)
+        self.always_comply = always_comply
+        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
+            self.paths = [paths]
+        else:
+            self.paths = paths
+
+    def ensure_compliance(self):
+        """Ensure that all registered files comply with registered criteria.
+        """
+        for p in self.paths:
+            if os.path.exists(p):
+                if self.is_compliant(p):
+                    continue
+
+                log('File %s is not in compliance.' % p, level=INFO)
+            else:
+                if not self.always_comply:
+                    log("Non-existent path '%s' - skipping compliance check"
+                        % (p), level=INFO)
+                    continue
+
+            if self._take_action():
+                log("Applying compliance criteria to '%s'" % (p), level=INFO)
+                self.comply(p)
+
+    def is_compliant(self, path):
+        """Audits the path to see if it is in compliance.
+
+        :param path: the path to the file that should be checked.
+        """
+        raise NotImplementedError
+
+    def comply(self, path):
+        """Enforces the compliance of a path.
+
+        :param path: the path to the file that should be enforced.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def _get_stat(cls, path):
+        """Returns the Posix st_stat information for the specified file path.
+
+        :param path: the path to get the st_stat information for.
+        :returns: an st_stat object for the path or None if the path doesn't
+                  exist.
+        """
+        return os.stat(path)
+
+
+class FilePermissionAudit(BaseFileAudit):
+    """Implements an audit for file permissions and ownership for a user.
+
+    This class implements functionality that ensures that a specific user/group
+    will own the file(s) specified and that the permissions specified are
+    applied properly to the file.
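+
+    Example (illustrative):
+
+        FilePermissionAudit('/etc/ssh/sshd_config', user='root',
+                            group='root', mode=0o600)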
+ """ + def __init__(self, paths, user, group=None, mode=0o600, **kwargs): + self.user = user + self.group = group + self.mode = mode + super(FilePermissionAudit, self).__init__(paths, user, group, mode, + **kwargs) + + @property + def user(self): + return self._user + + @user.setter + def user(self, name): + try: + user = pwd.getpwnam(name) + except KeyError: + log('Unknown user %s' % name, level=ERROR) + user = None + self._user = user + + @property + def group(self): + return self._group + + @group.setter + def group(self, name): + try: + group = None + if name: + group = grp.getgrnam(name) + else: + group = grp.getgrgid(self.user.pw_gid) + except KeyError: + log('Unknown group %s' % name, level=ERROR) + self._group = group + + def is_compliant(self, path): + """Checks if the path is in compliance. + + Used to determine if the path specified meets the necessary + requirements to be in compliance with the check itself. + + :param path: the file path to check + :returns: True if the path is compliant, False otherwise. + """ + stat = self._get_stat(path) + user = self.user + group = self.group + + compliant = True + if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid: + log('File %s is not owned by %s:%s.' % (path, user.pw_name, + group.gr_name), + level=INFO) + compliant = False + + # POSIX refers to the st_mode bits as corresponding to both the + # file type and file permission bits, where the least significant 12 + # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the + # file permission bits (8-0) + perms = stat.st_mode & 0o7777 + if perms != self.mode: + log('File %s has incorrect permissions, currently set to %s' % + (path, oct(stat.st_mode & 0o7777)), level=INFO) + compliant = False + + return compliant + + def comply(self, path): + """Issues a chown and chmod to the file paths specified.""" + utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, + self.mode) + + +class DirectoryPermissionAudit(FilePermissionAudit): + """Performs a permission check for the specified directory path.""" + + def __init__(self, paths, user, group=None, mode=0o600, + recursive=True, **kwargs): + super(DirectoryPermissionAudit, self).__init__(paths, user, group, + mode, **kwargs) + self.recursive = recursive + + def is_compliant(self, path): + """Checks if the directory is compliant. + + Used to determine if the path specified and all of its children + directories are in compliance with the check itself. + + :param path: the directory path to check + :returns: True if the directory tree is compliant, otherwise False. + """ + if not os.path.isdir(path): + log('Path specified %s is not a directory.' % path, level=ERROR) + raise ValueError("%s is not a directory." 
+                             % path)
+
+        if not self.recursive:
+            return super(DirectoryPermissionAudit, self).is_compliant(path)
+
+        compliant = True
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                continue
+
+            if not super(DirectoryPermissionAudit, self).is_compliant(root):
+                compliant = False
+                continue
+
+        return compliant
+
+    def comply(self, path):
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                super(DirectoryPermissionAudit, self).comply(root)
+
+
+class ReadOnly(BaseFileAudit):
+    """Audits that files and folders are read only."""
+    def __init__(self, paths, *args, **kwargs):
+        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        try:
+            output = check_output(['find', path, '-perm', '-go+w',
+                                   '-type', 'f']).strip()
+
+            # The find above will find any files which have permission sets
+            # which allow too broad of write access. As such, the path is
+            # compliant if there is no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred while finding writable files for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc(e)), level=ERROR)
+            return False
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', 'go-w', '-R', path])
+        except CalledProcessError as e:
+            log('Error occurred removing writeable permissions for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc(e)), level=ERROR)
+
+
+class NoReadWriteForOther(BaseFileAudit):
+    """Ensures that the files found under the base path are not readable or
+    writable by anyone other than the owner or the group.
+    """
+    def __init__(self, paths):
+        super(NoReadWriteForOther, self).__init__(paths)
+
+    def is_compliant(self, path):
+        try:
+            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
+                   '-perm', '-o+w', '-type', 'f']
+            output = check_output(cmd).strip()
+
+            # The find above here will find any files which have read or
+            # write permissions for other, meaning there is too broad of access
+            # to read/write the file. As such, the path is compliant if there's
+            # no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred while finding files which are readable or '
+                'writable to the world in %s. '
+                'Command output is: %s.' % (path, e.output), level=ERROR)
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', '-R', 'o-rw', path])
+        except CalledProcessError as e:
+            log('Error occurred attempting to change modes of files under '
+                'path %s. Output of command is: %s' % (path, e.output))
+
+
+class NoSUIDSGIDAudit(BaseFileAudit):
+    """Audits that specified files do not have SUID/SGID bits set."""
+    def __init__(self, paths, *args, **kwargs):
+        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        stat = self._get_stat(path)
+        if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
+            return False
+
+        return True
+
+    def comply(self, path):
+        try:
+            log('Removing suid/sgid from %s.' % path, level=DEBUG)
+            check_output(['chmod', '-s', path])
+        except CalledProcessError as e:
+            log('Error occurred removing suid/sgid from %s.'
+ 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + + +class TemplatedFile(BaseFileAudit): + """The TemplatedFileAudit audits the contents of a templated file. + + This audit renders a file from a template, sets the appropriate file + permissions, then generates a hashsum with which to check the content + changed. + """ + def __init__(self, path, context, template_dir, mode, user='root', + group='root', service_actions=None, **kwargs): + self.context = context + self.user = user + self.group = group + self.mode = mode + self.template_dir = template_dir + self.service_actions = service_actions + super(TemplatedFile, self).__init__(paths=path, always_comply=True, + **kwargs) + + def is_compliant(self, path): + """Determines if the templated file is compliant. + + A templated file is only compliant if it has not changed (as + determined by its sha256 hashsum) AND its file permissions are set + appropriately. + + :param path: the path to check compliance. + """ + same_templates = self.templates_match(path) + same_content = self.contents_match(path) + same_permissions = self.permissions_match(path) + + if same_content and same_permissions and same_templates: + return True + + return False + + def run_service_actions(self): + """Run any actions on services requested.""" + if not self.service_actions: + return + + for svc_action in self.service_actions: + name = svc_action['service'] + actions = svc_action['actions'] + log("Running service '%s' actions '%s'" % (name, actions), + level=DEBUG) + for action in actions: + cmd = ['service', name, action] + try: + check_call(cmd) + except CalledProcessError as exc: + log("Service name='%s' action='%s' failed - %s" % + (name, action, exc), level=WARNING) + + def comply(self, path): + """Ensures the contents and the permissions of the file. + + :param path: the path to correct + """ + dirname = os.path.dirname(path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + self.pre_write() + render_and_write(self.template_dir, path, self.context()) + utils.ensure_permissions(path, self.user, self.group, self.mode) + self.run_service_actions() + self.save_checksum(path) + self.post_write() + + def pre_write(self): + """Invoked prior to writing the template.""" + pass + + def post_write(self): + """Invoked after writing the template.""" + pass + + def templates_match(self, path): + """Determines if the template files are the same. + + The template file equality is determined by the hashsum of the + template files themselves. If there is no hashsum, then the content + cannot be sure to be the same so treat it as if they changed. + Otherwise, return whether or not the hashsums are the same. + + :param path: the path to check + :returns: boolean + """ + template_path = get_template_path(self.template_dir, path) + key = 'hardening:template:%s' % template_path + template_checksum = file_hash(template_path) + kv = unitdata.kv() + stored_tmplt_checksum = kv.get(key) + if not stored_tmplt_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Saved template checksum for %s.' % template_path, + level=DEBUG) + # Since we don't have a template checksum, then assume it doesn't + # match and return that the template is different. + return False + elif stored_tmplt_checksum != template_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Updated template checksum for %s.' 
+                % template_path, level=DEBUG)
+            return False
+
+        # Here the template hasn't changed based upon the calculated
+        # checksum of the template and what was previously stored.
+        return True
+
+    def contents_match(self, path):
+        """Determines if the file content is the same.
+
+        This is determined by comparing hashsum of the file contents and
+        the saved hashsum. If there is no hashsum, then the content cannot
+        be sure to be the same so treat them as if they are not the same.
+        Otherwise, return True if the hashsums are the same, False if they
+        are not the same.
+
+        :param path: the file to check.
+        """
+        checksum = file_hash(path)
+
+        kv = unitdata.kv()
+        stored_checksum = kv.get('hardening:%s' % path)
+        if not stored_checksum:
+            # If the checksum hasn't been generated, return False to ensure
+            # the file is written and the checksum stored.
+            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
+            return False
+        elif stored_checksum != checksum:
+            log('Checksum mismatch for %s.' % path, level=DEBUG)
+            return False
+
+        return True
+
+    def permissions_match(self, path):
+        """Determines if the file owner and permissions match.
+
+        :param path: the path to check.
+        """
+        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
+        return audit.is_compliant(path)
+
+    def save_checksum(self, path):
+        """Calculates and saves the checksum for the path specified.
+
+        :param path: the path of the file to save the checksum.
+        """
+        checksum = file_hash(path)
+        kv = unitdata.kv()
+        kv.set('hardening:%s' % path, checksum)
+        kv.flush()
+
+
+class DeletedFile(BaseFileAudit):
+    """Audit to ensure that a file is deleted."""
+    def __init__(self, paths):
+        super(DeletedFile, self).__init__(paths)
+
+    def is_compliant(self, path):
+        return not os.path.exists(path)
+
+    def comply(self, path):
+        os.remove(path)
+
+
+class FileContentAudit(BaseFileAudit):
+    """Audit the contents of a file."""
+    def __init__(self, paths, cases, **kwargs):
+        # Cases we expect to pass
+        self.pass_cases = cases.get('pass', [])
+        # Cases we expect to fail
+        self.fail_cases = cases.get('fail', [])
+        super(FileContentAudit, self).__init__(paths, **kwargs)
+
+    def is_compliant(self, path):
+        """
+        Given a set of content matching cases i.e. tuple(regex, bool) where
+        bool value denotes whether or not regex is expected to match, check that
+        all cases match as expected with the contents of the file. Cases can be
+        expected to pass or fail.
+
+        :param path: Path of file to check.
+        :returns: Boolean value representing whether or not all cases are
+                  found to be compliant.
+        """
+        log("Auditing contents of file '%s'" % (path), level=DEBUG)
+        with open(path, 'r') as fd:
+            contents = fd.read()
+
+        matches = 0
+        for pattern in self.pass_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to pass but instead it failed"
+                    % (pattern), level=WARNING)
+
+        for pattern in self.fail_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if not results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to fail but instead it passed"
+                    % (pattern), level=WARNING)
+
+        total = len(self.pass_cases) + len(self.fail_cases)
+        log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
+        return matches == total
+
+    def comply(self, *args, **kwargs):
+        """NOOP since we just issue warnings. This is to avoid the
+        NotImplementedError.
+ """ + log("Not applying any compliance criteria, only checks.", level=INFO) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/apache.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/apache.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f940d4cfa85ca7051dd60a4805d84bb6aebed6d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -0,0 +1,16 @@ +# NOTE: this file contains the default configuration for the 'apache' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'apache' as the root key followed by any of the following with new +# values. + +common: + apache_dir: '/etc/apache2' + +hardening: + traceenable: 'off' + allowed_http_methods: "GET POST" + modules_to_disable: [ cgi, cgid ] + servertokens: 'Prod' + honor_cipher_order: 'on' + cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/apache.yaml.schema new file mode 100644 index 0000000000000000000000000000000000000000..c112137cb45c4b63cb05384145b3edf8c443e2b8 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -0,0 +1,12 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + apache_dir: + traceenable: + +hardening: + allowed_http_methods: + modules_to_disable: + servertokens: + honor_cipher_order: + cipher_suite: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/mysql.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/mysql.yaml new file mode 100644 index 0000000000000000000000000000000000000000..682d22bf3ded32eb1c8d6188486ec4468d9ec457 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/mysql.yaml @@ -0,0 +1,38 @@ +# NOTE: this file contains the default configuration for the 'mysql' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'mysql' as the root key followed by any of the following with new +# values. 
+ +hardening: + mysql-conf: /etc/mysql/my.cnf + hardening-conf: /etc/mysql/conf.d/hardening.cnf + +security: + # @see http://www.symantec.com/connect/articles/securing-mysql-step-step + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot + chroot: None + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create + safe-user-create: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth + secure-auth: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links + skip-symbolic-links: 1 + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database + skip-show-database: True + + # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile + local-infile: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs + allow-suspicious-udfs: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges + automatic-sp-privileges: 0 + + # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv + secure-file-priv: /tmp diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema new file mode 100644 index 0000000000000000000000000000000000000000..2edf325c311c6fbb062a072083b4d12cebc3d9c1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema @@ -0,0 +1,15 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +hardening: + mysql-conf: + hardening-conf: +security: + chroot: + safe-user-create: + secure-auth: + skip-symbolic-links: + skip-show-database: + local-infile: + allow-suspicious-udfs: + automatic-sp-privileges: + secure-file-priv: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/os.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/os.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a8627b5ed2803828e1e4d78260c6b5f90cae659 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/os.yaml @@ -0,0 +1,68 @@ +# NOTE: this file contains the default configuration for the 'os' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'os' as the root key followed by any of the following with new +# values. 
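+#
+# (editorial example; hypothetical values) for instance, to relax the login
+# timeout and keep core dumps enabled:
+#
+#   os:
+#     auth:
+#       timeout: 120
+#     security:
+#       kernel_enable_core_dump: True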
+ +general: + desktop_enable: False # (type:boolean) + +environment: + extra_user_paths: [] + umask: 027 + root_path: / + +auth: + pw_max_age: 60 + # discourage password cycling + pw_min_age: 7 + retries: 5 + lockout_time: 600 + timeout: 60 + allow_homeless: False # (type:boolean) + pam_passwdqc_enable: True # (type:boolean) + pam_passwdqc_options: 'min=disabled,disabled,16,12,8' + root_ttys: + console + tty1 + tty2 + tty3 + tty4 + tty5 + tty6 + uid_min: 1000 + gid_min: 1000 + sys_uid_min: 100 + sys_uid_max: 999 + sys_gid_min: 100 + sys_gid_max: 999 + chfn_restrict: + +security: + users_allow: [] + suid_sgid_enforce: True # (type:boolean) + # user-defined blacklist and whitelist + suid_sgid_blacklist: [] + suid_sgid_whitelist: [] + # if this is True, remove any suid/sgid bits from files that were not in the whitelist + suid_sgid_dry_run_on_unknown: False # (type:boolean) + suid_sgid_remove_from_unknown: False # (type:boolean) + # remove packages with known issues + packages_clean: True # (type:boolean) + packages_list: + xinetd + inetd + ypserv + telnet-server + rsh-server + rsync + kernel_enable_module_loading: True # (type:boolean) + kernel_enable_core_dump: False # (type:boolean) + ssh_tmout: 300 + +sysctl: + kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 + kernel_enable_sysrq: False # (type:boolean) + forwarding: False # (type:boolean) + ipv6_enable: False # (type:boolean) + arp_restricted: True # (type:boolean) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/os.yaml.schema new file mode 100644 index 0000000000000000000000000000000000000000..cc3b9c206eae56cbe68826cb76748e2deb9483e1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/os.yaml.schema @@ -0,0 +1,43 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +general: + desktop_enable: +environment: + extra_user_paths: + umask: + root_path: +auth: + pw_max_age: + pw_min_age: + retries: + lockout_time: + timeout: + allow_homeless: + pam_passwdqc_enable: + pam_passwdqc_options: + root_ttys: + uid_min: + gid_min: + sys_uid_min: + sys_uid_max: + sys_gid_min: + sys_gid_max: + chfn_restrict: +security: + users_allow: + suid_sgid_enforce: + suid_sgid_blacklist: + suid_sgid_whitelist: + suid_sgid_dry_run_on_unknown: + suid_sgid_remove_from_unknown: + packages_clean: + packages_list: + kernel_enable_module_loading: + kernel_enable_core_dump: + ssh_tmout: +sysctl: + kernel_secure_sysrq: + kernel_enable_sysrq: + forwarding: + ipv6_enable: + arp_restricted: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/ssh.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/ssh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cd529bcae1ec00fef2e969f43dc3cf530b46ef9a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/ssh.yaml @@ -0,0 +1,49 @@ +# NOTE: this file contains the default configuration for the 'ssh' hardening +# code. 
If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'ssh' as the root key followed by any of the following with new +# values. + +common: + service_name: 'ssh' + network_ipv6_enable: False # (type:boolean) + ports: [22] + remote_hosts: [] + +client: + package: 'openssh-client' + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + roaming: False + password_authentication: 'no' + +server: + host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key', + '/etc/ssh/ssh_host_ecdsa_key'] + cbc_required: False # (type:boolean) + weak_hmac: False # (type:boolean) + weak_kex: False # (type:boolean) + allow_root_with_key: False # (type:boolean) + allow_tcp_forwarding: 'no' + allow_agent_forwarding: 'no' + allow_x11_forwarding: 'no' + use_privilege_separation: 'sandbox' + listen_to: ['0.0.0.0'] + use_pam: 'no' + package: 'openssh-server' + password_authentication: 'no' + alive_interval: '600' + alive_count: '3' + sftp_enable: False # (type:boolean) + sftp_group: 'sftponly' + sftp_chroot: '/home/%u' + deny_users: [] + allow_users: [] + deny_groups: [] + allow_groups: [] + print_motd: 'no' + print_last_log: 'no' + use_dns: 'no' + max_auth_tries: 2 + max_sessions: 10 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema new file mode 100644 index 0000000000000000000000000000000000000000..d05e054bc234015206bb1195152fa9ffd6a33151 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema @@ -0,0 +1,42 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + service_name: + network_ipv6_enable: + ports: + remote_hosts: +client: + package: + cbc_required: + weak_hmac: + weak_kex: + roaming: + password_authentication: +server: + host_key_files: + cbc_required: + weak_hmac: + weak_kex: + allow_root_with_key: + allow_tcp_forwarding: + allow_agent_forwarding: + allow_x11_forwarding: + use_privilege_separation: + listen_to: + use_pam: + package: + password_authentication: + alive_interval: + alive_count: + sftp_enable: + sftp_group: + sftp_chroot: + deny_users: + allow_users: + deny_groups: + allow_groups: + print_motd: + print_last_log: + use_dns: + max_auth_tries: + max_sessions: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/harden.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/harden.py new file mode 100644 index 0000000000000000000000000000000000000000..63f21b9c9855065da3be875c01a2c94db7df47b4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/harden.py @@ -0,0 +1,96 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six + +from collections import OrderedDict + +from charmhelpers.core.hookenv import ( + config, + log, + DEBUG, + WARNING, +) +from charmhelpers.contrib.hardening.host.checks import run_os_checks +from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks +from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks +from charmhelpers.contrib.hardening.apache.checks import run_apache_checks + +_DISABLE_HARDENING_FOR_UNIT_TEST = False + + +def harden(overrides=None): + """Hardening decorator. + + This is the main entry point for running the hardening stack. In order to + run modules of the stack you must add this decorator to charm hook(s) and + ensure that your charm config.yaml contains the 'harden' option set to + one or more of the supported modules. Setting these will cause the + corresponding hardening code to be run when the hook fires. + + This decorator can and should be applied to more than one hook or function + such that hardening modules are called multiple times. This is because + subsequent calls will perform auditing checks that will report any changes + to resources hardened by the first run (and possibly perform compliance + actions as a result of any detected infractions). + + :param overrides: Optional list of stack modules used to override those + provided with 'harden' config. + :returns: Returns value returned by decorated function once executed. + """ + if overrides is None: + overrides = [] + + def _harden_inner1(f): + # As this has to be py2.7 compat, we can't use nonlocal. Use a trick + # to capture the dictionary that can then be updated. + _logged = {'done': False} + + def _harden_inner2(*args, **kwargs): + # knock out hardening via a config var; normally it won't get + # disabled. 
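+            # (editorial note) _DISABLE_HARDENING_FOR_UNIT_TEST is a
+            # module-level switch so unit tests can invoke decorated hooks
+            # without actually hardening the test host.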
+ if _DISABLE_HARDENING_FOR_UNIT_TEST: + return f(*args, **kwargs) + if not _logged['done']: + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + _logged['done'] = True + RUN_CATALOG = OrderedDict([('os', run_os_checks), + ('ssh', run_ssh_checks), + ('mysql', run_mysql_checks), + ('apache', run_apache_checks)]) + + enabled = overrides[:] or (config("harden") or "").split() + if enabled: + modules_to_run = [] + # modules will always be performed in the following order + for module, func in six.iteritems(RUN_CATALOG): + if module in enabled: + enabled.remove(module) + modules_to_run.append(func) + + if enabled: + log("Unknown hardening modules '%s' - ignoring" % + (', '.join(enabled)), level=WARNING) + + for hardener in modules_to_run: + log("Executing hardening module '%s'" % + (hardener.__name__), level=DEBUG) + hardener() + else: + log("No hardening applied to '%s'" % (f.__name__), level=DEBUG) + + return f(*args, **kwargs) + return _harden_inner2 + + return _harden_inner1 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0e7e409f3c7e0406b40353f48acfc3479e4c1a24 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -0,0 +1,48 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.host.checks import ( + apt, + limits, + login, + minimize_access, + pam, + profile, + securetty, + suid_sgid, + sysctl +) + + +def run_os_checks(): + log("Starting OS hardening checks.", level=DEBUG) + checks = apt.get_audits() + checks.extend(limits.get_audits()) + checks.extend(login.get_audits()) + checks.extend(minimize_access.get_audits()) + checks.extend(pam.get_audits()) + checks.extend(profile.get_audits()) + checks.extend(securetty.get_audits()) + checks.extend(suid_sgid.get_audits()) + checks.extend(sysctl.get_audits()) + + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("OS hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/apt.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/apt.py new file mode 100644 index 0000000000000000000000000000000000000000..7ce41b0043134e256d9c20ee729f1c4345faa3f9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/apt.py @@ -0,0 +1,37 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.utils import get_settings +from charmhelpers.contrib.hardening.audits.apt import ( + AptConfig, + RestrictedPackages, +) + + +def get_audits(): + """Get OS hardening apt audits. + + :returns: dictionary of audits + """ + audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated', + 'expected': 'false'}])] + + settings = get_settings('os') + clean_packages = settings['security']['packages_clean'] + if clean_packages: + security_packages = settings['security']['packages_list'] + if security_packages: + audits.append(RestrictedPackages(security_packages)) + + return audits diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/limits.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/limits.py new file mode 100644 index 0000000000000000000000000000000000000000..e94f5ebef360c7c80c35eba8243d3e7f7dcbb14d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/limits.py @@ -0,0 +1,53 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.contrib.hardening.audits.file import (
+    DirectoryPermissionAudit,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening security limits audits.
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Ensure that the /etc/security/limits.d directory is only writable
+    # by the root user, but others can execute and read.
+    audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
+                                           user='root', group='root',
+                                           mode=0o755))
+
+    # If core dumps are not enabled, then don't allow core dumps to be
+    # created as they may contain sensitive information.
+    if not settings['security']['kernel_enable_core_dump']:
+        audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
+                                    SecurityLimitsContext(),
+                                    template_dir=TEMPLATES_DIR,
+                                    user='root', group='root', mode=0o0440))
+    return audits
+
+
+class SecurityLimitsContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'disable_core_dump':
+                not settings['security']['kernel_enable_core_dump']}
+        return ctxt
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/login.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/login.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe2bc6ef34a0dae612c2617dc1d13390f651e419
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/login.py
@@ -0,0 +1,65 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from six import string_types
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening login.defs audits.
+
+    :returns: list of audits
+    """
+    audits = [TemplatedFile('/etc/login.defs', LoginContext(),
+                            template_dir=TEMPLATES_DIR,
+                            user='root', group='root', mode=0o0444)]
+    return audits
+
+
+class LoginContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+
+        # Octal numbers in yaml end up being turned into decimal,
+        # so check if the umask is entered as a string (e.g. '027')
+        # or as an octal umask as we know it (e.g. 002). If it's not
+        # a string, assume it to be octal and turn it into an octal
+        # string.
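+        # (editorial sketch) e.g. oct(0o027) returns '0o27' on Python 3
+        # ('027' on Python 2), so charms that care about the exact string
+        # rendered into login.defs should quote the umask, e.g. '027', in
+        # their hardening.yaml.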
+ umask = settings['environment']['umask'] + if not isinstance(umask, string_types): + umask = '%s' % oct(umask) + + ctxt = { + 'additional_user_paths': + settings['environment']['extra_user_paths'], + 'umask': umask, + 'pwd_max_age': settings['auth']['pw_max_age'], + 'pwd_min_age': settings['auth']['pw_min_age'], + 'uid_min': settings['auth']['uid_min'], + 'sys_uid_min': settings['auth']['sys_uid_min'], + 'sys_uid_max': settings['auth']['sys_uid_max'], + 'gid_min': settings['auth']['gid_min'], + 'sys_gid_min': settings['auth']['sys_gid_min'], + 'sys_gid_max': settings['auth']['sys_gid_max'], + 'login_retries': settings['auth']['retries'], + 'login_timeout': settings['auth']['timeout'], + 'chfn_restrict': settings['auth']['chfn_restrict'], + 'allow_login_without_home': settings['auth']['allow_homeless'] + } + + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/minimize_access.py new file mode 100644 index 0000000000000000000000000000000000000000..6e64be003be0b89d1416b22c35c43b6024979361 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -0,0 +1,50 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + ReadOnly, +) +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening access audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Remove write permissions from $PATH folders for all regular users. + # This prevents changing system-wide commands from normal users. + path_folders = {'/usr/local/sbin', + '/usr/local/bin', + '/usr/sbin', + '/usr/bin', + '/bin'} + extra_user_paths = settings['environment']['extra_user_paths'] + path_folders.update(extra_user_paths) + audits.append(ReadOnly(path_folders)) + + # Only allow the root user to have access to the shadow file. + audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600)) + + if 'change_user' not in settings['security']['users_allow']: + # su should only be accessible to user and group root, unless it is + # expressly defined to allow users to change to root via the + # security_users_allow config option. 
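+        # (editorial sketch) i.e. /bin/su keeps its stock setuid permissions
+        # only when the charm ships a hardening.yaml containing something
+        # like:
+        #   os: {security: {users_allow: ['change_user']}}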
+ audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750)) + + return audits diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/pam.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/pam.py new file mode 100644 index 0000000000000000000000000000000000000000..9b38d5f0cf0b16282968825b79b44d80a1a7f577 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/pam.py @@ -0,0 +1,132 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from subprocess import ( + check_output, + CalledProcessError, +) + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + ERROR, +) +from charmhelpers.fetch import ( + apt_install, + apt_purge, + apt_update, +) +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + DeletedFile, +) +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR + + +def get_audits(): + """Get OS hardening PAM authentication audits. + + :returns: dictionary of audits + """ + audits = [] + + settings = utils.get_settings('os') + + if settings['auth']['pam_passwdqc_enable']: + audits.append(PasswdqcPAM('/etc/passwdqc.conf')) + + if settings['auth']['retries']: + audits.append(Tally2PAM('/usr/share/pam-configs/tally2')) + else: + audits.append(DeletedFile('/usr/share/pam-configs/tally2')) + + return audits + + +class PasswdqcPAMContext(object): + + def __call__(self): + ctxt = {} + settings = utils.get_settings('os') + + ctxt['auth_pam_passwdqc_options'] = \ + settings['auth']['pam_passwdqc_options'] + + return ctxt + + +class PasswdqcPAM(TemplatedFile): + """The PAM Audit verifies the linux PAM settings.""" + def __init__(self, path): + super(PasswdqcPAM, self).__init__(path=path, + template_dir=TEMPLATES_DIR, + context=PasswdqcPAMContext(), + user='root', + group='root', + mode=0o0640) + + def pre_write(self): + # Always remove? 
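+        # (editorial note) libpam-ccreds and libpam-cracklib provide
+        # competing credential caching / password checking, so they are
+        # purged before libpam-passwdqc is installed below.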
+        for pkg in ['libpam-ccreds', 'libpam-cracklib']:
+            log("Purging package '%s'" % pkg, level=DEBUG)
+            apt_purge(pkg)
+
+        apt_update(fatal=True)
+        for pkg in ['libpam-passwdqc']:
+            log("Installing package '%s'" % pkg, level=DEBUG)
+            apt_install(pkg)
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
+
+
+class Tally2PAMContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        settings = utils.get_settings('os')
+
+        ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
+        ctxt['auth_retries'] = settings['auth']['retries']
+
+        return ctxt
+
+
+class Tally2PAM(TemplatedFile):
+    """The PAM Audit verifies the Linux PAM settings."""
+    def __init__(self, path):
+        super(Tally2PAM, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=Tally2PAMContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0640)
+
+    def pre_write(self):
+        # Remove the conflicting credential-caching module before
+        # installing the PAM modules used by the tally2 configuration.
+        apt_purge('libpam-ccreds')
+        apt_update(fatal=True)
+        apt_install('libpam-modules')
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/profile.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/profile.py
new file mode 100644
index 0000000000000000000000000000000000000000..2727428da9241ccf88a60843d05dffb26cebac96
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/profile.py
@@ -0,0 +1,49 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening profile audits.
+
+    :returns: list of audits
+    """
+    audits = []
+
+    settings = utils.get_settings('os')
+    # If core dumps are not enabled, then don't allow core dumps to be
+    # created as they may contain sensitive information.
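+    # (editorial sketch) two profile.d snippets can be rendered below, both
+    # fed from ProfileContext: one suppressing core dumps, one exporting a
+    # read-only TMOUT for idle-session logout.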
+ if not settings['security']['kernel_enable_core_dump']: + audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0755, user='root', group='root')) + if settings['security']['ssh_tmout']: + audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0644, user='root', group='root')) + return audits + + +class ProfileContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'ssh_tmout': + settings['security']['ssh_tmout']} + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/securetty.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/securetty.py new file mode 100644 index 0000000000000000000000000000000000000000..34cd02178c1fbf1c7d467af0814ad9fd4199dc3d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/securetty.py @@ -0,0 +1,37 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening Secure TTY audits. + + :returns: dictionary of audits + """ + audits = [] + audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(), + template_dir=TEMPLATES_DIR, + mode=0o0400, user='root', group='root')) + return audits + + +class SecureTTYContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'ttys': settings['auth']['root_ttys']} + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/suid_sgid.py new file mode 100644 index 0000000000000000000000000000000000000000..bcbe3fde07ea0716e2de6d1d4e103fcb19166c14 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/suid_sgid.py @@ -0,0 +1,129 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
+from charmhelpers.contrib.hardening import utils
+
+
+BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
+             '/usr/libexec/openssh/ssh-keysign',
+             '/usr/lib/openssh/ssh-keysign',
+             '/sbin/netreport',
+             '/usr/sbin/usernetctl',
+             '/usr/sbin/userisdnctl',
+             '/usr/sbin/pppd',
+             '/usr/bin/lockfile',
+             '/usr/bin/mail-lock',
+             '/usr/bin/mail-unlock',
+             '/usr/bin/mail-touchlock',
+             '/usr/bin/dotlockfile',
+             '/usr/bin/arping',
+             '/usr/sbin/uuidd',
+             '/usr/bin/mtr',
+             '/usr/lib/evolution/camel-lock-helper-1.2',
+             '/usr/lib/pt_chown',
+             '/usr/lib/eject/dmcrypt-get-device',
+             '/usr/lib/mc/cons.saver']
+
+WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
+             '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
+             '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
+             '/usr/bin/passwd', '/usr/bin/ssh-agent',
+             '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
+             '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
+             '/bin/ping6', '/usr/bin/traceroute6.iputils',
+             '/sbin/mount.nfs', '/sbin/umount.nfs',
+             '/sbin/mount.nfs4', '/sbin/umount.nfs4',
+             '/usr/bin/crontab',
+             '/usr/bin/wall', '/usr/bin/write',
+             '/usr/bin/screen',
+             '/usr/bin/mlocate',
+             '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
+             '/bin/fusermount',
+             '/usr/bin/pkexec',
+             '/usr/bin/sudo', '/usr/bin/sudoedit',
+             '/usr/sbin/postdrop', '/usr/sbin/postqueue',
+             '/usr/sbin/suexec',
+             '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
+             '/usr/kerberos/bin/ksu',
+             '/usr/sbin/ccreds_validate',
+             '/usr/bin/Xorg',
+             '/usr/bin/X',
+             '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
+             '/usr/lib/vte/gnome-pty-helper',
+             '/usr/lib/libvte9/gnome-pty-helper',
+             '/usr/lib/libvte-2.90-9/gnome-pty-helper']
+
+
+def get_audits():
+    """Get OS hardening suid/sgid audits.
+
+    :returns: list of audits
+    """
+    checks = []
+    settings = utils.get_settings('os')
+    if not settings['security']['suid_sgid_enforce']:
+        log("Skipping suid/sgid hardening", level=INFO)
+        return checks
+
+    # Build the blacklist and whitelist of files for suid/sgid checks.
+    # There are a total of 4 lists:
+    #   1. the system blacklist
+    #   2. the system whitelist
+    #   3. the user blacklist
+    #   4. the user whitelist
+    #
+    # The blacklist is the set of paths which should NOT have the suid/sgid
+    # bit set and the whitelist is the set of paths which MAY have the
+    # suid/sgid bit set. The user whitelist/blacklist effectively overrides
+    # the system whitelist/blacklist.
+    u_b = settings['security']['suid_sgid_blacklist']
+    u_w = settings['security']['suid_sgid_whitelist']
+
+    blacklist = set(BLACKLIST) - set(u_w + u_b)
+    whitelist = set(WHITELIST) - set(u_b + u_w)
+
+    checks.append(NoSUIDSGIDAudit(blacklist))
+
+    dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
+
+    if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
+        # If the policy is a dry_run (e.g. complain only) or remove unknown
+        # suid/sgid bits then find all of the paths which have the suid/sgid
+        # bit set and then remove the whitelisted paths.
+        root_path = settings['environment']['root_path']
+        unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
+        checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
+
+    return checks
+
+
+def find_paths_with_suid_sgid(root_path):
+    """Finds all paths/files which have an suid/sgid bit enabled.
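+
+    The find expression below matches mode bits -4000 (setuid) or -2000
+    (setgid) on regular files, skipping anything under /proc.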
+
+    Starting with the root_path, this will recursively find all paths which
+    have an suid or sgid bit set.
+    """
+    cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000',
+           '-type', 'f', '!', '-path', '/proc/*', '-print']
+
+    # universal_newlines=True makes communicate() return text rather than
+    # bytes, so the output can be split on Python 3 as well as Python 2.
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                         universal_newlines=True)
+    out, _ = p.communicate()
+    return set(out.split('\n'))
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/sysctl.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/sysctl.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1ea5813036b11893e8b9a986bf30a2f7a541b5d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/checks/sysctl.py
@@ -0,0 +1,209 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import platform
+import re
+import six
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+    WARNING,
+)
+from charmhelpers.contrib.hardening import utils
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+
+
+SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
+net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
+net.ipv4.conf.all.rp_filter=1
+net.ipv4.conf.default.rp_filter=1
+net.ipv4.icmp_echo_ignore_broadcasts=1
+net.ipv4.icmp_ignore_bogus_error_responses=1
+net.ipv4.icmp_ratelimit=100
+net.ipv4.icmp_ratemask=88089
+net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
+net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
+net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
+net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
+net.ipv4.tcp_rfc1337=1
+net.ipv4.tcp_syncookies=1
+net.ipv4.conf.all.shared_media=1
+net.ipv4.conf.default.shared_media=1
+net.ipv4.conf.all.accept_source_route=0
+net.ipv4.conf.default.accept_source_route=0
+net.ipv4.conf.all.accept_redirects=0
+net.ipv4.conf.default.accept_redirects=0
+net.ipv6.conf.all.accept_redirects=0
+net.ipv6.conf.default.accept_redirects=0
+net.ipv4.conf.all.secure_redirects=0
+net.ipv4.conf.default.secure_redirects=0
+net.ipv4.conf.all.send_redirects=0
+net.ipv4.conf.default.send_redirects=0
+net.ipv4.conf.all.log_martians=0
+net.ipv6.conf.default.router_solicitations=0
+net.ipv6.conf.default.accept_ra_rtr_pref=0
+net.ipv6.conf.default.accept_ra_pinfo=0
+net.ipv6.conf.default.accept_ra_defrtr=0
+net.ipv6.conf.default.autoconf=0
+net.ipv6.conf.default.dad_transmits=0
+net.ipv6.conf.default.max_addresses=1
+net.ipv6.conf.all.accept_ra=0
+net.ipv6.conf.default.accept_ra=0
+kernel.modules_disabled=%(kernel_modules_disabled)s
+kernel.sysrq=%(kernel_sysrq)s
+fs.suid_dumpable=%(fs_suid_dumpable)s
+kernel.randomize_va_space=2
+"""
+
+
+def get_audits():
+    """Get OS hardening sysctl audits.
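+
+    Three audits are assembled below: the rendered sysctl.d hardening file,
+    the permissions of /etc/sysctl.conf and, when kernel module loading is
+    disabled, the initramfs modules template.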
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Apply the sysctl settings which are configured to be applied.
+    audits.append(SysctlConf())
+    # Make sure that only root has access to the sysctl.conf file, and
+    # that it is read-only.
+    audits.append(FilePermissionAudit('/etc/sysctl.conf',
+                                      user='root',
+                                      group='root', mode=0o0440))
+    # If module loading is not enabled, then ensure that the modules
+    # file has the appropriate permissions and rebuild the initramfs
+    if not settings['security']['kernel_enable_module_loading']:
+        audits.append(ModulesTemplate())
+
+    return audits
+
+
+class ModulesContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        with open('/proc/cpuinfo', 'r') as fd:
+            cpuinfo = fd.readlines()
+
+        # Default the vendor so the context still renders on CPUs whose
+        # /proc/cpuinfo carries no vendor_id line.
+        vendor = None
+        for line in cpuinfo:
+            match = re.search(r"^vendor_id\s+:\s+(.+)", line)
+            if match:
+                vendor = match.group(1)
+
+        if vendor == "GenuineIntel":
+            vendor = "intel"
+        elif vendor == "AuthenticAMD":
+            vendor = "amd"
+
+        ctxt = {'arch': platform.processor(),
+                'cpuVendor': vendor,
+                'desktop_enable': settings['general']['desktop_enable']}
+
+        return ctxt
+
+
+class ModulesTemplate(TemplatedFile):
+    """Audit for the initramfs modules file.
+
+    Subclasses TemplatedFile so that the template rendering, permission
+    handling and checksum logic it provides can be reused here.
+    """
+
+    def __init__(self):
+        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
+                                              ModulesContext(),
+                                              template_dir=TEMPLATES_DIR,
+                                              user='root', group='root',
+                                              mode=0o0440)
+
+    def post_write(self):
+        subprocess.check_call(['update-initramfs', '-u'])
+
+
+class SysCtlHardeningContext(object):
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'sysctl': {}}
+
+        log("Applying sysctl settings", level=INFO)
+        extras = {'net_ipv4_ip_forward': 0,
+                  'net_ipv6_conf_all_forwarding': 0,
+                  'net_ipv6_conf_all_disable_ipv6': 1,
+                  'net_ipv4_tcp_timestamps': 0,
+                  'net_ipv4_conf_all_arp_ignore': 0,
+                  'net_ipv4_conf_all_arp_announce': 0,
+                  'kernel_sysrq': 0,
+                  'fs_suid_dumpable': 0,
+                  'kernel_modules_disabled': 1}
+
+        if settings['sysctl']['ipv6_enable']:
+            extras['net_ipv6_conf_all_disable_ipv6'] = 0
+
+        if settings['sysctl']['forwarding']:
+            extras['net_ipv4_ip_forward'] = 1
+            extras['net_ipv6_conf_all_forwarding'] = 1
+
+        if settings['sysctl']['arp_restricted']:
+            extras['net_ipv4_conf_all_arp_ignore'] = 1
+            extras['net_ipv4_conf_all_arp_announce'] = 2
+
+        if settings['security']['kernel_enable_module_loading']:
+            extras['kernel_modules_disabled'] = 0
+
+        if settings['sysctl']['kernel_enable_sysrq']:
+            sysrq_val = settings['sysctl']['kernel_secure_sysrq']
+            extras['kernel_sysrq'] = sysrq_val
+
+        if settings['security']['kernel_enable_core_dump']:
+            extras['fs_suid_dumpable'] = 1
+
+        settings.update(extras)
+        for d in (SYSCTL_DEFAULTS % settings).split():
+            d = d.strip().partition('=')
+            key = d[0].strip()
+            path = os.path.join('/proc/sys', key.replace('.', '/'))
+            if not os.path.exists(path):
+                log("Skipping '%s' since '%s' does not exist" % (key, path),
+                    level=WARNING)
+                continue
+
+            ctxt['sysctl'][key] = d[2] or None
+
+        # Translate for python3
+        return {'sysctl_settings':
+                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+
+
+class SysctlConf(TemplatedFile):
+    """An audit check for sysctl settings."""
+    def __init__(self):
+        self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
+        super(SysctlConf, self).__init__(self.conffile,
+                                         SysCtlHardeningContext(),
+                                         template_dir=TEMPLATES_DIR,
+                                         user='root', group='root',
+                                         mode=0o0440)
+
+    def post_write(self):
+        try:
+            subprocess.check_call(['sysctl', '-p', self.conffile])
+        except subprocess.CalledProcessError as e:
+            # NOTE: on some systems, if sysctl cannot apply all settings it
+            #       will return non-zero as well.
+            log("sysctl command returned an error (maybe some "
+                "keys could not be set) - %s" % (e),
+                level=WARNING)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
new file mode 100644
index 0000000000000000000000000000000000000000..0014191fc8152fd9147b3fb5446987e6e62f2d77
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
@@ -0,0 +1,8 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+{% if disable_core_dump -%}
+# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information.
+* hard core 0
+{% endif %}
\ No newline at end of file
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
new file mode 100644
index 0000000000000000000000000000000000000000..616cef46f492f682aca28c71a6e20176870a36f2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
@@ -0,0 +1,5 @@
+TMOUT={{ ssh_tmout }}
+readonly TMOUT
+export TMOUT
+
+readonly HISTFILE
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
new file mode 100644
index 0000000000000000000000000000000000000000..101f1e1d709c268890553957f30c93259681ce59
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
@@ -0,0 +1,7 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+{% for key, value in sysctl_settings -%}
+{{ key }}={{ value }}
+{% endfor -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/login.defs b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/login.defs
new file mode 100644
index 0000000000000000000000000000000000000000..db137d6dbb7a3a850294407199225392a880cfc2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/login.defs
@@ -0,0 +1,349 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+#
+# /etc/login.defs - Configuration control definitions for the login package.
+# +# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. +# If unspecified, some arbitrary (and possibly incorrect) value will +# be assumed. All other items are optional - if not specified then +# the described action or option will be inhibited. +# +# Comment lines (lines beginning with "#") and blank lines are ignored. +# +# Modified for Linux. --marekm + +# REQUIRED for useradd/userdel/usermod +# Directory where mailboxes reside, _or_ name of file, relative to the +# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, +# MAIL_DIR takes precedence. +# +# Essentially: +# - MAIL_DIR defines the location of users mail spool files +# (for mbox use) by appending the username to MAIL_DIR as defined +# below. +# - MAIL_FILE defines the location of the users mail spool files as the +# fully-qualified filename obtained by prepending the user home +# directory before $MAIL_FILE +# +# NOTE: This is no more used for setting up users MAIL environment variable +# which is, starting from shadow 4.0.12-1 in Debian, entirely the +# job of the pam_mail PAM modules +# See default PAM configuration files provided for +# login, su, etc. +# +# This is a temporary situation: setting these variables will soon +# move to /etc/default/useradd and the variables will then be +# no more supported +MAIL_DIR /var/mail +#MAIL_FILE .mail + +# +# Enable logging and display of /var/log/faillog login failure info. +# This option conflicts with the pam_tally PAM module. +# +FAILLOG_ENAB yes + +# +# Enable display of unknown usernames when login failures are recorded. +# +# WARNING: Unknown usernames may become world readable. +# See #290803 and #298773 for details about how this could become a security +# concern +LOG_UNKFAIL_ENAB no + +# +# Enable logging of successful logins +# +LOG_OK_LOGINS yes + +# +# Enable "syslog" logging of su activity - in addition to sulog file logging. +# SYSLOG_SG_ENAB does the same for newgrp and sg. +# +SYSLOG_SU_ENAB yes +SYSLOG_SG_ENAB yes + +# +# If defined, all su activity is logged to this file. +# +#SULOG_FILE /var/log/sulog + +# +# If defined, file which maps tty line to TERM environment parameter. +# Each line of the file is in a format something like "vt100 tty01". +# +#TTYTYPE_FILE /etc/ttytype + +# +# If defined, login failures will be logged here in a utmp format +# last, when invoked as lastb, will read /var/log/btmp, so... +# +FTMP_FILE /var/log/btmp + +# +# If defined, the command name to display when running "su -". For +# example, if this is defined as "su" then a "ps" will display the +# command is "-su". If not defined, then "ps" would display the +# name of the shell actually being run, e.g. something like "-sh". +# +SU_NAME su + +# +# If defined, file which inhibits all the usual chatter during the login +# sequence. If a full pathname, then hushed mode will be enabled if the +# user's name or shell are found in the file. If not a full pathname, then +# hushed mode will be enabled if the file exists in the user's home directory. +# +HUSHLOGIN_FILE .hushlogin +#HUSHLOGIN_FILE /etc/hushlogins + +# +# *REQUIRED* The default PATH settings, for superuser and normal users. +# +# (they are minimal, add the rest in the shell startup files) +ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %} + +# +# Terminal permissions +# +# TTYGROUP Login tty will be assigned this group ownership. 
+#   TTYPERM    Login tty will be set to this permission.
+#
+# If you have a "write" program which is "setgid" to a special group
+# which owns the terminals, define TTYGROUP to the group number and
+# TTYPERM to 0620.  Otherwise leave TTYGROUP commented out and assign
+# TTYPERM to either 622 or 600.
+#
+# In Debian /usr/bin/bsd-write or similar programs are setgid tty
+# However, the default and recommended value for TTYPERM is still 0600
+# to not allow anyone to write to anyone else's console or terminal.
+
+# Users can still allow other people to write to them by issuing
+# the "mesg y" command.
+
+TTYGROUP        tty
+TTYPERM         0600
+
+#
+# Login configuration initializations:
+#
+#   ERASECHAR   Terminal ERASE character ('\010' = backspace).
+#   KILLCHAR    Terminal KILL character ('\025' = CTRL/U).
+#   UMASK       Default "umask" value.
+#
+# The ERASECHAR and KILLCHAR are used only on System V machines.
+#
+# UMASK is the default umask value for pam_umask and is used by
+# useradd and newusers to set the mode of the new home directories.
+# 022 is the "historical" value in Debian for UMASK
+# 027, or even 077, could be considered better for privacy
+# There is no One True Answer here: each sysadmin must make up his/her
+# mind.
+#
+# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
+# for private user groups, i. e. the uid is the same as gid, and username is
+# the same as the primary group name: for these, the user permissions will be
+# used as group permissions, e. g. 022 will become 002.
+#
+# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
+#
+ERASECHAR       0177
+KILLCHAR        025
+UMASK           {{ umask }}
+
+#
+# Password aging controls:
+#
+#   PASS_MAX_DAYS   Maximum number of days a password may be used.
+#   PASS_MIN_DAYS   Minimum number of days allowed between password changes.
+#   PASS_WARN_AGE   Number of days warning given before a password expires.
+#
+PASS_MAX_DAYS   {{ pwd_max_age }}
+PASS_MIN_DAYS   {{ pwd_min_age }}
+PASS_WARN_AGE   7
+
+#
+# Min/max values for automatic uid selection in useradd
+#
+UID_MIN          {{ uid_min }}
+UID_MAX          60000
+# System accounts
+SYS_UID_MIN      {{ sys_uid_min }}
+SYS_UID_MAX      {{ sys_uid_max }}
+
+# Min/max values for automatic gid selection in groupadd
+GID_MIN          {{ gid_min }}
+GID_MAX          60000
+# System accounts
+SYS_GID_MIN      {{ sys_gid_min }}
+SYS_GID_MAX      {{ sys_gid_max }}
+
+#
+# Max number of login retries if password is bad. This will most likely be
+# overridden by PAM, since the default pam_unix module has its own built-in
+# limit of 3 retries. However, this is a safe fallback in case you are using
+# an authentication module that does not enforce PAM_MAXTRIES.
+#
+LOGIN_RETRIES   {{ login_retries }}
+
+#
+# Max time in seconds for login
+#
+LOGIN_TIMEOUT   {{ login_timeout }}
+
+#
+# Which fields may be changed by regular users using chfn - use
+# any combination of letters "frwh" (full name, room number, work
+# phone, home phone).  If not defined, no changes are allowed.
+# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
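+# (editorial example) "CHFN_RESTRICT rwh" would let users change only their
+# room number, work phone and home phone.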
+#
+{% if chfn_restrict %}
+CHFN_RESTRICT {{ chfn_restrict }}
+{% endif %}
+
+#
+# Should login be allowed if we can't cd to the home directory?
+# Default is no.
+#
+DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %}
+
+#
+# If defined, this command is run when removing a user.
+# It should remove any at/cron/print jobs etc. owned by
+# the user to be removed (passed as the first argument).
+#
+#USERDEL_CMD /usr/sbin/userdel_local
+
+#
+# Enable setting of the umask group bits to be the same as owner bits
+# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
+# the same as gid, and username is the same as the primary group name.
+#
+# If set to yes, userdel will remove the user's group if it contains no
+# more members, and useradd will create by default a group with the name
+# of the user.
+#
+USERGROUPS_ENAB yes
+
+#
+# Instead of the real user shell, the program specified by this parameter
+# will be launched, although its visible name (argv[0]) will be the shell's.
+# The program may do whatever it wants (logging, additional authentication,
+# banner, ...) before running the actual shell.
+#
+# FAKE_SHELL /bin/fakeshell
+
+#
+# If defined, either full pathname of a file containing device names or
+# a ":" delimited list of device names.  Root logins will be allowed only
+# upon these devices.
+#
+# This variable is used by login and su.
+#
+#CONSOLE /etc/consoles
+#CONSOLE console:tty01:tty02:tty03:tty04
+
+#
+# List of groups to add to the user's supplementary group set
+# when logging in on the console (as determined by the CONSOLE
+# setting).  Default is none.
+#
+# Use with caution - it is possible for users to gain permanent
+# access to these groups, even when not logged in on the console.
+# How to do it is left as an exercise for the reader...
+#
+# This variable is used by login and su.
+#
+#CONSOLE_GROUPS floppy:audio:cdrom
+
+#
+# If set to "yes", new passwords will be encrypted using the MD5-based
+# algorithm compatible with the one used by recent releases of FreeBSD.
+# It supports passwords of unlimited length and longer salt strings.
+# Set to "no" if you need to copy encrypted passwords to other systems
+# which don't understand the new algorithm.  Default is "no".
+#
+# This variable is deprecated. You should use ENCRYPT_METHOD.
+#
+MD5_CRYPT_ENAB no
+
+#
+# If set to MD5, MD5-based algorithm will be used for encrypting password
+# If set to SHA256, SHA256-based algorithm will be used for encrypting password
+# If set to SHA512, SHA512-based algorithm will be used for encrypting password
+# If set to DES, DES-based algorithm will be used for encrypting password (default)
+# Overrides the MD5_CRYPT_ENAB option
+#
+# Note: It is recommended to use a value consistent with
+# the PAM modules configuration.
+#
+ENCRYPT_METHOD SHA512
+
+#
+# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
+#
+# Define the number of SHA rounds.
+# With a lot of rounds, it is more difficult to brute-force the password.
+# But note also that more CPU resources will be needed to authenticate
+# users.
+#
+# If not specified, the libc will choose the default number of rounds (5000).
+# The values must be inside the 1000-999999999 range.
+# If only one of the MIN or MAX values is set, then this value will be used.
+# If MIN > MAX, the highest value will be used.
+#
+# SHA_CRYPT_MIN_ROUNDS 5000
+# SHA_CRYPT_MAX_ROUNDS 5000
+
+################# OBSOLETED BY PAM ##############
+#                                               #
+# These options are now handled by PAM. Please  #
+# edit the appropriate file in /etc/pam.d/ to   #
+# enable the equivalents of them.
+#
+###############
+
+#MOTD_FILE
+#DIALUPS_CHECK_ENAB
+#LASTLOG_ENAB
+#MAIL_CHECK_ENAB
+#OBSCURE_CHECKS_ENAB
+#PORTTIME_CHECKS_ENAB
+#SU_WHEEL_ONLY
+#CRACKLIB_DICTPATH
+#PASS_CHANGE_TRIES
+#PASS_ALWAYS_WARN
+#ENVIRON_FILE
+#NOLOGINS_FILE
+#ISSUE_FILE
+#PASS_MIN_LEN
+#PASS_MAX_LEN
+#ULIMIT
+#ENV_HZ
+#CHFN_AUTH
+#CHSH_AUTH
+#FAIL_DELAY
+
+################# OBSOLETED #######################
+#                                                 #
+# These options are no longer handled by shadow.  #
+#                                                 #
+# Shadow utilities will display a warning if they #
+# still appear.                                   #
+#                                                 #
+###################################################
+
+# CLOSE_SESSIONS
+# LOGIN_STRING
+# NO_PASSWORD_CONSOLE
+# QMAIL_DIR
+
+
+
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/modules b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/modules
new file mode 100644
index 0000000000000000000000000000000000000000..ef0354ee35fa363b303bb22c6ed0d2d1196aed52
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/modules
@@ -0,0 +1,117 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+# /etc/modules: kernel modules to load at boot time.
+#
+# This file contains the names of kernel modules that should be loaded
+# at boot time, one per line. Lines beginning with "#" are ignored.
+# Parameters can be specified after the module name.
+
+# Arch
+# ----
+#
+# Modules for certain builds; contains support modules and some CPU-specific optimizations.
+
+{% if arch == "x86_64" -%}
+# Optimize for x86_64 cryptographic features
+twofish-x86_64-3way
+twofish-x86_64
+aes-x86_64
+salsa20-x86_64
+blowfish-x86_64
+{% endif -%}
+
+{% if cpuVendor == "intel" -%}
+# Intel-specific optimizations
+ghash-clmulni-intel
+aesni-intel
+kvm-intel
+{% endif -%}
+
+{% if cpuVendor == "amd" -%}
+# AMD-specific optimizations
+kvm-amd
+{% endif -%}
+
+kvm
+
+
+# Crypto
+# ------
+
+# Some core modules which comprise strong cryptography.
+blowfish_common
+blowfish_generic
+ctr
+cts
+lrw
+lzo
+rmd160
+rmd256
+rmd320
+serpent
+sha512_generic
+twofish_common
+twofish_generic
+xts
+zlib
+
+
+# Drivers
+# -------
+
+# Basics
+lp
+rtc
+loop
+
+# Filesystems
+ext2
+btrfs
+
+{% if desktop_enable -%}
+# Desktop
+psmouse
+snd
+snd_ac97_codec
+snd_intel8x0
+snd_page_alloc
+snd_pcm
+snd_timer
+soundcore
+usbhid
+{% endif -%}
+
+# Lib
+# ---
+xz
+
+
+# Net
+# ---
+
+# All modules needed for netfilter rules (i.e. iptables, ebtables).
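+#
+# Editor's note (illustrative, not part of the upstream template): once this
+# file is rendered to /etc/modules, the module loader reads it at boot, one
+# module name per line. An entry such as "ip_tables" can be verified at
+# runtime with "lsmod | grep ip_tables" or loaded immediately with
+# "modprobe ip_tables".
+#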
+ip_tables +x_tables +iptable_filter +iptable_nat + +# Targets +ipt_LOG +ipt_REJECT + +# Modules +xt_connlimit +xt_tcpudp +xt_recent +xt_limit +xt_conntrack +nf_conntrack +nf_conntrack_ipv4 +nf_defrag_ipv4 +xt_state +nf_nat + +# Addons +xt_pknock \ No newline at end of file diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/passwdqc.conf new file mode 100644 index 0000000000000000000000000000000000000000..f98d14e57428c106692e0f57e8b381f2b0a12c44 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/passwdqc.conf @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +Name: passwdqc password strength enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Password-Type: Primary +Password: + requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh new file mode 100644 index 0000000000000000000000000000000000000000..fd2de791b96fbb8889811daf7340d1f2ca2ab3a6 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Disable core dumps via soft limits for all users. Compliance to this setting +# is voluntary and can be modified by users up to a hard limit. This setting is +# a sane default. +ulimit -S -c 0 > /dev/null 2>&1 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/securetty b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/securetty new file mode 100644 index 0000000000000000000000000000000000000000..15b18d4e2f45747845d0b65c06997f154ef674a4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/securetty @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +# A list of TTYs, from which root can log in +# see `man securetty` for reference +{% if ttys -%} +{% for tty in ttys -%} +{{ tty }} +{% endfor -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/tally2 b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/tally2 new file mode 100644 index 0000000000000000000000000000000000000000..d9620299c55e51abbee1017a227c217cd4a9fd33 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/host/templates/tally2 @@ -0,0 +1,14 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +Name: tally2 lockout after failed attempts enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Auth-Type: Primary +Auth-Initial: + required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }} +Account-Type: Primary +Account-Initial: + required pam_tally2.so diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1990d8513bbeef067a8d9a2168e1952efb2961dc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.mysql.checks import config + + +def run_mysql_checks(): + log("Starting MySQL hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("MySQL hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/checks/config.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/checks/config.py new file mode 100644 index 0000000000000000000000000000000000000000..a79f33b74a5c2972a82f0b4d8de8d1073dc293ed --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -0,0 +1,87 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + WARNING, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get MySQL hardening config audits. + + :returns: dictionary of audits + """ + if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0: + log("MySQL does not appear to be installed on this node - " + "skipping mysql hardening", level=WARNING) + return [] + + settings = utils.get_settings('mysql') + hardening_settings = settings['hardening'] + my_cnf = hardening_settings['mysql-conf'] + + audits = [ + FilePermissionAudit(paths=[my_cnf], user='root', + group='root', mode=0o0600), + + TemplatedFile(hardening_settings['hardening-conf'], + MySQLConfContext(), + TEMPLATES_DIR, + mode=0o0750, + user='mysql', + group='root', + service_actions=[{'service': 'mysql', + 'actions': ['restart']}]), + + # MySQL and Percona charms do not allow configuration of the + # data directory, so use the default. + DirectoryPermissionAudit('/var/lib/mysql', + user='mysql', + group='mysql', + recursive=False, + mode=0o755), + + DirectoryPermissionAudit('/etc/mysql', + user='root', + group='root', + recursive=False, + mode=0o700), + ] + + return audits + + +class MySQLConfContext(object): + """Defines the set of key/value pairs to set in a mysql config file. + + This context, when called, will return a dictionary containing the + key/value pairs of setting to specify in the + /etc/mysql/conf.d/hardening.cnf file. 
+ """ + def __call__(self): + settings = utils.get_settings('mysql') + # Translate for python3 + return {'mysql_settings': + [(k, v) for k, v in six.iteritems(settings['security'])]} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf new file mode 100644 index 0000000000000000000000000000000000000000..8242586cd66360b7e6ae33f13018363b95cd4ea9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf @@ -0,0 +1,12 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +[mysqld] +{% for setting, value in mysql_settings -%} +{% if value == 'True' -%} +{{ setting }} +{% elif value != 'None' and value != None -%} +{{ setting }} = {{ value }} +{% endif -%} +{% endfor -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..edaf484b39f8c7353cebb2f4b68944c6493ba7b3 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/checks/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
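+
+# Editor's sketch (illustrative, not part of charm-helpers): the check
+# modules in this package all follow the same pattern - config.get_audits()
+# returns audit objects, each exposing ensure_compliance(). A charm would
+# typically invoke a module from a hook, e.g.:
+#
+#     from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
+#     run_ssh_checks()  # renders /etc/ssh templates, verifies file contents
+#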
+ +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.ssh.checks import config + + +def run_ssh_checks(): + log("Starting SSH hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("SSH hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/checks/config.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/checks/config.py new file mode 100644 index 0000000000000000000000000000000000000000..41bed2d1e7b031182edcf62710876e4073dfbc6e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -0,0 +1,435 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_iface_addr, + is_ip, +) +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, +) +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + FileContentAudit, +) +from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get SSH hardening config audits. 
+
+    :returns: list of audits
+    """
+    audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
+              SSHDConfigFileContentAudit()]
+    return audits
+
+
+class SSHConfigContext(object):
+
+    type = 'client'
+
+    def get_macs(self, allow_weak_mac):
+        if allow_weak_mac:
+            weak_macs = 'weak'
+        else:
+            weak_macs = 'default'
+
+        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+        macs = {'default': default,
+                'weak': default + ',hmac-sha1'}
+
+        default = ('hmac-sha2-512-etm@openssh.com,'
+                   'hmac-sha2-256-etm@openssh.com,'
+                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
+                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
+        macs_66 = {'default': default,
+                   'weak': default + ',hmac-sha1'}
+
+        # Use newer MACs on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
+            macs = macs_66
+
+        return macs[weak_macs]
+
+    def get_kexs(self, allow_weak_kex):
+        if allow_weak_kex:
+            weak_kex = 'weak'
+        else:
+            weak_kex = 'default'
+
+        default = 'diffie-hellman-group-exchange-sha256'
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex = {'default': default,
+               'weak': weak}
+
+        default = ('curve25519-sha256@libssh.org,'
+                   'diffie-hellman-group-exchange-sha256')
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex_66 = {'default': default,
+                  'weak': weak}
+
+        # Use newer kex on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new key exchange '
+                'algorithms', level=DEBUG)
+            kex = kex_66
+
+        return kex[weak_kex]
+
+    def get_ciphers(self, cbc_required):
+        if cbc_required:
+            weak_ciphers = 'weak'
+        else:
+            weak_ciphers = 'default'
+
+        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
+        # The leading comma matters: the weak CBC ciphers are appended to the
+        # comma-separated default list, not concatenated onto its last entry.
+        cipher = {'default': default,
+                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
+                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
+        ciphers_66 = {'default': default,
+                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        # Use newer ciphers on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new ciphers',
+                level=DEBUG)
+            cipher = ciphers_66
+
+        return cipher[weak_ciphers]
+
+    def get_listening(self, listen=['0.0.0.0']):
+        """Returns a list of addresses SSH can listen on
+
+        Turns input into a sensible list of IPs SSH can listen on. Input
+        must be a Python list of interface names, IPs and/or CIDRs.
+
+        :param listen: list of IPs, CIDRs, interface names
+
+        :returns: list of IPs available on the host
+        """
+        if listen == ['0.0.0.0']:
+            return listen
+
+        value = []
+        for network in listen:
+            try:
+                ip = get_address_in_network(network=network, fatal=True)
+            except ValueError:
+                if is_ip(network):
+                    ip = network
+                else:
+                    try:
+                        ip = get_iface_addr(iface=network, fatal=False)[0]
+                    except IndexError:
+                        continue
+            value.append(ip)
+        if value == []:
+            return ['0.0.0.0']
+        return value
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'addr_family': addr_family,
+            'remote_hosts': settings['common']['remote_hosts'],
+            'password_auth_allowed':
+            settings['client']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
+            'macs': self.get_macs(settings['client']['weak_hmac']),
+            'kexs': self.get_kexs(settings['client']['weak_kex']),
+            'roaming': settings['client']['roaming'],
+        }
+        return ctxt
+
+
+class SSHConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/ssh_config'
+        super(SSHConfig, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=SSHConfigContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0644)
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['client']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.mkdir('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHDConfigContext(SSHConfigContext):
+
+    type = 'server'
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'ssh_ip': self.get_listening(settings['server']['listen_to']),
+            'password_auth_allowed':
+            settings['server']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'addr_family': addr_family,
+            'ciphers': self.get_ciphers(settings['server']['cbc_required']),
+            'macs': self.get_macs(settings['server']['weak_hmac']),
+            'kexs': self.get_kexs(settings['server']['weak_kex']),
+            'host_key_files': settings['server']['host_key_files'],
+            'allow_root_with_key': settings['server']['allow_root_with_key'],
+            'password_authentication':
+            settings['server']['password_authentication'],
+            'use_priv_sep': settings['server']['use_privilege_separation'],
+            'use_pam': settings['server']['use_pam'],
+            'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
+            'print_motd': settings['server']['print_motd'],
+            'print_last_log': settings['server']['print_last_log'],
+            'client_alive_interval':
+            settings['server']['alive_interval'],
+            'client_alive_count': settings['server']['alive_count'],
+            'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
+            'allow_agent_forwarding':
+            settings['server']['allow_agent_forwarding'],
+            'deny_users': settings['server']['deny_users'],
+            'allow_users': settings['server']['allow_users'],
+            'deny_groups': settings['server']['deny_groups'],
+            'allow_groups': settings['server']['allow_groups'],
+            'use_dns': settings['server']['use_dns'],
+            'sftp_enable': settings['server']['sftp_enable'],
+            'sftp_group': settings['server']['sftp_group'],
+            'sftp_chroot':
settings['server']['sftp_chroot'],
+            'max_auth_tries': settings['server']['max_auth_tries'],
+            'max_sessions': settings['server']['max_sessions'],
+        }
+        return ctxt
+
+
+class SSHDConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/sshd_config'
+        super(SSHDConfig, self).__init__(path=path,
+                                         template_dir=TEMPLATES_DIR,
+                                         context=SSHDConfigContext(),
+                                         user='root',
+                                         group='root',
+                                         mode=0o0600,
+                                         service_actions=[{'service': 'ssh',
+                                                           'actions':
+                                                           ['restart']}])
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['server']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.mkdir('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/ssh_config'
+        super(SSHConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        settings = utils.get_settings('ssh')
+
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            # NOTE: this branch reads the 'server' settings even though the
+            # audit targets the client config; this mirrors the upstream
+            # charm-helpers behaviour.
+            if not settings['server']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['server']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa
+
+            if settings['server']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+        else:
+            if not settings['client']['weak_hmac']:
+                self.fail_cases.append(r'^MACs.+,hmac-sha1$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+
+            if
settings['client']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['client']['roaming']: + self.pass_cases.append(r'^UseRoaming yes$') + else: + self.fail_cases.append(r'^UseRoaming yes$') + + return super(SSHConfigFileContentAudit, self).is_compliant(*args, + **kwargs) + + +class SSHDConfigFileContentAudit(FileContentAudit): + def __init__(self): + self.path = '/etc/ssh/sshd_config' + super(SSHDConfigFileContentAudit, self).__init__(self.path, {}) + + def is_compliant(self, *args, **kwargs): + self.pass_cases = [] + self.fail_cases = [] + settings = utils.get_settings('ssh') + + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + 
self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['server']['sftp_enable']: + self.pass_cases.append(r'^Subsystem\ssftp') + else: + self.fail_cases.append(r'^Subsystem\ssftp') + + return super(SSHDConfigFileContentAudit, self).is_compliant(*args, + **kwargs) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/templates/ssh_config new file mode 100644 index 0000000000000000000000000000000000000000..9742d8e2a32cd5da01a9dcb691a5a1201ed93050 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/templates/ssh_config @@ -0,0 +1,70 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# This is the ssh client system-wide configuration file. See +# ssh_config(5) for more information. This file provides defaults for +# users, and the values can be changed in per-user configuration files +# or on the command line. + +# Configuration data is parsed as follows: +# 1. command line options +# 2. user-specific file +# 3. system-wide file +# Any configuration value is only changed the first time it is set. +# Thus, host-specific definitions should be at the beginning of the +# configuration file, and defaults at the end. + +# Site-wide defaults for some commonly used options. For a comprehensive +# list of available options, their meanings and defaults, please see the +# ssh_config(5) man page. + +# Restrict the following configuration to be limited to this Host. 
+{% if remote_hosts -%} +Host {{ ' '.join(remote_hosts) }} +{% endif %} +ForwardAgent no +ForwardX11 no +ForwardX11Trusted yes +RhostsRSAAuthentication no +RSAAuthentication yes +PasswordAuthentication {{ password_auth_allowed }} +HostbasedAuthentication no +GSSAPIAuthentication no +GSSAPIDelegateCredentials no +GSSAPIKeyExchange no +GSSAPITrustDNS no +BatchMode no +CheckHostIP yes +AddressFamily {{ addr_family }} +ConnectTimeout 0 +StrictHostKeyChecking ask +IdentityFile ~/.ssh/identity +IdentityFile ~/.ssh/id_rsa +IdentityFile ~/.ssh/id_dsa +# The port at the destination should be defined +{% for port in ports -%} +Port {{ port }} +{% endfor %} +Protocol 2 +Cipher 3des +{% if ciphers -%} +Ciphers {{ ciphers }} +{%- endif %} +{% if macs -%} +MACs {{ macs }} +{%- endif %} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{%- endif %} +EscapeChar ~ +Tunnel no +TunnelDevice any:any +PermitLocalCommand no +VisualHostKey no +RekeyLimit 1G 1h +SendEnv LANG LC_* +HashKnownHosts yes +{% if roaming -%} +UseRoaming {{ roaming }} +{% endif %} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/templates/sshd_config new file mode 100644 index 0000000000000000000000000000000000000000..5f87298a8119bcab1d2578bcaefd068e5af167c4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/ssh/templates/sshd_config @@ -0,0 +1,159 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Package generated configuration file +# See the sshd_config(5) manpage for details + +# What ports, IPs and protocols we listen for +{% for port in ports -%} +Port {{ port }} +{% endfor -%} +AddressFamily {{ addr_family }} +# Use these options to restrict which interfaces/protocols sshd will bind to +{% if ssh_ip -%} +{% for ip in ssh_ip -%} +ListenAddress {{ ip }} +{% endfor %} +{%- else -%} +ListenAddress :: +ListenAddress 0.0.0.0 +{% endif -%} +Protocol 2 +{% if ciphers -%} +Ciphers {{ ciphers }} +{% endif -%} +{% if macs -%} +MACs {{ macs }} +{% endif -%} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{% endif -%} +# HostKeys for protocol version 2 +{% for keyfile in host_key_files -%} +HostKey {{ keyfile }} +{% endfor -%} + +# Privilege Separation is turned on for security +{% if use_priv_sep -%} +UsePrivilegeSeparation {{ use_priv_sep }} +{% endif -%} + +# Lifetime and size of ephemeral version 1 server key +KeyRegenerationInterval 3600 +ServerKeyBits 1024 + +# Logging +SyslogFacility AUTH +LogLevel VERBOSE + +# Authentication: +LoginGraceTime 30s +{% if allow_root_with_key -%} +PermitRootLogin without-password +{% else -%} +PermitRootLogin no +{% endif %} +PermitTunnel no +PermitUserEnvironment no +StrictModes yes + +RSAAuthentication yes +PubkeyAuthentication yes +AuthorizedKeysFile %h/.ssh/authorized_keys + +# Don't read the user's ~/.rhosts and ~/.shosts files +IgnoreRhosts yes +# For this to work you will also need host keys in /etc/ssh_known_hosts +RhostsRSAAuthentication no +# similar for protocol version 2 +HostbasedAuthentication no +# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication +IgnoreUserKnownHosts yes + +# To enable empty passwords, change to yes (NOT RECOMMENDED) 
+PermitEmptyPasswords no
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication no
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication {{ password_authentication }}
+
+# Kerberos options
+KerberosAuthentication no
+KerberosGetAFSToken no
+KerberosOrLocalPasswd no
+KerberosTicketCleanup yes
+
+# GSSAPI options
+GSSAPIAuthentication no
+GSSAPICleanupCredentials yes
+
+X11Forwarding {{ allow_x11_forwarding }}
+X11DisplayOffset 10
+X11UseLocalhost yes
+GatewayPorts no
+PrintMotd {{ print_motd }}
+PrintLastLog {{ print_last_log }}
+TCPKeepAlive no
+UseLogin no
+
+ClientAliveInterval {{ client_alive_interval }}
+ClientAliveCountMax {{ client_alive_count }}
+AllowTcpForwarding {{ allow_tcp_forwarding }}
+AllowAgentForwarding {{ allow_agent_forwarding }}
+
+MaxStartups 10:30:100
+#Banner /etc/issue.net
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication.  Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM {{ use_pam }}
+
+{% if deny_users -%}
+DenyUsers {{ deny_users }}
+{% endif -%}
+{% if allow_users -%}
+AllowUsers {{ allow_users }}
+{% endif -%}
+{% if deny_groups -%}
+DenyGroups {{ deny_groups }}
+{% endif -%}
+{% if allow_groups -%}
+AllowGroups {{ allow_groups }}
+{% endif -%}
+UseDNS {{ use_dns }}
+MaxAuthTries {{ max_auth_tries }}
+MaxSessions {{ max_sessions }}
+
+{% if sftp_enable -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+Subsystem sftp internal-sftp -l VERBOSE
+
+## These lines must appear at the *end* of sshd_config
+Match Group {{ sftp_group }}
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory {{ sftp_chroot }}
+{% else -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+## These lines must appear at the *end* of sshd_config
+Match Group sftponly
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory /sftpchroot/home/%u
+{% endif %}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/templating.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/templating.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b6765f7edeee4bed739fd354c6f7bdf0a8c952e
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/templating.py
@@ -0,0 +1,73 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import six + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) + +try: + from jinja2 import FileSystemLoader, Environment +except ImportError: + from charmhelpers.fetch import apt_install + from charmhelpers.fetch import apt_update + apt_update(fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment + + +# NOTE: function separated from main rendering code to facilitate easier +# mocking in unit tests. +def write(path, data): + with open(path, 'wb') as out: + out.write(data) + + +def get_template_path(template_dir, path): + """Returns the template file which would be used to render the path. + + The path to the template file is returned. + :param template_dir: the directory the templates are located in + :param path: the file path to be written to. + :returns: path to the template file + """ + return os.path.join(template_dir, os.path.basename(path)) + + +def render_and_write(template_dir, path, context): + """Renders the specified template into the file. + + :param template_dir: the directory to load the template from + :param path: the path to write the templated contents to + :param context: the parameters to pass to the rendering engine + """ + env = Environment(loader=FileSystemLoader(template_dir)) + template_file = os.path.basename(path) + template = env.get_template(template_file) + log('Rendering from template: %s' % template.name, level=DEBUG) + rendered_content = template.render(context) + if not rendered_content: + log("Render returned None - skipping '%s'" % path, + level=WARNING) + return + + write(path, rendered_content.encode('utf-8').strip()) + log('Wrote template %s' % path, level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ff7485c28c8748ba366dba54f1e3b8f7e6a7c619 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/hardening/utils.py @@ -0,0 +1,155 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import grp +import os +import pwd +import six +import yaml + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + INFO, + WARNING, + ERROR, +) + + +# Global settings cache. 
Since each hook fire entails a fresh module import it
+# is safe to hold this in memory and not risk missing config changes (since
+# they will result in a new hook fire and thus re-import).
+__SETTINGS__ = {}
+
+
+def _get_defaults(modules):
+    """Load the default config for the provided modules.
+
+    :param modules: stack modules config defaults to lookup.
+    :returns: modules default config dictionary.
+    """
+    default = os.path.join(os.path.dirname(__file__),
+                           'defaults/%s.yaml' % (modules))
+    return yaml.safe_load(open(default))
+
+
+def _get_schema(modules):
+    """Load the config schema for the provided modules.
+
+    NOTE: this schema is intended to have a 1-1 relationship with the keys
+    in the default config and is used as a means to verify valid overrides
+    provided by the user.
+
+    :param modules: stack modules config schema to lookup.
+    :returns: modules default schema dictionary.
+    """
+    schema = os.path.join(os.path.dirname(__file__),
+                          'defaults/%s.yaml.schema' % (modules))
+    return yaml.safe_load(open(schema))
+
+
+def _get_user_provided_overrides(modules):
+    """Load user-provided config overrides.
+
+    :param modules: stack modules to lookup in user overrides yaml file.
+    :returns: overrides dictionary.
+    """
+    overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
+                             'hardening.yaml')
+    if os.path.exists(overrides):
+        log("Found user-provided config overrides file '%s'" %
+            (overrides), level=DEBUG)
+        settings = yaml.safe_load(open(overrides))
+        if settings and settings.get(modules):
+            log("Applying '%s' overrides" % (modules), level=DEBUG)
+            return settings.get(modules)
+
+        log("No overrides found for '%s'" % (modules), level=DEBUG)
+    else:
+        log("No hardening config overrides file '%s' found in charm "
+            "root dir" % (overrides), level=DEBUG)
+
+    return {}
+
+
+def _apply_overrides(settings, overrides, schema):
+    """Overlay user-provided overrides onto the module defaults.
+
+    :param settings: modules default config dictionary.
+    :param overrides: user-provided config overrides.
+    :param schema: config schema used to validate override keys.
+    :returns: dictionary of modules config with user overrides applied.
+    """
+    if overrides:
+        for k, v in six.iteritems(overrides):
+            if k in schema:
+                if schema[k] is None:
+                    settings[k] = v
+                elif type(schema[k]) is dict:
+                    settings[k] = _apply_overrides(settings[k], overrides[k],
+                                                   schema[k])
+                else:
+                    raise Exception("Unexpected type found in schema '%s'" %
+                                    type(schema[k]))
+            else:
+                log("Unknown override key '%s' - ignoring" % (k), level=INFO)
+
+    return settings
+
+
+def get_settings(modules):
+    """Return the merged (defaults plus overrides) config for modules,
+    caching the result for subsequent calls within this hook fire."""
+    global __SETTINGS__
+    if modules in __SETTINGS__:
+        return __SETTINGS__[modules]
+
+    schema = _get_schema(modules)
+    settings = _get_defaults(modules)
+    overrides = _get_user_provided_overrides(modules)
+    __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
+    return __SETTINGS__[modules]
+
+
+def ensure_permissions(path, user, group, permissions, maxdepth=-1):
+    """Ensure permissions for path.
+
+    If path is a file, apply to file and return. If path is a directory,
+    apply recursively (if required) to directory contents and return.
+
+    :param path: path to the file or directory to act on
+    :param user: user name
+    :param group: group name
+    :param permissions: octal permissions
+    :param maxdepth: maximum recursion depth. A negative maxdepth allows
+                     infinite recursion and maxdepth=0 means no recursion.
+ :returns: None + """ + if not os.path.exists(path): + log("File '%s' does not exist - cannot set permissions" % (path), + level=WARNING) + return + + _user = pwd.getpwnam(user) + os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid) + os.chmod(path, permissions) + + if maxdepth == 0: + log("Max recursion depth reached - skipping further recursion", + level=DEBUG) + return + elif maxdepth > 0: + maxdepth -= 1 + + if os.path.isdir(path): + contents = glob.glob("%s/*" % (path)) + for c in contents: + ensure_permissions(c, user=user, group=group, + permissions=permissions, maxdepth=maxdepth) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/mellanox/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/mellanox/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b088de84e4b288b551603816fc10eebfa7b1503 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/mellanox/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/mellanox/infiniband.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/mellanox/infiniband.py new file mode 100644 index 0000000000000000000000000000000000000000..0edb2314114738c79859f51e505de05e5c7c8fcc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/mellanox/infiniband.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
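+# Editor's sketch (illustrative, not part of charm-helpers): typical use of
+# this module from a charm hook, assuming a Mellanox-equipped host:
+#
+#     from charmhelpers.contrib.mellanox import infiniband
+#     infiniband.install_packages()  # apt installs ibutils and friends
+#     infiniband.load_modules()      # modprobe + persist REQUIRED_MODULES
+#     if infiniband.is_enabled():
+#         print(infiniband.stat())   # raw ibstat output
+#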
+
+__author__ = "Jorge Niedbalski "
+
+import six
+
+from charmhelpers.fetch import (
+    apt_install,
+    apt_update,
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+
+try:
+    from netifaces import interfaces as network_interfaces
+except ImportError:
+    if six.PY2:
+        apt_install('python-netifaces')
+    else:
+        apt_install('python3-netifaces')
+    from netifaces import interfaces as network_interfaces
+
+import os
+import re
+import subprocess
+
+from charmhelpers.core.kernel import modprobe
+
+REQUIRED_MODULES = (
+    "mlx4_ib",
+    "mlx4_en",
+    "mlx4_core",
+    "ib_ipath",
+    "ib_mthca",
+    "ib_srpt",
+    "ib_srp",
+    "ib_ucm",
+    "ib_isert",
+    "ib_iser",
+    "ib_ipoib",
+    "ib_cm",
+    "ib_uverbs",
+    "ib_umad",
+    "ib_sa",
+    "ib_mad",
+    "ib_core",
+    "ib_addr",
+    "rdma_ucm",
+)
+
+REQUIRED_PACKAGES = (
+    "ibutils",
+    "infiniband-diags",
+    "ibverbs-utils",
+)
+
+IPOIB_DRIVERS = (
+    "ib_ipoib",
+)
+
+ABI_VERSION_FILE = "/sys/class/infiniband_mad/abi_version"
+
+
+class DeviceInfo(object):
+    pass
+
+
+def install_packages():
+    apt_update()
+    apt_install(REQUIRED_PACKAGES, fatal=True)
+
+
+def load_modules():
+    for module in REQUIRED_MODULES:
+        modprobe(module, persist=True)
+
+
+def is_enabled():
+    """Check if infiniband is loaded on the system"""
+    return os.path.exists(ABI_VERSION_FILE)
+
+
+def stat():
+    """Return full output of ibstat"""
+    return subprocess.check_output(["ibstat"])
+
+
+def devices():
+    """Returns a list of IB enabled devices"""
+    return subprocess.check_output(['ibstat', '-l']).splitlines()
+
+
+def device_info(device):
+    """Returns a DeviceInfo object with the current device settings"""
+
+    # decode so the str regexes below also match under Python 3, where
+    # check_output() returns bytes
+    status = subprocess.check_output([
+        'ibstat', device, '-s']).decode('utf-8').splitlines()
+
+    regexes = {
+        "CA type: (.*)": "device_type",
+        "Number of ports: (.*)": "num_ports",
+        "Firmware version: (.*)": "fw_ver",
+        "Hardware version: (.*)": "hw_ver",
+        "Node GUID: (.*)": "node_guid",
+        "System image GUID: (.*)": "sys_guid",
+    }
+
+    device = DeviceInfo()
+
+    for line in status:
+        for expression, key in regexes.items():
+            matches = re.search(expression, line)
+            if matches:
+                setattr(device, key, matches.group(1))
+
+    return device
+
+
+def ipoib_interfaces():
+    """Return a list of IPOIB capable ethernet interfaces"""
+    interfaces = []
+
+    for interface in network_interfaces():
+        try:
+            # decode for Python 3 compatibility before regex matching
+            driver = re.search('^driver: (.+)$', subprocess.check_output([
+                'ethtool', '-i',
+                interface]).decode('utf-8'), re.M).group(1)
+
+            if driver in IPOIB_DRIVERS:
+                interfaces.append(interface)
+        except Exception:
+            log("Skipping interface %s" % interface, level=INFO)
+            continue
+
+    return interfaces
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ip.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ip.py new file mode 100644 index 0000000000000000000000000000000000000000..b13277bb57c9227b1d9dfecf4f6750740e5a262a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ip.py @@ -0,0 +1,602 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import re +import subprocess +import six +import socket + +from functools import partial + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import ( + config, + log, + network_get_primary_address, + unit_get, + WARNING, + NoNetworkBinding, +) + +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) + +try: + import netifaces +except ImportError: + apt_update(fatal=True) + if six.PY2: + apt_install('python-netifaces', fatal=True) + else: + apt_install('python3-netifaces', fatal=True) + import netifaces + +try: + import netaddr +except ImportError: + apt_update(fatal=True) + if six.PY2: + apt_install('python-netaddr', fatal=True) + else: + apt_install('python3-netaddr', fatal=True) + import netaddr + + +def _validate_cidr(network): + try: + netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network(s): %s" % network) + raise ValueError(errmsg) + + +def _get_ipv6_network_from_address(address): + """Get an netaddr.IPNetwork for the given IPv6 address + :param address: a dict as returned by netifaces.ifaddresses + :returns netaddr.IPNetwork: None if the address is a link local or loopback + address + """ + if address['addr'].startswith('fe80') or address['addr'] == "::1": + return None + + prefix = address['netmask'].split("/") + if len(prefix) > 1: + netmask = prefix[1] + else: + netmask = address['netmask'] + return netaddr.IPNetwork("%s/%s" % (address['addr'], + netmask)) + + +def get_address_in_network(network, fallback=None, fatal=False): + """Get an IPv4 or IPv6 address within the network from the host. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. + :param fallback (str): If no address is found, return fallback. + :param fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). 
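+
+    Example (illustrative; assumes the host holds an address in the
+    given range):
+
+        get_address_in_network('192.168.1.0/24', fallback='10.0.0.1')
+        # -> e.g. '192.168.1.10', or '10.0.0.1' when no interface matches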
+ """ + if network is None: + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + else: + return None + + networks = network.split() or [network] + for network in networks: + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + try: + addresses = netifaces.ifaddresses(iface) + except ValueError: + # If an instance was deleted between + # netifaces.interfaces() run and now, its interfaces are gone + continue + if network.version == 4 and netifaces.AF_INET in addresses: + for addr in addresses[netifaces.AF_INET]: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) + + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + cidr = _get_ipv6_network_from_address(addr) + if cidr and cidr in network: + return str(cidr.ip) + + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + + return None + + +def is_ipv6(address): + """Determine whether provided address is IPv6 or not.""" + try: + address = netaddr.IPAddress(address) + except netaddr.AddrFormatError: + # probably a hostname - so not an address at all! + return False + + return address.version == 6 + + +def is_address_in_network(network, address): + """ + Determine whether the provided address is within a network range. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param address: An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns boolean: Flag indicating whether address is in network. + """ + try: + network = netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + try: + address = netaddr.IPAddress(address) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Address (%s) is not in correct presentation format" % + address) + + if address in network: + return True + else: + return False + + +def _get_for_address(address, key): + """Retrieve an attribute of or the physical interface that + the IP address provided could be bound to. + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :param key: 'iface' for the physical interface name or an attribute + of the configured interface, for example 'netmask'. + :returns str: Requested attribute or None if address is not bindable. 
+ """ + address = netaddr.IPAddress(address) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if address.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + else: + return addresses[netifaces.AF_INET][0][key] + + if address.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + network = _get_ipv6_network_from_address(addr) + if not network: + continue + + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] + return None + + +get_iface_for_address = partial(_get_for_address, key='iface') + + +get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def resolve_network_cidr(ip_address): + ''' + Resolves the full address cidr of an ip_address based on + configured network interfaces + ''' + netmask = get_netmask_for_address(ip_address) + return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) + + +def format_ipv6_addr(address): + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. + """ + if is_ipv6(address): + return "[%s]" % address + + return None + + +def is_ipv6_disabled(): + try: + result = subprocess.check_output( + ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError: + return True + + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): + """Return the assigned IP address for a given interface, if any. + + :param iface: network interface on which address(es) are expected to + be found. + :param inet_type: inet address family + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :return: list of ip addresses + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + + if not exc_list: + exc_list = [] + + try: + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception("Unknown inet type '%s'" % str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("Interface '%s' not found " % (iface)) + else: + return [] + + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." 
% + (iface, inet_type)) + + return sorted(addresses) + + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. + + :param iface: network interface on which ipv6 address(es) are expected to + be found. + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :param dynamic_only: only recognise dynamic addresses + :return: list of ipv6 addresses + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd).decode('UTF-8') + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' does not have a scope global " + "non-temporary ipv6 address." 
% iface)
+
+    return []
+
+
+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
+    """Return a list of bridges on the system."""
+    b_regex = "%s/*/bridge" % vnic_dir
+    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
+
+
+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
+    """Return a list of nics comprising a given bridge on the system."""
+    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
+    return [x.split('/')[-1] for x in glob.glob(brif_regex)]
+
+
+def is_bridge_member(nic):
+    """Check if a given nic is a member of a bridge."""
+    for bridge in get_bridges():
+        if nic in get_bridge_nics(bridge):
+            return True
+
+    return False
+
+
+def is_ip(address):
+    """
+    Returns True if address is a valid IP address.
+    """
+    try:
+        # Test to see if already an IPv4/IPv6 address
+        address = netaddr.IPAddress(address)
+        return True
+    except (netaddr.AddrFormatError, ValueError):
+        return False
+
+
+def ns_query(address):
+    try:
+        import dns.resolver
+    except ImportError:
+        if six.PY2:
+            apt_install('python-dnspython', fatal=True)
+        else:
+            apt_install('python3-dnspython', fatal=True)
+        import dns.resolver
+
+    if isinstance(address, dns.name.Name):
+        rtype = 'PTR'
+    elif isinstance(address, six.string_types):
+        rtype = 'A'
+    else:
+        return None
+
+    try:
+        answers = dns.resolver.query(address, rtype)
+    except dns.resolver.NXDOMAIN:
+        return None
+
+    if answers:
+        return str(answers[0])
+    return None
+
+
+def get_host_ip(hostname, fallback=None):
+    """
+    Resolves the IP for a given hostname, or returns
+    the input if it is already an IP.
+    """
+    if is_ip(hostname):
+        return hostname
+
+    ip_addr = ns_query(hostname)
+    if not ip_addr:
+        try:
+            ip_addr = socket.gethostbyname(hostname)
+        except Exception:
+            log("Failed to resolve hostname '%s'" % (hostname),
+                level=WARNING)
+            return fallback
+    return ip_addr
+
+
+def get_hostname(address, fqdn=True):
+    """
+    Resolves hostname for given IP, or returns the input
+    if it is already a hostname.
+    """
+    if is_ip(address):
+        try:
+            import dns.reversename
+        except ImportError:
+            if six.PY2:
+                apt_install("python-dnspython", fatal=True)
+            else:
+                apt_install("python3-dnspython", fatal=True)
+            import dns.reversename
+
+        rev = dns.reversename.from_address(address)
+        result = ns_query(rev)
+
+        if not result:
+            try:
+                result = socket.gethostbyaddr(address)[0]
+            except Exception:
+                return None
+    else:
+        result = address
+
+    if fqdn:
+        # strip trailing .
+        if result.endswith('.'):
+            return result[:-1]
+        else:
+            return result
+    else:
+        return result.split('.')[0]
+
+
+def port_has_listener(address, port):
+    """
+    Returns True if the address:port is open and being listened to,
+    else False.
+
+    @param address: an IP address or hostname
+    @param port: integer port
+
+    Note: calls 'nc' via a subprocess
+    """
+    cmd = ['nc', '-z', address, str(port)]
+    result = subprocess.call(cmd)
+    return not(bool(result))
+
+
+def assert_charm_supports_ipv6():
+    """Check whether this charm is able to support IPv6."""
+    release = lsb_release()['DISTRIB_CODENAME'].lower()
+    if CompareHostReleases(release) < "trusty":
+        raise Exception("IPv6 is not supported in the charms for Ubuntu "
+                        "versions less than Trusty 14.04")
+
+
+def get_relation_ip(interface, cidr_network=None):
+    """Return this unit's IP for the given interface.
+
+    Allow for an arbitrary interface to use with network-get to select an IP.
+    Handle all address selection options including passed cidr network and
+    IPv6.
+
+    Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8')
+
+    @param interface: string name of the relation.
+    @param cidr_network: string CIDR Network to select an address from.
+    @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
+    @returns IPv6 or IPv4 address
+    """
+    # Select the interface address first
+    # For possible use as a fallback below with get_address_in_network
+    try:
+        # Get the interface specific IP
+        address = network_get_primary_address(interface)
+    except NotImplementedError:
+        # If network-get is not available
+        address = get_host_ip(unit_get('private-address'))
+    except NoNetworkBinding:
+        log("No network binding for {}".format(interface), WARNING)
+        address = get_host_ip(unit_get('private-address'))
+
+    if config('prefer-ipv6'):
+        # Currently IPv6 has priority, eventually we want IPv6 to just be
+        # another network space.
+        assert_charm_supports_ipv6()
+        return get_ipv6_addr()[0]
+    elif cidr_network:
+        # If a specific CIDR network is passed get the address from that
+        # network.
+        return get_address_in_network(cidr_network, address)
+
+    # Return the interface address
+    return address
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd001bc2eaa9b1b511bbd1816e1089521935a50a
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/__init__.py
@@ -0,0 +1,541 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+''' Helpers for interacting with OpenvSwitch '''
+import hashlib
+import subprocess
+import os
+import six
+
+from charmhelpers.fetch import apt_install
+
+
+from charmhelpers.core.hookenv import (
+    log, WARNING, INFO, DEBUG
+)
+from charmhelpers.core.host import (
+    service
+)
+
+
+BRIDGE_TEMPLATE = """\
+# This veth pair is required when neutron data-port is mapped to an existing linux bridge.
lp:1635067
+
+auto {linuxbridge_port}
+iface {linuxbridge_port} inet manual
+    pre-up ip link add name {linuxbridge_port} type veth peer name {ovsbridge_port}
+    pre-up ip link set {ovsbridge_port} master {bridge}
+    pre-up ip link set {ovsbridge_port} up
+    up ip link set {linuxbridge_port} up
+    down ip link del {linuxbridge_port}
+"""
+
+MAX_KERNEL_INTERFACE_NAME_LEN = 15
+
+
+def get_bridges():
+    """Return list of the bridges on the default openvswitch
+
+    :returns: List of bridge names
+    :rtype: List[str]
+    :raises: subprocess.CalledProcessError if ovs-vsctl fails
+    """
+    cmd = ["ovs-vsctl", "list-br"]
+    lines = subprocess.check_output(cmd).decode('utf-8').split("\n")
+    maybe_bridges = [l.strip() for l in lines]
+    return [b for b in maybe_bridges if b]
+
+
+def get_bridge_ports(name):
+    """Return a list of the ports on a named bridge
+
+    :param name: the name of the bridge to list
+    :type name: str
+    :returns: List of ports on the named bridge
+    :rtype: List[str]
+    :raises: subprocess.CalledProcessError if the ovs-vsctl command fails.  If
+        the named bridge doesn't exist, then the exception will be raised.
+    """
+    cmd = ["ovs-vsctl", "--", "list-ports", name]
+    lines = subprocess.check_output(cmd).decode('utf-8').split("\n")
+    maybe_ports = [l.strip() for l in lines]
+    return [p for p in maybe_ports if p]
+
+
+def get_bridges_and_ports_map():
+    """Return dictionary of bridge to ports for the default openvswitch
+
+    :returns: a mapping of bridge name to a list of ports.
+    :rtype: Dict[str, List[str]]
+    :raises: subprocess.CalledProcessError if any of the underlying ovs-vsctl
+        commands fail.
+    """
+    return {b: get_bridge_ports(b) for b in get_bridges()}
+
+
+def _dict_to_vsctl_set(data, table, entity):
+    """Helper that takes dictionary and provides ``ovs-vsctl set`` commands
+
+    :param data: Additional data to attach to interface
+        The keys in the data dictionary map directly to column names in the
+        OpenvSwitch table specified as defined in DB-SCHEMA [0] referenced in
+        RFC 7047 [1]
+
+        There are some established conventions for keys in the external-ids
+        column of various tables, consult the OVS Integration Guide [2] for
+        more details.
+
+        NOTE(fnordahl): Technically the ``external-ids`` column is called
+        ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
+        behaviour of transforming dashes to underscores for us [3] so we can
+        have a more pleasant data structure.
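+
+        Example (illustrative):
+
+            list(_dict_to_vsctl_set(
+                {'external-ids': {'charm': 'managed'}}, 'Bridge', 'br-ex'))
+            # -> [('--', 'set', 'Bridge', 'br-ex',
+            #      'external-ids:charm=managed')]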
+ + 0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf + 1: https://tools.ietf.org/html/rfc7047 + 2: http://docs.openvswitch.org/en/latest/topics/integration/ + 3: https://github.com/openvswitch/ovs/blob/ + 20dac08fdcce4b7fda1d07add3b346aa9751cfbc/ + lib/db-ctl-base.c#L189-L215 + :type data: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param table: Name of table to operate on + :type table: str + :param entity: Name of entity to operate on + :type entity: str + :returns: '--' separated ``ovs-vsctl set`` commands + :rtype: Iterator[Tuple[str, str, str, str, str]] + """ + for (k, v) in data.items(): + if isinstance(v, dict): + entries = { + '{}:{}'.format(k, dk): dv for (dk, dv) in v.items()} + else: + entries = {k: v} + for (colk, colv) in entries.items(): + yield ('--', 'set', table, entity, '{}={}'.format(colk, colv)) + + +def add_bridge(name, datapath_type=None, brdata=None, exclusive=False): + """Add the named bridge to openvswitch and set/update bridge data for it + + :param name: Name of bridge to create + :type name: str + :param datapath_type: Add datapath_type to bridge (DEPRECATED, use brdata) + :type datapath_type: Optional[str] + :param brdata: Additional data to attach to bridge + The keys in the brdata dictionary map directly to column names in the + OpenvSwitch bridge table as defined in DB-SCHEMA [0] referenced in + RFC 7047 [1] + + There are some established conventions for keys in the external-ids + column of various tables, consult the OVS Integration Guide [2] for + more details. + + NOTE(fnordahl): Technically the ``external-ids`` column is called + ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s + behaviour of transforming dashes to underscores for us [3] so we can + have a more pleasant data structure. 
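+
+        Example (illustrative):
+
+            add_bridge('br-ex', brdata={'external-ids': {'charm': 'managed'}})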
+ + 0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf + 1: https://tools.ietf.org/html/rfc7047 + 2: http://docs.openvswitch.org/en/latest/topics/integration/ + 3: https://github.com/openvswitch/ovs/blob/ + 20dac08fdcce4b7fda1d07add3b346aa9751cfbc/ + lib/db-ctl-base.c#L189-L215 + :type brdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param exclusive: If True, raise exception if bridge exists + :type exclusive: bool + :raises: subprocess.CalledProcessError + """ + log('Creating bridge {}'.format(name)) + cmd = ['ovs-vsctl', '--'] + if not exclusive: + cmd.append('--may-exist') + cmd.extend(('add-br', name)) + if brdata: + for setcmd in _dict_to_vsctl_set(brdata, 'bridge', name): + cmd.extend(setcmd) + if datapath_type is not None: + log('DEPRECATION WARNING: add_bridge called with datapath_type, ' + 'please use the brdata keyword argument instead.') + cmd += ['--', 'set', 'bridge', name, + 'datapath_type={}'.format(datapath_type)] + subprocess.check_call(cmd) + + +def del_bridge(name): + """Delete the named bridge from openvswitch + + :param name: Name of bridge to remove + :type name: str + :raises: subprocess.CalledProcessError + """ + log('Deleting bridge {}'.format(name)) + subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name]) + + +def add_bridge_port(name, port, promisc=False, ifdata=None, exclusive=False, + linkup=True, portdata=None): + """Add port to bridge and optionally set/update interface data for it + + :param name: Name of bridge to attach port to + :type name: str + :param port: Name of port as represented in netdev + :type port: str + :param promisc: Whether to set promiscuous mode on interface + True=on, False=off, None leave untouched + :type promisc: Optional[bool] + :param ifdata: Additional data to attach to interface + The keys in the ifdata dictionary map directly to column names in the + OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in + RFC 7047 [1] + + There are some established conventions for keys in the external-ids + column of various tables, consult the OVS Integration Guide [2] for + more details. + + NOTE(fnordahl): Technically the ``external-ids`` column is called + ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s + behaviour of transforming dashes to underscores for us [3] so we can + have a more pleasant data structure. + + 0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf + 1: https://tools.ietf.org/html/rfc7047 + 2: http://docs.openvswitch.org/en/latest/topics/integration/ + 3: https://github.com/openvswitch/ovs/blob/ + 20dac08fdcce4b7fda1d07add3b346aa9751cfbc/ + lib/db-ctl-base.c#L189-L215 + :type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param exclusive: If True, raise exception if port exists + :type exclusive: bool + :param linkup: Bring link up + :type linkup: bool + :param portdata: Additional data to attach to port. Similar to ifdata. 
+ :type portdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :raises: subprocess.CalledProcessError + """ + cmd = ['ovs-vsctl', '--'] + if not exclusive: + cmd.append('--may-exist') + cmd.extend(('add-port', name, port)) + for ovs_table, data in (('Interface', ifdata), ('Port', portdata)): + if data: + for setcmd in _dict_to_vsctl_set(data, ovs_table, port): + cmd.extend(setcmd) + + log('Adding port {} to bridge {}'.format(port, name)) + subprocess.check_call(cmd) + if linkup: + # This is mostly a workaround for CI environments, in the real world + # the bare metal provider would most likely have configured and brought + # up the link for us. + subprocess.check_call(["ip", "link", "set", port, "up"]) + if promisc: + subprocess.check_call(["ip", "link", "set", port, "promisc", "on"]) + elif promisc is False: + subprocess.check_call(["ip", "link", "set", port, "promisc", "off"]) + + +def del_bridge_port(name, port): + """Delete a port from the named openvswitch bridge + + :param name: Name of bridge to remove port from + :type name: str + :param port: Name of port to remove + :type port: str + :raises: subprocess.CalledProcessError + """ + log('Deleting port {} from bridge {}'.format(port, name)) + subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port", + name, port]) + subprocess.check_call(["ip", "link", "set", port, "down"]) + subprocess.check_call(["ip", "link", "set", port, "promisc", "off"]) + + +def add_bridge_bond(bridge, port, interfaces, portdata=None, ifdatamap=None, + exclusive=False): + """Add bonded port in bridge from interfaces. + + :param bridge: Name of bridge to add bonded port to + :type bridge: str + :param port: Name of created port + :type port: str + :param interfaces: Underlying interfaces that make up the bonded port + :type interfaces: Iterator[str] + :param portdata: Additional data to attach to the created bond port + See _dict_to_vsctl_set() for detailed description. + Example: + { + 'bond-mode': 'balance-tcp', + 'lacp': 'active', + 'other-config': { + 'lacp-time': 'fast', + }, + } + :type portdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param ifdatamap: Map of data to attach to created bond interfaces + See _dict_to_vsctl_set() for detailed description. 
+
+        Example:
+            {
+                'eth0': {
+                    'type': 'dpdk',
+                    'mtu-request': '9000',
+                    'options': {
+                        'dpdk-devargs': '0000:01:00.0',
+                    },
+                },
+            }
+    :type ifdatamap: Optional[Dict[str,Dict[str,Union[str,Dict[str,str]]]]]
+    :param exclusive: If True, raise exception if port exists
+    :type exclusive: bool
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['ovs-vsctl', '--']
+    if not exclusive:
+        cmd.append('--may-exist')
+    cmd.extend(('add-bond', bridge, port))
+    cmd.extend(interfaces)
+    if portdata:
+        for setcmd in _dict_to_vsctl_set(portdata, 'port', port):
+            cmd.extend(setcmd)
+    if ifdatamap:
+        for ifname, ifdata in ifdatamap.items():
+            for setcmd in _dict_to_vsctl_set(ifdata, 'Interface', ifname):
+                cmd.extend(setcmd)
+    subprocess.check_call(cmd)
+
+
+def add_ovsbridge_linuxbridge(name, bridge, ifdata=None):
+    """Add linux bridge to the named openvswitch bridge
+
+    :param name: Name of the ovs bridge the Linux bridge will be attached to
+    :type name: str
+    :param bridge: Name of the Linux bridge to attach
+    :type bridge: str
+    :param ifdata: Additional data to attach to interface
+        The keys in the ifdata dictionary map directly to column names in the
+        OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in
+        RFC 7047 [1]
+
+        There are some established conventions for keys in the external-ids
+        column of various tables, consult the OVS Integration Guide [2] for
+        more details.
+
+        NOTE(fnordahl): Technically the ``external-ids`` column is called
+        ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
+        behaviour of transforming dashes to underscores for us [3] so we can
+        have a more pleasant data structure.
+
+        0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
+        1: https://tools.ietf.org/html/rfc7047
+        2: http://docs.openvswitch.org/en/latest/topics/integration/
+        3: https://github.com/openvswitch/ovs/blob/
+               20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
+               lib/db-ctl-base.c#L189-L215
+    :type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
+    """
+    try:
+        import netifaces
+    except ImportError:
+        if six.PY2:
+            apt_install('python-netifaces', fatal=True)
+        else:
+            apt_install('python3-netifaces', fatal=True)
+        import netifaces
+
+    # NOTE(jamespage):
+    # Older code supported addition of a linuxbridge directly
+    # to an OVS bridge; ensure we don't break uses on upgrade
+    existing_ovs_bridge = port_to_br(bridge)
+    if existing_ovs_bridge is not None:
+        log('Linuxbridge {} is already directly in use'
+            ' by OVS bridge {}'.format(bridge, existing_ovs_bridge),
+            level=INFO)
+        return
+
+    # NOTE(jamespage):
+    # preserve existing naming because interfaces may already exist.
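+    # The veth pair is named after the two bridges it connects; when either
+    # name would exceed the kernel's 15-character interface name limit, a
+    # name derived from a hash of the Linux bridge name is used instead.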
+ ovsbridge_port = "veth-" + name + linuxbridge_port = "veth-" + bridge + if (len(ovsbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN or + len(linuxbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN): + # NOTE(jamespage): + # use parts of hashed bridgename (openstack style) when + # a bridge name exceeds 15 chars + hashed_bridge = hashlib.sha256(bridge.encode('UTF-8')).hexdigest() + base = '{}-{}'.format(hashed_bridge[:8], hashed_bridge[-2:]) + ovsbridge_port = "cvo{}".format(base) + linuxbridge_port = "cvb{}".format(base) + + interfaces = netifaces.interfaces() + for interface in interfaces: + if interface == ovsbridge_port or interface == linuxbridge_port: + log('Interface {} already exists'.format(interface), level=INFO) + return + + log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name), + level=INFO) + + check_for_eni_source() + + with open('/etc/network/interfaces.d/{}.cfg'.format( + linuxbridge_port), 'w') as config: + config.write(BRIDGE_TEMPLATE.format(linuxbridge_port=linuxbridge_port, + ovsbridge_port=ovsbridge_port, + bridge=bridge)) + + subprocess.check_call(["ifup", linuxbridge_port]) + add_bridge_port(name, linuxbridge_port, ifdata=ifdata) + + +def is_linuxbridge_interface(port): + ''' Check if the interface is a linuxbridge bridge + :param port: Name of an interface to check whether it is a Linux bridge + :returns: True if port is a Linux bridge''' + + if os.path.exists('/sys/class/net/' + port + '/bridge'): + log('Interface {} is a Linux bridge'.format(port), level=DEBUG) + return True + else: + log('Interface {} is not a Linux bridge'.format(port), level=DEBUG) + return False + + +def set_manager(manager): + ''' Set the controller for the local openvswitch ''' + log('Setting manager for local ovs to {}'.format(manager)) + subprocess.check_call(['ovs-vsctl', 'set-manager', + 'ssl:{}'.format(manager)]) + + +def set_Open_vSwitch_column_value(column_value): + """ + Calls ovs-vsctl and sets the 'column_value' in the Open_vSwitch table. + + :param column_value: + See http://www.openvswitch.org//ovs-vswitchd.conf.db.5.pdf for + details of the relevant values. 
+    :type column_value: str
+    :raises subprocess.CalledProcessError: possibly ovsdb-server is not
+        running
+    """
+    log('Setting {} in the Open_vSwitch table'.format(column_value))
+    subprocess.check_call(['ovs-vsctl', 'set', 'Open_vSwitch', '.', column_value])
+
+
+CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
+
+
+def get_certificate():
+    ''' Read openvswitch certificate from disk '''
+    if os.path.exists(CERT_PATH):
+        log('Reading ovs certificate from {}'.format(CERT_PATH))
+        with open(CERT_PATH, 'r') as cert:
+            full_cert = cert.read()
+            begin_marker = "-----BEGIN CERTIFICATE-----"
+            end_marker = "-----END CERTIFICATE-----"
+            begin_index = full_cert.find(begin_marker)
+            end_index = full_cert.rfind(end_marker)
+            if end_index == -1 or begin_index == -1:
+                raise RuntimeError("Certificate does not contain valid begin"
+                                   " and end markers.")
+            full_cert = full_cert[begin_index:(end_index + len(end_marker))]
+            return full_cert
+    else:
+        log('Certificate not found', level=WARNING)
+        return None
+
+
+def check_for_eni_source():
+    ''' Juju removes the source line when setting up interfaces,
+    replace if missing '''
+
+    with open('/etc/network/interfaces', 'r') as eni:
+        for line in eni:
+            # strip the trailing newline so the sourcing line is found
+            # wherever it appears in the file
+            if line.strip() == 'source /etc/network/interfaces.d/*':
+                return
+    with open('/etc/network/interfaces', 'a') as eni:
+        eni.write('\nsource /etc/network/interfaces.d/*')
+
+
+def full_restart():
+    ''' Full restart and reload of openvswitch '''
+    if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
+        service('start', 'openvswitch-force-reload-kmod')
+    else:
+        service('force-reload-kmod', 'openvswitch-switch')
+
+
+def enable_ipfix(bridge, target,
+                 cache_active_timeout=60,
+                 cache_max_flows=128,
+                 sampling=64):
+    '''Enable IPFIX on bridge to target.
+    :param bridge: Bridge to monitor
+    :param target: IPFIX remote endpoint
+    :param cache_active_timeout: The maximum period in seconds for
+                                 which an IPFIX flow record is cached
+                                 and aggregated before being sent
+    :param cache_max_flows: The maximum number of IPFIX flow records
+                            that can be cached at a time
+    :param sampling: The rate at which packets should be sampled and
+                     sent to each target collector
+    '''
+    cmd = [
+        'ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
+        '--id=@i', 'create', 'IPFIX',
+        'targets="{}"'.format(target),
+        'sampling={}'.format(sampling),
+        'cache_active_timeout={}'.format(cache_active_timeout),
+        'cache_max_flows={}'.format(cache_max_flows),
+    ]
+    log('Enabling IPFIX on {}.'.format(bridge))
+    subprocess.check_call(cmd)
+
+
+def disable_ipfix(bridge):
+    '''Disable IPFIX on target bridge.
+    :param bridge: Bridge to modify
+    '''
+    cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
+    subprocess.check_call(cmd)
+
+
+def port_to_br(port):
+    '''Determine the bridge that contains a port
+    :param port: Name of port to check for
+    :returns str: OVS bridge containing port or None if not found
+    '''
+    try:
+        return subprocess.check_output(
+            ['ovs-vsctl', 'port-to-br', port]
+        ).decode('UTF-8').strip()
+    except subprocess.CalledProcessError:
+        return None
+
+
+def ovs_appctl(target, args):
+    """Run `ovs-appctl` for target with args and return output.
+
+    :param target: Name of daemon to contact.  Unless target begins with '/',
+                   `ovs-appctl` looks for a pidfile and will build the path to
+                   a /var/run/openvswitch/target.pid.ctl for you.
+    :type target: str
+    :param args: Command and arguments to pass to `ovs-appctl`
+    :type args: Tuple[str, ...]
+ :returns: Output from command + :rtype: str + :raises: subprocess.CalledProcessError + """ + cmd = ['ovs-appctl', '-t', target] + cmd.extend(args) + return subprocess.check_output(cmd, universal_newlines=True) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/ovn.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/ovn.py new file mode 100644 index 0000000000000000000000000000000000000000..2075f11acfbeb3d0d614cf3db5a1b535bf128824 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/ovn.py @@ -0,0 +1,233 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import subprocess +import uuid + +from . import utils + + +OVN_RUNDIR = '/var/run/ovn' +OVN_SYSCONFDIR = '/etc/ovn' + + +def ovn_appctl(target, args, rundir=None, use_ovs_appctl=False): + """Run ovn/ovs-appctl for target with args and return output. + + :param target: Name of daemon to contact. Unless target begins with '/', + `ovn-appctl` looks for a pidfile and will build the path to + a /var/run/ovn/target.pid.ctl for you. + :type target: str + :param args: Command and arguments to pass to `ovn-appctl` + :type args: Tuple[str, ...] + :param rundir: Override path to sockets + :type rundir: Optional[str] + :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03, + set this to True to use ``ovs-appctl`` instead. + :type use_ovs_appctl: bool + :returns: Output from command + :rtype: str + :raises: subprocess.CalledProcessError + """ + # NOTE(fnordahl): The ovsdb-server processes for the OVN databases use a + # non-standard naming scheme for their daemon control socket and we need + # to pass the full path to the socket. + if target in ('ovnnb_db', 'ovnsb_db',): + target = os.path.join(rundir or OVN_RUNDIR, target + '.ctl') + + if use_ovs_appctl: + tool = 'ovs-appctl' + else: + tool = 'ovn-appctl' + + return utils._run(tool, '-t', target, *args) + + +class OVNClusterStatus(object): + + def __init__(self, name, cluster_id, server_id, address, status, role, + term, leader, vote, election_timer, log, + entries_not_yet_committed, entries_not_yet_applied, + connections, servers): + """Initialize and populate OVNClusterStatus object. + + Use class initializer so we can define types in a compatible manner. 
+
+        :param name: Name of schema used for database
+        :type name: str
+        :param cluster_id: UUID of cluster
+        :type cluster_id: uuid.UUID
+        :param server_id: UUID of server
+        :type server_id: uuid.UUID
+        :param address: OVSDB connection method
+        :type address: str
+        :param status: Status text
+        :type status: str
+        :param role: Role of server
+        :type role: str
+        :param term: Election term
+        :type term: int
+        :param leader: Short form UUID of leader
+        :type leader: str
+        :param vote: Vote
+        :type vote: str
+        :param election_timer: Current value of election timer
+        :type election_timer: int
+        :param log: Log
+        :type log: str
+        :param entries_not_yet_committed: Entries not yet committed
+        :type entries_not_yet_committed: int
+        :param entries_not_yet_applied: Entries not yet applied
+        :type entries_not_yet_applied: int
+        :param connections: Connections
+        :type connections: str
+        :param servers: Servers in the cluster
+            [('0ea6', 'ssl:192.0.2.42:6643')]
+        :type servers: List[Tuple[str,str]]
+        """
+        self.name = name
+        self.cluster_id = cluster_id
+        self.server_id = server_id
+        self.address = address
+        self.status = status
+        self.role = role
+        self.term = term
+        self.leader = leader
+        self.vote = vote
+        self.election_timer = election_timer
+        self.log = log
+        self.entries_not_yet_committed = entries_not_yet_committed
+        self.entries_not_yet_applied = entries_not_yet_applied
+        self.connections = connections
+        self.servers = servers
+
+    def __eq__(self, other):
+        return (
+            self.name == other.name and
+            self.cluster_id == other.cluster_id and
+            self.server_id == other.server_id and
+            self.address == other.address and
+            self.status == other.status and
+            self.role == other.role and
+            self.term == other.term and
+            self.leader == other.leader and
+            self.vote == other.vote and
+            self.election_timer == other.election_timer and
+            self.log == other.log and
+            self.entries_not_yet_committed == other.entries_not_yet_committed and
+            self.entries_not_yet_applied == other.entries_not_yet_applied and
+            self.connections == other.connections and
+            self.servers == other.servers)
+
+    @property
+    def is_cluster_leader(self):
+        """Report whether this server is the cluster leader.
+
+        :returns: Whether target is cluster leader
+        :rtype: bool
+        """
+        return self.leader == 'self'
+
+
+def cluster_status(target, schema=None, use_ovs_appctl=False, rundir=None):
+    """Retrieve status information from clustered OVSDB.
+
+    :param target: Usually one of 'ovsdb-server', 'ovnnb_db', 'ovnsb_db', can
+                   also be full path to control socket.
+    :type target: str
+    :param schema: Database schema name, deduced from target if not provided
+    :type schema: Optional[str]
+    :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03,
+                           set this to True to use ``ovs-appctl`` instead.
+    :type use_ovs_appctl: bool
+    :param rundir: Override path to sockets
+    :type rundir: Optional[str]
+    :returns: cluster status data object
+    :rtype: OVNClusterStatus
+    :raises: subprocess.CalledProcessError, KeyError, RuntimeError
+    """
+    schema_map = {
+        'ovnnb_db': 'OVN_Northbound',
+        'ovnsb_db': 'OVN_Southbound',
+    }
+    if schema and schema not in schema_map.keys():
+        raise RuntimeError('Unknown schema provided: "{}"'.format(schema))
+
+    status = {}
+    k = ''
+    for line in ovn_appctl(target,
+                           ('cluster/status', schema or schema_map[target]),
+                           rundir=rundir,
+                           use_ovs_appctl=use_ovs_appctl).splitlines():
+        if k and line.startswith(' '):
+            # there is no key which means this is an instance of a multi-line/
+            # multi-value item, populate the List which is already stored under
+            # the key.
+            if k == 'servers':
+                status[k].append(
+                    tuple(line.replace(')', '').lstrip().split()[0:4:3]))
+            else:
+                status[k].append(line.lstrip())
+        elif ':' in line:
+            # this is a line with a key
+            k, v = line.split(':', 1)
+            k = k.lower()
+            k = k.replace(' ', '_')
+            if v:
+                # this is a line with both key and value
+                if k in ('cluster_id', 'server_id',):
+                    v = v.replace('(', '')
+                    v = v.replace(')', '')
+                    status[k] = tuple(v.split())
+                else:
+                    status[k] = v.lstrip()
+            else:
+                # this is a line with only key which means a multi-line/
+                # multi-value item.  Store key as List which will be
+                # populated on subsequent iterations.
+                status[k] = []
+    return OVNClusterStatus(
+        status['name'],
+        uuid.UUID(status['cluster_id'][1]),
+        uuid.UUID(status['server_id'][1]),
+        status['address'],
+        status['status'],
+        status['role'],
+        int(status['term']),
+        status['leader'],
+        status['vote'],
+        int(status['election_timer']),
+        status['log'],
+        int(status['entries_not_yet_committed']),
+        int(status['entries_not_yet_applied']),
+        status['connections'],
+        status['servers'])
+
+
+def is_northd_active():
+    """Query `ovn-northd` for active status.
+
+    Note that the active status information for ovn-northd is available for
+    OVN 20.03 and onward.
+
+    :returns: True if local `ovn-northd` instance is active, False otherwise
+    :rtype: bool
+    """
+    try:
+        for line in ovn_appctl('ovn-northd', ('status',)).splitlines():
+            if line.startswith('Status:') and 'active' in line:
+                return True
+    except subprocess.CalledProcessError:
+        pass
+    return False
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/ovsdb.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/ovsdb.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e50bc36333bde56674a82ad6c88e0d5de44ee07
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/ovsdb.py
@@ -0,0 +1,206 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import uuid
+
+from . import utils
+
+
+class SimpleOVSDB(object):
+    """Simple interface to OVSDB through the use of command line tools.
+
+    OVS and OVN are managed through a set of databases.  These databases have
+    similar command line tools to manage them.  We make use of the similarity
+    to provide a generic class that can be used to manage them.
+
+    The OpenvSwitch project does provide a Python API, but on the surface it
+    appears to be a bit too involved for our simple use case.
+
+    Examples:
+        sbdb = SimpleOVSDB('ovn-sbctl')
+        for chs in sbdb.chassis:
+            print(chs)
+
+        ovsdb = SimpleOVSDB('ovs-vsctl')
+        for br in ovsdb.bridge:
+            if br['name'] == 'br-test':
+                ovsdb.bridge.set(br['uuid'], 'external_ids:charm', 'managed')
+    """
+
+    # For validation we keep a complete map of currently known good tool and
+    # table combinations.  This requires maintenance down the line whenever
+    # upstream adds things that downstream wants, and the cost of maintaining
+    # that will most likely be lower than the cost of finding the needle in
+    # the haystack whenever downstream code misspells something.
+    _tool_table_map = {
+        'ovs-vsctl': (
+            'autoattach',
+            'bridge',
+            'ct_timeout_policy',
+            'ct_zone',
+            'controller',
+            'datapath',
+            'flow_sample_collector_set',
+            'flow_table',
+            'ipfix',
+            'interface',
+            'manager',
+            'mirror',
+            'netflow',
+            'open_vswitch',
+            'port',
+            'qos',
+            'queue',
+            'ssl',
+            'sflow',
+        ),
+        'ovn-nbctl': (
+            'acl',
+            'address_set',
+            'connection',
+            'dhcp_options',
+            'dns',
+            'forwarding_group',
+            'gateway_chassis',
+            'ha_chassis',
+            'ha_chassis_group',
+            'load_balancer',
+            'load_balancer_health_check',
+            'logical_router',
+            'logical_router_policy',
+            'logical_router_port',
+            'logical_router_static_route',
+            'logical_switch',
+            'logical_switch_port',
+            'meter',
+            'meter_band',
+            'nat',
+            'nb_global',
+            'port_group',
+            'qos',
+            'ssl',
+        ),
+        'ovn-sbctl': (
+            'address_set',
+            'chassis',
+            'connection',
+            'controller_event',
+            'dhcp_options',
+            'dhcpv6_options',
+            'dns',
+            'datapath_binding',
+            'encap',
+            'gateway_chassis',
+            'ha_chassis',
+            'ha_chassis_group',
+            'igmp_group',
+            'ip_multicast',
+            'logical_flow',
+            'mac_binding',
+            'meter',
+            'meter_band',
+            'multicast_group',
+            'port_binding',
+            'port_group',
+            'rbac_permission',
+            'rbac_role',
+            'sb_global',
+            'ssl',
+            'service_monitor',
+        ),
+    }
+
+    def __init__(self, tool):
+        """SimpleOVSDB constructor.
+
+        :param tool: Which tool with database commands to operate on.
+                     Usually one of `ovs-vsctl`, `ovn-nbctl`, `ovn-sbctl`
+        :type tool: str
+        """
+        if tool not in self._tool_table_map:
+            raise RuntimeError(
+                'tool must be one of "{}"'.format(self._tool_table_map.keys()))
+        self._tool = tool
+
+    def __getattr__(self, table):
+        if table not in self._tool_table_map[self._tool]:
+            raise AttributeError(
+                'table "{}" not known for use with "{}"'
+                .format(table, self._tool))
+        return self.Table(self._tool, table)
+
+    class Table(object):
+        """Methods to interact with contents of OVSDB tables.
+
+        NOTE: At the time of this writing ``find`` is the only command
+        line argument to OVSDB manipulating tools that actually supports
+        JSON output.
+        """
+
+        def __init__(self, tool, table):
+            """SimpleOVSDBTable constructor.
+
+            :param tool: Which tool with database commands to operate on
+            :type tool: str
+            :param table: Which table to operate on
+            :type table: str
+            """
+            self._tool = tool
+            self._table = table
+
+        def _find_tbl(self, condition=None):
+            """Run and parse output of OVSDB `find` command.
+
+            :param condition: An optional RFC 7047 5.1 match condition
+            :type condition: Optional[str]
+            :returns: Dictionary with data
+            :rtype: Dict[str, any]
+            """
+            # When requesting json formatted output from OVS commands,
+            # internal OVSDB notation may occur that requires further
+            # deserializing.
+            # Reference: https://tools.ietf.org/html/rfc7047#section-5.1
+            ovs_type_cb_map = {
+                'uuid': uuid.UUID,
+                # FIXME sets also appear to sometimes contain type/value tuples
+                'set': list,
+                'map': dict,
+            }
+            cmd = [self._tool, '-f', 'json', 'find', self._table]
+            if condition:
+                cmd.append(condition)
+            output = utils._run(*cmd)
+            data = json.loads(output)
+            for row in data['data']:
+                values = []
+                for col in row:
+                    if isinstance(col, list):
+                        f = ovs_type_cb_map.get(col[0], str)
+                        values.append(f(col[1]))
+                    else:
+                        values.append(col)
+                yield dict(zip(data['headings'], values))
+
+        def __iter__(self):
+            return self._find_tbl()
+
+        def clear(self, rec, col):
+            utils._run(self._tool, 'clear', self._table, rec, col)
+
+        def find(self, condition):
+            return self._find_tbl(condition=condition)
+
+        def remove(self, rec, col, value):
+            utils._run(self._tool, 'remove', self._table, rec, col, value)
+
+        def set(self, rec, col, value):
+            utils._run(self._tool, 'set', self._table, rec,
+                       '{}={}'.format(col, value))
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..53c9b4ddab6d68d2478d4161e658c70e2caa6a74
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ovs/utils.py
@@ -0,0 +1,26 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import subprocess
+
+
+def _run(*args):
+    """Run a process, check result, capture decoded output from STDOUT.
+
+    :param args: Command and arguments to run
+    :type args: Tuple[str, ...]
+    :returns: Decoded output from STDOUT
+    :rtype: str
+    :raises: subprocess.CalledProcessError
+    """
+    return subprocess.check_output(args, universal_newlines=True)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ufw.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ufw.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9bf7c9df5576615e23225a7fdb6c11a28931ba4
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/network/ufw.py
@@ -0,0 +1,386 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains helpers to add and remove ufw rules. + +Examples: + +- open SSH port for subnet 10.0.3.0/24: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp') + +- open service by name as defined in /etc/services: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.service('ssh', 'open') + +- close service by port number: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.service('4949', 'close') # munin +""" +import os +import re +import subprocess + +from charmhelpers.core import hookenv +from charmhelpers.core.kernel import modprobe, is_module_loaded + +__author__ = "Felipe Reyes " + + +class UFWError(Exception): + pass + + +class UFWIPv6Error(UFWError): + pass + + +def is_enabled(): + """ + Check if `ufw` is enabled + + :returns: True if ufw is enabled + """ + output = subprocess.check_output(['ufw', 'status'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + + m = re.findall(r'^Status: active\n', output, re.M) + + return len(m) >= 1 + + +def is_ipv6_ok(soft_fail=False): + """ + Check if IPv6 support is present and ip6tables functional + + :param soft_fail: If set to True and IPv6 support is broken, then reports + that the host doesn't have IPv6 support, otherwise a + UFWIPv6Error exception is raised. + :returns: True if IPv6 is working, False otherwise + """ + + # do we have IPv6 in the machine? + if os.path.isdir('/proc/sys/net/ipv6'): + # is ip6tables kernel module loaded? + if not is_module_loaded('ip6_tables'): + # ip6tables support isn't complete, let's try to load it + try: + modprobe('ip6_tables') + # great, we can load the module + return True + except subprocess.CalledProcessError as ex: + hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, + level="WARN") + # we are in a world where ip6tables isn't working + if soft_fail: + # so we inform that the machine doesn't have IPv6 + return False + else: + raise UFWIPv6Error("IPv6 firewall support broken") + else: + # the module is present :) + return True + + else: + # the system doesn't have IPv6 + return False + + +def disable_ipv6(): + """ + Disable ufw IPv6 support in /etc/default/ufw + """ + exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g', + '/etc/default/ufw']) + if exit_code == 0: + hookenv.log('IPv6 support in ufw disabled', level='INFO') + else: + hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR") + raise UFWError("Couldn't disable IPv6 support in ufw") + + +def enable(soft_fail=False): + """ + Enable ufw + + :param soft_fail: If set to True silently disables IPv6 support in ufw, + otherwise a UFWIPv6Error exception is raised when IP6 + support is broken. 
+    :returns: True if ufw is successfully enabled
+    """
+    if is_enabled():
+        return True
+
+    if not is_ipv6_ok(soft_fail):
+        disable_ipv6()
+
+    output = subprocess.check_output(['ufw', 'enable'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall('^Firewall is active and enabled on system startup\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be enabled", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw enabled", level='INFO')
+        return True
+
+
+def reload():
+    """
+    Reload ufw
+
+    :returns: True if ufw is successfully reloaded
+    """
+    output = subprocess.check_output(['ufw', 'reload'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall('^Firewall reloaded\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be reloaded", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw reloaded", level='INFO')
+        return True
+
+
+def disable():
+    """
+    Disable ufw
+
+    :returns: True if ufw is successfully disabled
+    """
+    if not is_enabled():
+        return True
+
+    output = subprocess.check_output(['ufw', 'disable'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall(r'^Firewall stopped and disabled on system startup\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be disabled", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw disabled", level='INFO')
+        return True
+
+
+def default_policy(policy='deny', direction='incoming'):
+    """
+    Changes the default policy for traffic `direction`
+
+    :param policy: allow, deny or reject
+    :param direction: traffic direction, possible values: incoming, outgoing,
+                      routed
+    """
+    if policy not in ['allow', 'deny', 'reject']:
+        raise UFWError(('Unknown policy %s, valid values: '
+                        'allow, deny, reject') % policy)
+
+    if direction not in ['incoming', 'outgoing', 'routed']:
+        raise UFWError(('Unknown direction %s, valid values: '
+                        'incoming, outgoing, routed') % direction)
+
+    output = subprocess.check_output(['ufw', 'default', policy, direction],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+    hookenv.log(output, level='DEBUG')
+
+    m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
+                                                             policy),
+                   output, re.M)
+    if len(m) == 0:
+        hookenv.log("ufw couldn't change the default policy to %s for %s"
+                    % (policy, direction), level='WARN')
+        return False
+    else:
+        hookenv.log("ufw default policy for %s changed to %s"
+                    % (direction, policy), level='INFO')
+        return True
+
+
+def modify_access(src, dst='any', port=None, proto=None, action='allow',
+                  index=None, prepend=False, comment=None):
+    """
+    Grant or revoke access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :type src: Optional[str]
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and connections to only one of them should be accepted,
+                this field has to be set.
+    :type dst: Optional[str]
+    :param port: destination port
+    :type port: Optional[int]
+    :param proto: protocol (tcp or udp)
+    :type proto: Optional[str]
+    :param action: `allow` or `delete`
+    :type action: str
+    :param index: if different from None the rule is inserted at the given
+                  `index`.
+    :type index: Optional[int]
+    :param prepend: Whether to insert the rule before all other rules
+                    matching the rule's IP type.
+    :type prepend: bool
+    :param comment: Create the rule with a comment
+    :type comment: Optional[str]
+    """
+    if not is_enabled():
+        hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
+        return
+
+    if action == 'delete':
+        if index is not None:
+            cmd = ['ufw', '--force', 'delete', str(index)]
+        else:
+            cmd = ['ufw', 'delete', 'allow']
+    elif index is not None:
+        cmd = ['ufw', 'insert', str(index), action]
+    elif prepend:
+        cmd = ['ufw', 'prepend', action]
+    else:
+        cmd = ['ufw', action]
+
+    if src is not None:
+        cmd += ['from', src]
+
+    if dst is not None:
+        cmd += ['to', dst]
+
+    if port is not None:
+        cmd += ['port', str(port)]
+
+    if proto is not None:
+        cmd += ['proto', proto]
+
+    if comment:
+        cmd.extend(['comment', comment])
+
+    hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
+    # Capture stderr as well so that a failing ufw call can be logged below.
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    (stdout, stderr) = p.communicate()
+
+    hookenv.log(stdout, level='INFO')
+
+    if p.returncode != 0:
+        hookenv.log(stderr, level='ERROR')
+        hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
+                                                              p.returncode),
+                    level='ERROR')
+
+
+def grant_access(src, dst='any', port=None, proto=None, index=None):
+    """
+    Grant access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and connections should only be accepted on one of them,
+                this field has to be set.
+    :param port: destination port
+    :param proto: protocol (tcp or udp)
+    :param index: if different from None the rule is inserted at the given
+                  `index`.
+    """
+    return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
+                         index=index)
+
+
+def revoke_access(src, dst='any', port=None, proto=None):
+    """
+    Revoke access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and connections should only be accepted on one of them,
+                this field has to be set.
+    :param port: destination port
+    :param proto: protocol (tcp or udp)
+    """
+    return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
+
+
+def service(name, action):
+    """
+    Open/close access to a service
+
+    :param name: could be a service name defined in `/etc/services` or a port
+                 number.
+    :param action: `open` or `close`
+    """
+    if action == 'open':
+        subprocess.check_output(['ufw', 'allow', str(name)],
+                                universal_newlines=True)
+    elif action == 'close':
+        subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
+                                universal_newlines=True)
+    else:
+        raise UFWError(("'{}' not supported, use 'open' "
+                        "or 'close'").format(action))
+
+
+def status():
+    """Retrieve firewall rules as represented by UFW.
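+
+    A minimal usage sketch (added for illustration; the subnet and port
+    values are placeholders)::
+
+        >>> from charmhelpers.contrib.network import ufw
+        >>> ufw.grant_access(src='10.0.3.0/24', port='22', proto='tcp')
+        >>> for num, rule in ufw.status():
+        ...     print(num, rule['to'], rule['action'], rule['from'])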
+
+    :returns: Tuples with rule number and data
+        (1, {'to': '', 'action': '', 'from': '', 'ipv6': True, 'comment': ''})
+    :rtype: Iterator[Tuple[int, Dict[str, Union[bool, str]]]]
+    """
+    cp = subprocess.check_output(('ufw', 'status', 'numbered',),
+                                 stderr=subprocess.STDOUT,
+                                 universal_newlines=True)
+    for line in cp.splitlines():
+        if not line.startswith('['):
+            continue
+        ipv6 = '(v6)' in line
+        line = line.replace('(v6)', '')
+        line = line.replace('[', '')
+        line = line.replace(']', '')
+        line = line.replace('Anywhere', 'any')
+        row = line.split()
+        yield (int(row[0]), {
+            'to': row[1],
+            'action': ' '.join(row[2:4]).lower(),
+            'from': row[4],
+            'ipv6': ipv6,
+            'comment': row[6] if len(row) > 5 and row[5] == '#' else '',
+        })
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/alternatives.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/alternatives.py
new file mode 100644
index 0000000000000000000000000000000000000000..547de09c6d818772191b519618fa32b08b0e6eff
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/alternatives.py
@@ -0,0 +1,44 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +''' Helper for managing alternatives for file conflict resolution ''' + +import subprocess +import shutil +import os + + +def install_alternative(name, target, source, priority=50): + ''' Install alternative configuration ''' + if (os.path.exists(target) and not os.path.islink(target)): + # Move existing file/directory away before installing + shutil.move(target, '{}.bak'.format(target)) + cmd = [ + 'update-alternatives', '--force', '--install', + target, name, source, str(priority) + ] + subprocess.check_call(cmd) + + +def remove_alternative(name, source): + """Remove an installed alternative configuration file + + :param name: string name of the alternative to remove + :param source: string full path to alternative to remove + """ + cmd = [ + 'update-alternatives', '--remove', + name, source + ] + subprocess.check_call(cmd) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/amulet/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/amulet/deployment.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 0000000000000000000000000000000000000000..dd3aebe97dd04bf42a2c4ee7c7b7b83b1917a678 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,384 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import re +import sys +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. 
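+
+    A rough sketch of how a test typically drives this class (illustrative
+    only; the charm names, relation and config values are placeholders, and
+    _add_relations()/_deploy() come from the AmuletDeployment base class)::
+
+        d = OpenStackAmuletDeployment(series='xenial')
+        d._add_services({'name': 'keystone'}, [{'name': 'percona-cluster'}])
+        d._add_relations({'keystone:shared-db': 'percona-cluster:shared-db'})
+        d._configure_services({'keystone': {'admin-password': 'openstack'}})
+        d._deploy()
+        d._auto_wait_for_status()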
+    """
+
+    def __init__(self, series=None, openstack=None, source=None,
+                 stable=True, log_level=DEBUG):
+        """Initialize the deployment environment."""
+        super(OpenStackAmuletDeployment, self).__init__(series)
+        self.log = self.get_logger(level=log_level)
+        self.log.info('OpenStackAmuletDeployment:  init')
+        self.openstack = openstack
+        self.source = source
+        self.stable = stable
+
+    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
+        """Get a logger object that will log to stdout."""
+        log = logging
+        logger = log.getLogger(name)
+        fmt = log.Formatter("%(asctime)s %(funcName)s "
+                            "%(levelname)s: %(message)s")
+
+        handler = log.StreamHandler(stream=sys.stdout)
+        handler.setLevel(level)
+        handler.setFormatter(fmt)
+
+        logger.addHandler(handler)
+        logger.setLevel(level)
+
+        return logger
+
+    def _determine_branch_locations(self, other_services):
+        """Determine the branch locations for the other services.
+
+        Determine if the local branch being tested is derived from its
+        stable or next (dev) branch, and based on this, use the corresponding
+        stable or next branches for the other_services."""
+
+        self.log.info('OpenStackAmuletDeployment: determine branch locations')
+
+        # Charms outside the ~openstack-charmers namespace
+        base_charms = {
+            'mysql': ['trusty'],
+            'mongodb': ['trusty'],
+            'nrpe': ['trusty', 'xenial'],
+        }
+
+        for svc in other_services:
+            # If a location has been explicitly set, use it
+            if svc.get('location'):
+                continue
+            if svc['name'] in base_charms:
+                # NOTE: not all charms have support for all series we
+                #       want/need to test against, so fix to most recent
+                #       that each base charm supports
+                target_series = self.series
+                if self.series not in base_charms[svc['name']]:
+                    target_series = base_charms[svc['name']][-1]
+                svc['location'] = 'cs:{}/{}'.format(target_series,
+                                                    svc['name'])
+            elif self.stable:
+                svc['location'] = 'cs:{}/{}'.format(self.series,
+                                                    svc['name'])
+            else:
+                svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
+                    self.series,
+                    svc['name']
+                )
+
+        return other_services
+
+    def _add_services(self, this_service, other_services, use_source=None,
+                      no_origin=None):
+        """Add services to the deployment and optionally set
+        openstack-origin/source.
+
+        :param this_service dict: Service dictionary describing the service
+                                  whose amulet tests are being run
+        :param other_services list: List of service dictionaries describing
+                                    the services needed to support the target
+                                    service
+        :param use_source list: List of services which use the 'source' config
+                                option rather than 'openstack-origin'
+        :param no_origin list: List of services which do not support setting
+                               the Cloud Archive.
+ Service Dict: + { + 'name': str charm-name, + 'units': int number of units, + 'constraints': dict of juju constraints, + 'location': str location of charm, + } + eg + this_service = { + 'name': 'openvswitch-odl', + 'constraints': {'mem': '8G'}, + } + other_services = [ + { + 'name': 'nova-compute', + 'units': 2, + 'constraints': {'mem': '4G'}, + 'location': cs:~bob/xenial/nova-compute + }, + { + 'name': 'mysql', + 'constraints': {'mem': '2G'}, + }, + {'neutron-api-odl'}] + use_source = ['mysql'] + no_origin = ['neutron-api-odl'] + """ + self.log.info('OpenStackAmuletDeployment: adding services') + + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + + use_source = use_source or [] + no_origin = no_origin or [] + + # Charms which should use the source config option + use_source = list(set( + use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw', 'ceph-mon', + 'ceph-proxy', 'percona-cluster', 'lxd'])) + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = list(set( + no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', + 'nrpe', 'openvswitch-odl', 'neutron-api-odl', + 'odl-controller', 'cinder-backup', 'nexentaedge-data', + 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', + 'cinder-nexentaedge', 'nexentaedge-mgmt', + 'ceilometer-agent'])) + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + no_origin: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in no_origin: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + self.log.info('OpenStackAmuletDeployment: configure services') + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _auto_wait_for_status(self, message=None, exclude_services=None, + include_only=None, timeout=None): + """Wait for all units to have a specific extended status, except + for any defined as excluded. Unless specified via message, any + status containing any case of 'ready' will be considered a match. + + Examples of message usage: + + Wait for all unit status to CONTAIN any case of 'ready' or 'ok': + message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) + + Wait for all units to reach this status (exact match): + message = re.compile('^Unit is ready and clustered$') + + Wait for all units to reach any one of these (exact match): + message = re.compile('Unit is ready|OK|Ready') + + Wait for at least one unit to reach this status (exact match): + message = {'ready'} + + See Amulet's sentry.wait_for_messages() for message usage detail. + https://github.com/juju/amulet/blob/master/amulet/sentry.py + + :param message: Expected status match + :param exclude_services: List of juju service names to ignore, + not to be used in conjuction with include_only. + :param include_only: List of juju service names to exclusively check, + not to be used in conjuction with exclude_services. + :param timeout: Maximum time in seconds to wait for status match + :returns: None. Raises if timeout is hit. 
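+
+        A sketch of a concrete call (mirrors how the rabbitmq helpers in the
+        amulet utils wait for clustering; the service name and timeout are
+        illustrative)::
+
+            message = re.compile('^Unit is ready and clustered$')
+            self._auto_wait_for_status(message=message,
+                                       include_only=['rabbitmq-server'],
+                                       timeout=1200)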
+ """ + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' + ''.format(timeout)) + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait(timeout=timeout) + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) + # Check for ready messages + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + + self.log.info('OK') + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + # Must be ordered by OpenStack release (not by Ubuntu release): + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) + + releases = { + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, + ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('bionic', 'cloud:bionic-stein'): self.bionic_stein, + ('bionic', 'cloud:bionic-train'): self.bionic_train, + ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, + ('cosmic', None): self.cosmic_rocky, + ('disco', None): self.disco_stein, + ('eoan', None): self.eoan_train, + ('focal', None): self.focal_ussuri, + } + return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. + """ + releases = OrderedDict([ + ('trusty', 'icehouse'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] + + def get_percona_service_entry(self, memory_constraint=None): + """Return a amulet service entry for percona cluster. 
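+
+        An illustrative call (the constraint value is a placeholder and d is
+        the OpenStackAmuletDeployment instance)::
+
+            svc = d.get_percona_service_entry(memory_constraint='4096M')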
+ + :param memory_constraint: Override the default memory constraint + in the service entry. + :type memory_constraint: str + :returns: Amulet service entry. + :rtype: dict + """ + memory_constraint = memory_constraint or '3072M' + svc_entry = { + 'name': 'percona-cluster', + 'constraints': {'mem': memory_constraint}} + if self._get_openstack_release() <= self.trusty_mitaka: + svc_entry['location'] = 'cs:trusty/percona-cluster' + return svc_entry + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder-ceph', + 'glance' + ] + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata + pools = [ + 'rbd', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/amulet/utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..14864198926b82afe382d07263034b020ad43c09 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,1593 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import amulet +import json +import logging +import os +import re +import six +import time +import urllib +import urlparse + +import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 +import heatclient.v1.client as heat_client +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions + +import novaclient.client as nova_client +import novaclient +import pika +import swiftclient + +from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) +from charmhelpers.core.host import CompareHostReleases + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + +NOVA_CLIENT_VERSION = "2" + +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', + 'xenial_newton', 'yakkety_newton', + 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', + 'xenial_queens', 'bionic_queens', + 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein', + 'bionic_train', 'eoan_train', + 'bionic_ussuri', 'focal_ussuri', +] + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charm tests. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
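+
+        An illustrative expected dict (a sketch only; not_null and valid_url
+        are the validator attributes this class already uses, and u is an
+        OpenStackAmuletUtils instance)::
+
+            expected = {'id': u.not_null,
+                        'region': 'RegionOne',
+                        'adminurl': u.valid_url,
+                        'internalurl': u.valid_url,
+                        'publicurl': u.valid_url,
+                        'service_id': u.not_null}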
+        """
+        self.log.debug('Validating endpoint data...')
+        self.log.debug('actual: {}'.format(repr(endpoints)))
+        found = False
+        for ep in endpoints:
+            self.log.debug('endpoint: {}'.format(repr(ep)))
+            if (admin_port in ep.adminurl and
+                    internal_port in ep.internalurl and
+                    public_port in ep.publicurl):
+                found = True
+                actual = {'id': ep.id,
+                          'region': ep.region,
+                          'adminurl': ep.adminurl,
+                          'internalurl': ep.internalurl,
+                          'publicurl': ep.publicurl,
+                          'service_id': ep.service_id}
+                ret = self._validate_dict_data(expected, actual)
+                if ret:
+                    return 'unexpected endpoint data - {}'.format(ret)
+
+        if not found:
+            return 'endpoint not found'
+
+    def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected, expected_num_eps=3):
+        """Validate keystone v3 endpoint data.
+
+        Validate the v3 endpoint data which has changed from v2. The
+        ports are used to find the matching endpoint.
+
+        The new v3 endpoint data looks like:
+
+        [<Endpoint enabled=True,
+                   id=<endpoint-uuid>,
+                   interface=admin,
+                   links={u'self': u'<RESTful URL of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=17f842a0dc084b928e476fafe67e4095,
+                   url=http://10.5.6.5:9312>,
+         <Endpoint enabled=True,
+                   id=<endpoint-uuid>,
+                   interface=admin,
+                   links={u'self': u'<RESTful URL of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
+                   url=http://10.5.6.6:35357/v3>,
+         ... ]
+        """
+        self.log.debug('Validating v3 endpoint data...')
+        self.log.debug('actual: {}'.format(repr(endpoints)))
+        found = []
+        for ep in endpoints:
+            self.log.debug('endpoint: {}'.format(repr(ep)))
+            if ((admin_port in ep.url and ep.interface == 'admin') or
+                    (internal_port in ep.url and ep.interface == 'internal') or
+                    (public_port in ep.url and ep.interface == 'public')):
+                found.append(ep.interface)
+                # note we ignore the links member.
+                actual = {'id': ep.id,
+                          'region': ep.region,
+                          'region_id': ep.region_id,
+                          'interface': self.not_null,
+                          'url': ep.url,
+                          'service_id': ep.service_id, }
+                ret = self._validate_dict_data(expected, actual)
+                if ret:
+                    return 'unexpected endpoint data - {}'.format(ret)
+
+        if len(found) != expected_num_eps:
+            return 'Unexpected number of endpoints found'
+
+    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
+        """Convert v2 endpoint data into v3.
+
+        {
+            'service_name1': [
+                {
+                    'adminURL': adminURL,
+                    'id': id,
+                    'region': region,
+                    'publicURL': publicURL,
+                    'internalURL': internalURL
+                }],
+            'service_name2': [
+                {
+                    'adminURL': adminURL,
+                    'id': id,
+                    'region': region,
+                    'publicURL': publicURL,
+                    'internalURL': internalURL
+                }],
+        }
+        """
+        self.log.warn("Endpoint ID and Region ID validation is limited to not "
+                      "null checks after v2 to v3 conversion")
+        for svc in ep_data.keys():
+            assert len(ep_data[svc]) == 1, "Unknown data format"
+            svc_ep_data = ep_data[svc][0]
+            ep_data[svc] = [
+                {
+                    'url': svc_ep_data['adminURL'],
+                    'interface': 'admin',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['publicURL'],
+                    'interface': 'public',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['internalURL'],
+                    'interface': 'internal',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null}]
+        return ep_data
+
+    def validate_svc_catalog_endpoint_data(self, expected, actual,
+                                           openstack_release=None):
+        """Validate service catalog endpoint data. Pick the correct validator
+        for the OpenStack version. Expected data should be in the v2 format:
+        {
+            'service_name1': [
+                {
+                    'adminURL': adminURL,
+                    'id': id,
+                    'region': region,
+                    'publicURL': publicURL,
+                    'internalURL': internalURL
+                }],
+            'service_name2': [
+                {
+                    'adminURL': adminURL,
+                    'id': id,
+                    'region': region,
+                    'publicURL': publicURL,
+                    'internalURL': internalURL
+                }],
+        }
+
+        """
+        validation_function = self.validate_v2_svc_catalog_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_svc_catalog_endpoint_data
+            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
+        return validation_function(expected, actual)
+
+    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
+        """Validate service catalog endpoint data.
+
+        Validate a list of actual service catalog endpoints vs a list of
+        expected service catalog endpoints.
+        """
+        self.log.debug('Validating service catalog endpoint data...')
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for k, v in six.iteritems(expected):
+            if k in actual:
+                ret = self._validate_dict_data(expected[k][0], actual[k][0])
+                if ret:
+                    return self.endpoint_error(k, ret)
+            else:
+                return "endpoint {} does not exist".format(k)
+        return ret
+
+    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
+        """Validate the keystone v3 catalog endpoint data.
+
+        Validate a list of dictionaries that make up the keystone v3 service
+        catalogue.
+
+        It is in the form of:
+
+        {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
+                        u'interface': u'admin',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:35357/v3'},
+                       {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
+                        u'interface': u'public',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'},
+                       {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
+                        u'interface': u'internal',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'}],
+         u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
+                           u'interface': u'public',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
+                           u'interface': u'internal',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'f629388955bc407f8b11d8b7ca168086',
+                           u'interface': u'admin',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9312'}]}
+
+        Note that an added complication is that the order of the admin,
+        public and internal entries under 'interface' is not guaranteed
+        within each region.
+
+        Thus, the function sorts the expected and actual lists using the
+        interface key as a sort key, prior to the comparison.
+ """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('Validating tenant data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('Validating role data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual, api_version=None): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('Validating user data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
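+
+        A sketch of a typical call (flavor names are placeholders; u and
+        nova are the authenticated clients from the helpers above)::
+
+            ret = u.validate_flavor_data(['m1.tiny', 'm1.small'],
+                                         nova.flavors.list())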
+ """ + self.log.debug('Validating flavor data...') + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) + return tenant in [t.name for t in keystone.tenants.list()] + + @retry_on_exception(num_retries=5, base_delay=1) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel.get('api_version') != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel.get('api_version'), api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. + """ + self.log.debug("Setting keystone preferred-api-version: '{}'" + "".format(api_version)) + + config = {'preferred-api-version': api_version} + deployment.d.configure('keystone', config) + deployment._auto_wait_for_status() + self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) + + def authenticate_cinder_admin(self, keystone, api_version=2): + """Authenticates admin user with cinder.""" + self.log.debug('Authenticating cinder admin...') + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](session=keystone.session) + + def authenticate_keystone(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Authenticate with Keystone""" + self.log.debug('Authenticating with keystone...') + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + 
admin_port=admin_port) + if api_version == 2: + auth = v2.Password( + username=username, + password=password, + tenant_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + else: + auth = v3.Password( + user_domain_name=user_domain_name, + username=username, + password=password, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + auth_url=ep + ) + sess = keystone_session.Session(auth=auth) + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None, api_version=2): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + # 11 => xenial_queens + if api_version == 3 or (openstack_release and openstack_release >= 11): + client_class = keystone_client_v3.Client + api_version = 3 + else: + client_class = keystone_client.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant=None, api_version=None, + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): + """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') + if not keystone_ip: + keystone_ip = keystone_sentry.info['public-address'] + + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: + user_domain_name = 'admin_domain' + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) + + def authenticate_glance_admin(self, keystone, force_v1_client=False): + 
"""Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') + ep = keystone.service_catalog.url_for(service_type='image', + interface='adminURL') + if not force_v1_client and keystone.session: + return glance_clientv2.Client("2", session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return swiftclient.Connection(session=keystone.session) + else: + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + + def glance_create_image(self, glance, image_name, image_url, + download_dir='tests', + hypervisor_type=None, + disk_format='qcow2', + architecture='x86_64', + container_format='bare'): + """Download an image and upload it to glance, validate its status + and return an image object pointer. KVM defaults, can override for + LXD. 
+ + :param glance: pointer to authenticated glance api connection + :param image_name: display name for new image + :param image_url: url to retrieve + :param download_dir: directory to store downloaded image file + :param hypervisor_type: glance image hypervisor property + :param disk_format: glance image disk format + :param architecture: glance image architecture property + :param container_format: glance image container format + :returns: glance image pointer + """ + self.log.debug('Creating glance image ({}) from ' + '{}...'.format(image_name, image_url)) + + # Download image + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + abs_file_name = os.path.join(download_dir, image_name) + if not os.path.exists(abs_file_name): + opener.retrieve(image_url, abs_file_name) + + # Create glance image + glance_properties = { + 'architecture': architecture, + } + if hypervisor_type: + glance_properties['hypervisor_type'] = hypervisor_type + # Create glance image + if float(glance.version) < 2.0: + with open(abs_file_name) as f: + image = glance.images.create( + name=image_name, + is_public=True, + disk_format=disk_format, + container_format=container_format, + properties=glance_properties, + data=f) + else: + image = glance.images.create( + name=image_name, + visibility="public", + disk_format=disk_format, + container_format=container_format) + glance.images.upload(image.id, open(abs_file_name, 'rb')) + glance.images.update(image.id, **glance_properties) + + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' + amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == container_format \ + and val_img_dfmt == disk_format: + self.log.debug(msg_attr) + else: + msg = ('Image validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return image + + def create_cirros_image(self, glance, image_name, hypervisor_type=None): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. 
+ + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :param hypervisor_type: glance image hypervisor property + :returns: glance image pointer + """ + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'glance_create_image instead of ' + 'create_cirros_image.') + + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Get cirros image URL + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + f.close() + + return self.glance_create_image( + glance, + image_name, + cirros_url, + hypervisor_type=hypervisor_type) + + def delete_image(self, glance, image): + """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) + return self.delete_resource(glance.images, image, msg='glance image') + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) + image = nova.glance.find_image(image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) + return self.delete_resource(nova.servers, instance, + msg='nova instance') + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except Exception: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. 
+ + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. + + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+ amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. + + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. 
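+
+        An illustrative call, mirroring how create_cinder_volume above uses
+        this helper::
+
+            ret = self.resource_reaches_status(cinder.volumes, vol_id,
+                                               expected_stat='available',
+                                               msg='Volume status wait')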
+ + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # For mimic ceph osd lspools output + output = output.replace("\n", ",") + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + """Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. 
The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. + + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None + + # rabbitmq/amqp specific helpers: + + def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): + """Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready.""" + + if init_sleep: + time.sleep(init_sleep) + + message = re.compile('^Unit is ready and clustered$') + deployment._auto_wait_for_status(message=message, + timeout=timeout, + include_only=['rabbitmq-server']) + + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. + """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. 
+ + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. + + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. 
+
+        :param sentry_units: list of all rmq sentry units
+        :param port: optional ssl port override to validate
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+                return ('Unexpected condition: ssl is disabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def validate_rmq_ssl_disabled_units(self, sentry_units):
+        """Check that ssl is disabled on the listed rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+                return ('Unexpected condition: ssl is enabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def configure_rmq_ssl_on(self, sentry_units, deployment,
+                             port=None, max_wait=60):
+        """Turn ssl charm config option on, with optional non-default
+        ssl port specification. Confirm that it is enabled on every
+        unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param port: amqp port, use defaults if None
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful. Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option: on')
+
+        # Enable RMQ SSL
+        config = {'ssl': 'on'}
+        if port:
+            config['ssl_port'] = port
+
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
+        """Turn ssl charm config option off, confirm that it is disabled
+        on every unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful. Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option: off')
+
+        # Disable RMQ SSL
+        config = {'ssl': 'off'}
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
+                             port=None, fatal=True,
+                             username="testuser1", password="changeme"):
+        """Establish and return a pika amqp connection to the rabbitmq service
+        running on a rmq juju unit.
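+
+        Example call (illustrative sketch; assumes the default test user
+        created by add_rmq_test_user above):
+
+            connection = u.connect_amqp_by_unit(sentry_unit, ssl=False)
+            connection.close()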
+ + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.is_open is True + assert connection.is_closing is False + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. 
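+
+        Example round-trip (illustrative sketch, pairing this helper with
+        publish_amqp_message_by_unit above):
+
+            u.publish_amqp_message_by_unit(sentry_unit, 'ping', queue='test')
+            body = u.get_amqp_message_by_unit(sentry_unit, queue='test')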
+        """
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+        channel = connection.channel()
+        method_frame, _, body = channel.basic_get(queue)
+
+        if method_frame:
+            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
+                                                                         body))
+            channel.basic_ack(method_frame.delivery_tag)
+            channel.close()
+            connection.close()
+            return body
+        else:
+            msg = 'No message retrieved.'
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def validate_memcache(self, sentry_unit, conf, os_release,
+                          earliest_release=5, section='keystone_authtoken',
+                          check_kvs=None):
+        """Check Memcache is running and is configured to be used
+
+        Example call from Amulet test:
+
+            def test_110_memcache(self):
+                u.validate_memcache(self.neutron_api_sentry,
+                                    '/etc/neutron/neutron.conf',
+                                    self._get_openstack_release())
+
+        :param sentry_unit: sentry unit
+        :param conf: OpenStack config file to check memcache settings
+        :param os_release: Current OpenStack release int code
+        :param earliest_release: Earliest OpenStack release to check int code
+        :param section: OpenStack config file section to check
+        :param check_kvs: Dict of settings to check in config file
+        :returns: None
+        """
+        if os_release < earliest_release:
+            self.log.debug('Skipping memcache checks for deployment. {} < '
+                           'mitaka'.format(os_release))
+            return
+        _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
+        self.log.debug('Checking memcached is running')
+        ret = self.validate_services_by_name({sentry_unit: ['memcached']})
+        if ret:
+            amulet.raise_status(amulet.FAIL, msg='Memcache running check '
+                                'failed {}'.format(ret))
+        else:
+            self.log.debug('OK')
+        self.log.debug('Checking memcache url is configured in {}'.format(
+            conf))
+        if self.validate_config_data(sentry_unit, conf, section, _kvs):
+            message = "Memcache config error in: {}".format(conf)
+            amulet.raise_status(amulet.FAIL, msg=message)
+        else:
+            self.log.debug('OK')
+        self.log.debug('Checking memcache configuration in '
+                       '/etc/memcached.conf')
+        contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
+                                           fatal=True)
+        ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
+        if CompareHostReleases(ubuntu_release) <= 'trusty':
+            memcache_listen_addr = 'ip6-localhost'
+        else:
+            memcache_listen_addr = '::1'
+        expected = {
+            '-p': '11211',
+            '-l': memcache_listen_addr}
+        found = []
+        for key, value in expected.items():
+            for line in contents.split('\n'):
+                if line.startswith(key):
+                    self.log.debug('Checking {} is set to {}'.format(
+                        key,
+                        value))
+                    assert value == line.split()[-1]
+                    self.log.debug(line.split()[-1])
+                    found.append(key)
+        if sorted(found) == sorted(expected.keys()):
+            self.log.debug('OK')
+        else:
+            message = "Memcache config error in: /etc/memcached.conf"
+            amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/audits/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/audits/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f7e5f79a5d5fe3cb374814e32ea16f6060f4f27
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/audits/__init__.py
@@ -0,0 +1,212 @@
+# Copyright 2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OpenStack Security Audit code"""
+
+import collections
+from enum import Enum
+import traceback
+
+from charmhelpers.core.host import cmp_pkgrevno
+import charmhelpers.contrib.openstack.utils as openstack_utils
+import charmhelpers.core.hookenv as hookenv
+
+
+class AuditType(Enum):
+    OpenStackSecurityGuide = 1
+
+
+_audits = {}
+
+Audit = collections.namedtuple('Audit', 'func filters')
+
+
+def audit(*args):
+    """Decorator to register an audit.
+
+    These are used to generate audits that can be run on a
+    deployed system that matches the given configuration.
+
+    :param args: List of functions to filter tests against
+    :type args: List[Callable[Dict]]
+    """
+    def wrapper(f):
+        test_name = f.__name__
+        if _audits.get(test_name):
+            raise RuntimeError(
+                "Test name '{}' used more than once"
+                .format(test_name))
+        non_callables = [fn for fn in args if not callable(fn)]
+        if non_callables:
+            raise RuntimeError(
+                "Configuration includes non-callable filters: {}"
+                .format(non_callables))
+        _audits[test_name] = Audit(func=f, filters=args)
+        return f
+    return wrapper
+
+
+def is_audit_type(*args):
+    """This audit is included in the specified kinds of audits.
+
+    :param *args: List of AuditTypes to include this audit in
+    :type args: List[AuditType]
+    :rtype: Callable[Dict]
+    """
+    def _is_audit_type(audit_options):
+        if audit_options.get('audit_type') in args:
+            return True
+        else:
+            return False
+    return _is_audit_type
+
+
+def since_package(pkg, pkg_version):
+    """This audit should be run after the specified package version (incl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param pkg_version: The package version
+    :type pkg_version: str
+    :rtype: Callable[Dict]
+    """
+    def _since_package(audit_options=None):
+        return cmp_pkgrevno(pkg, pkg_version) >= 0
+
+    return _since_package
+
+
+def before_package(pkg, pkg_version):
+    """This audit should be run before the specified package version (excl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param pkg_version: The package version
+    :type pkg_version: str
+    :rtype: Callable[Dict]
+    """
+    def _before_package(audit_options=None):
+        return not since_package(pkg, pkg_version)()
+
+    return _before_package
+
+
+def since_openstack_release(pkg, release):
+    """This audit should run after the specified OpenStack version (incl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param release: The OpenStack release codename
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _since_openstack_release(audit_options=None):
+        _release = openstack_utils.get_os_codename_package(pkg)
+        return openstack_utils.CompareOpenStackReleases(_release) >= release
+
+    return _since_openstack_release
+
+
+def before_openstack_release(pkg, release):
+    """This audit should run before the specified OpenStack version (excl).
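+
+    Example filter usage (illustrative sketch; ``check_foo`` is a
+    hypothetical audit and the package/release names are examples only):
+
+        @audit(before_openstack_release('keystone', 'queens'))
+        def check_foo(audit_options):
+            pass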
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param release: The OpenStack release codename
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _before_openstack_release(audit_options=None):
+        return not since_openstack_release(pkg, release)()
+
+    return _before_openstack_release
+
+
+def it_has_config(config_key):
+    """This audit should be run based on specified config keys.
+
+    :param config_key: Config key to look for
+    :type config_key: str
+    :rtype: Callable[Dict]
+    """
+    def _it_has_config(audit_options):
+        return audit_options.get(config_key) is not None
+
+    return _it_has_config
+
+
+def run(audit_options):
+    """Run the configured audits with the specified audit_options.
+
+    :param audit_options: Configuration for the audit
+    :type audit_options: Config
+
+    :rtype: Dict[str, str]
+    """
+    errors = {}
+    results = {}
+    for name, audit in sorted(_audits.items()):
+        result_name = name.replace('_', '-')
+        if result_name in audit_options.get('excludes', []):
+            print(
+                "Skipping {} because it is "
+                "excluded in audit config"
+                .format(result_name))
+            continue
+        if all(p(audit_options) for p in audit.filters):
+            try:
+                audit.func(audit_options)
+                print("{}: PASS".format(name))
+                results[result_name] = {
+                    'success': True,
+                }
+            except AssertionError as e:
+                print("{}: FAIL ({})".format(name, e))
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+            except Exception as e:
+                print("{}: ERROR ({})".format(name, e))
+                errors[name] = e
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+    for name, error in errors.items():
+        print("=" * 20)
+        print("Error in {}: ".format(name))
+        traceback.print_tb(error.__traceback__)
+        print()
+    return results
+
+
+def action_parse_results(result):
+    """Parse the result of `run` in the context of an action.
+
+    :param result: The result of running the security-checklist
+        action on a unit
+    :type result: Dict[str, Dict[str, str]]
+    :rtype: int
+    """
+    passed = True
+    for test, result in result.items():
+        if result['success']:
+            hookenv.action_set({test: 'PASS'})
+        else:
+            hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
+            passed = False
+    if not passed:
+        hookenv.action_fail("One or more tests failed")
+    return 0 if passed else 1
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
new file mode 100644
index 0000000000000000000000000000000000000000..79740ed0c103e841b6e280af920f8be65e3d1d0d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
@@ -0,0 +1,270 @@
+# Copyright 2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
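+
+# Illustrative sketch (not part of the upstream file): the audits defined in
+# this module are registered through the @audit decorator imported below and
+# executed by the framework's run() helper, along these lines:
+#
+#     from charmhelpers.contrib.openstack.audits import run, AuditType
+#     results = run({'audit_type': AuditType.OpenStackSecurityGuide,
+#                    'config_path': '/etc/keystone',
+#                    'config_file': 'keystone.conf',
+#                    'files': FILE_ASSERTIONS['keystone']})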
+ +import collections +import configparser +import glob +import os.path +import subprocess + +from charmhelpers.contrib.openstack.audits import ( + audit, + AuditType, + # filters + is_audit_type, + it_has_config, +) + +from charmhelpers.core.hookenv import ( + cached, +) + +""" +The Security Guide suggests a specific list of files inside the +config directory for the service having 640 specifically, but +by ensuring the containing directory is 750, only the owner can +write, and only the group can read files within the directory. + +By restricting access to the containing directory, we can more +effectively ensure that there is no accidental leakage if a new +file is added to the service without being added to the security +guide, and to this check. +""" +FILE_ASSERTIONS = { + 'barbican': { + '/etc/barbican': {'group': 'barbican', 'mode': '750'}, + }, + 'ceph-mon': { + '/var/lib/charm/ceph-mon/ceph.conf': + {'owner': 'root', 'group': 'root', 'mode': '644'}, + '/etc/ceph/ceph.client.admin.keyring': + {'owner': 'ceph', 'group': 'ceph'}, + '/etc/ceph/rbdmap': {'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'} + }, + 'ceph-osd': { + '/var/lib/charm/ceph-osd/ceph.conf': + {'owner': 'ceph', 'group': 'ceph', 'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}, + '/var/lib/ceph/radosgw': + {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + }, + 'cinder': { + '/etc/cinder': {'group': 'cinder', 'mode': '750'}, + }, + 'glance': { + '/etc/glance': {'group': 'glance', 'mode': '750'}, + }, + 'keystone': { + '/etc/keystone': + {'owner': 'keystone', 'group': 'keystone', 'mode': '750'}, + }, + 'manilla': { + '/etc/manila': {'group': 'manilla', 'mode': '750'}, + }, + 'neutron-gateway': { + '/etc/neutron': {'group': 'neutron', 'mode': '750'}, + }, + 'neutron-api': { + '/etc/neutron/': {'group': 'neutron', 'mode': '750'}, + }, + 'nova-cloud-controller': { + '/etc/nova': {'group': 'nova', 'mode': '750'}, + }, + 'nova-compute': { + '/etc/nova/': {'group': 'nova', 'mode': '750'}, + }, + 'openstack-dashboard': { + # From security guide + '/etc/openstack-dashboard/local_settings.py': + {'group': 'horizon', 'mode': '640'}, + }, +} + +Ownership = collections.namedtuple('Ownership', 'owner group mode') + + +@cached +def _stat(file): + """ + Get the Ownership information from a file. + + :param file: The path to a file to stat + :type file: str + :returns: owner, group, and mode of the specified file + :rtype: Ownership + :raises subprocess.CalledProcessError: If the underlying stat fails + """ + out = subprocess.check_output( + ['stat', '-c', '%U %G %a', file]).decode('utf-8') + return Ownership(*out.strip().split(' ')) + + +@cached +def _config_ini(path): + """ + Parse an ini file + + :param path: The path to a file to parse + :type file: str + :returns: Configuration contained in path + :rtype: Dict + """ + # When strict is enabled, duplicate options are not allowed in the + # parsed INI; however, Oslo allows duplicate values. 
This change
+    # causes us to ignore the duplicate values which is acceptable as
+    # long as we don't validate any multi-value options
+    conf = configparser.ConfigParser(strict=False)
+    conf.read(path)
+    return dict(conf)
+
+
+def _validate_file_ownership(owner, group, file_name, optional=False):
+    """
+    Validate that a specified file is owned by `owner:group`.
+
+    :param owner: Name of the owner
+    :type owner: str
+    :param group: Name of the group
+    :type group: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    :param optional: Is this file optional,
+                     ie: Should this test fail when it's missing
+    :type optional: bool
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+        # The file is optional and missing: skip the remaining checks
+        # instead of falling through to an unbound `ownership` below.
+        return
+    assert owner == ownership.owner, \
+        "{} has an incorrect owner: {} should be {}".format(
+            file_name, ownership.owner, owner)
+    assert group == ownership.group, \
+        "{} has an incorrect group: {} should be {}".format(
+            file_name, ownership.group, group)
+    print("Validate ownership of {}: PASS".format(file_name))
+
+
+def _validate_file_mode(mode, file_name, optional=False):
+    """
+    Validate that a specified file has the specified permissions.
+
+    :param mode: file mode that is desired
+    :type mode: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    :param optional: Is this file optional,
+                     ie: Should this test fail when it's missing
+    :type optional: bool
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+        # The file is optional and missing: skip the mode check as well.
+        return
+    assert mode == ownership.mode, \
+        "{} has an incorrect mode: {} should be {}".format(
+            file_name, ownership.mode, mode)
+    print("Validate mode of {}: PASS".format(file_name))
+
+
+@cached
+def _config_section(config, section):
+    """Read the configuration file and return a section."""
+    path = os.path.join(config.get('config_path'), config.get('config_file'))
+    conf = _config_ini(path)
+    return conf.get(section)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_ownership(config):
+    """Verify that configuration files are owned by the correct user/group."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        owner = options.get('owner', config.get('owner', 'root'))
+        group = options.get('group', config.get('group', 'root'))
+        optional = options.get('optional', config.get('optional', False))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_ownership(owner, group, file, optional)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_ownership(owner, group, file_name, optional)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_permissions(config):
+    """Verify that permissions on configuration files are secure enough."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        mode = options.get('mode',
+                           config.get('permissions', '600'))
+        optional = options.get('optional', config.get('optional', False))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_mode(mode, file, optional)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_mode(mode, file_name, optional)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_keystone(audit_options):
+    """Validate that the service uses Keystone for authentication."""
+    section = _config_section(audit_options, 'api') or \
+        _config_section(audit_options, 'DEFAULT')
+    assert section is not None, "Missing section 'api / DEFAULT'"
+    assert section.get('auth_strategy') == "keystone", \
+        "Application is not using Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_keystone(audit_options):
+    """Verify that TLS is used to communicate with Keystone."""
+    section = _config_section(audit_options, 'keystone_authtoken')
+    assert section is not None, "Missing section 'keystone_authtoken'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("auth_uri"), \
+        "TLS is not used for Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_glance(audit_options):
+    """Verify that TLS is used to communicate with Glance."""
+    section = _config_section(audit_options, 'glance')
+    assert section is not None, "Missing section 'glance'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("api_servers"), \
+        "TLS is not used for Glance"
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/cert_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/cert_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b494af64aeae55db44b669990725de81705104b2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/cert_utils.py
@@ -0,0 +1,289 @@
+# Copyright 2014-2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common python helper functions used for OpenStack charm certificates.
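+#
+# Illustrative sketch (not part of the upstream file): a charm typically
+# builds a request with get_certificate_request() below, publishes it on the
+# 'certificates' relation from its hook code, and later consumes the signed
+# reply with process_certificates(); 'myservice', rid and unit are
+# placeholders here:
+#
+#     relation_set(relation_id=rid,
+#                  relation_settings=get_certificate_request())
+#     ...
+#     process_certificates('myservice', rid, unit)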
+
+import os
+import json
+
+from charmhelpers.contrib.network.ip import (
+    get_hostname,
+    resolve_network_cidr,
+)
+from charmhelpers.core.hookenv import (
+    local_unit,
+    network_get_primary_address,
+    config,
+    related_units,
+    relation_get,
+    relation_ids,
+    unit_get,
+    NoNetworkBinding,
+    log,
+    WARNING,
+)
+from charmhelpers.contrib.openstack.ip import (
+    ADMIN,
+    resolve_address,
+    get_vip_in_network,
+    INTERNAL,
+    PUBLIC,
+    ADDRESS_MAP)
+
+from charmhelpers.core.host import (
+    mkdir,
+    write_file,
+)
+
+from charmhelpers.contrib.hahelpers.apache import (
+    install_ca_cert
+)
+
+
+class CertRequest(object):
+
+    """Create a request for certificates to be generated
+    """
+
+    def __init__(self, json_encode=True):
+        self.entries = []
+        self.hostname_entry = None
+        self.json_encode = json_encode
+
+    def add_entry(self, net_type, cn, addresses):
+        """Add a request to the batch
+
+        :param net_type: str network space name request is for
+        :param cn: str Canonical Name for certificate
+        :param addresses: [] List of addresses to be used as SANs
+        """
+        self.entries.append({
+            'cn': cn,
+            'addresses': addresses})
+
+    def add_hostname_cn(self):
+        """Add a request for the hostname of the machine"""
+        ip = unit_get('private-address')
+        addresses = [ip]
+        # If a vip is being used without os-hostname config or
+        # network spaces then we need to ensure the local unit's
+        # cert has the appropriate vip in the SAN list
+        vip = get_vip_in_network(resolve_network_cidr(ip))
+        if vip:
+            addresses.append(vip)
+        self.hostname_entry = {
+            'cn': get_hostname(ip),
+            'addresses': addresses}
+
+    def add_hostname_cn_ip(self, addresses):
+        """Add an address to the SAN list for the hostname request
+
+        :param addresses: [] List of addresses to be added
+        """
+        for addr in addresses:
+            if addr not in self.hostname_entry['addresses']:
+                self.hostname_entry['addresses'].append(addr)
+
+    def get_request(self):
+        """Generate request from the batched up entries
+
+        """
+        if self.hostname_entry:
+            self.entries.append(self.hostname_entry)
+        request = {}
+        for entry in self.entries:
+            sans = sorted(list(set(entry['addresses'])))
+            request[entry['cn']] = {'sans': sans}
+        if self.json_encode:
+            req = {'cert_requests': json.dumps(request, sort_keys=True)}
+        else:
+            req = {'cert_requests': request}
+        req['unit_name'] = local_unit().replace('/', '_')
+        return req
+
+
+def get_certificate_request(json_encode=True):
+    """Generate a certificate request based on the network configuration
+
+    """
+    req = CertRequest(json_encode=json_encode)
+    req.add_hostname_cn()
+    # Add os-hostname entries
+    for net_type in [INTERNAL, ADMIN, PUBLIC]:
+        net_config = config(ADDRESS_MAP[net_type]['override'])
+        try:
+            net_addr = resolve_address(endpoint_type=net_type)
+            ip = network_get_primary_address(
+                ADDRESS_MAP[net_type]['binding'])
+            addresses = [net_addr, ip]
+            vip = get_vip_in_network(resolve_network_cidr(ip))
+            if vip:
+                addresses.append(vip)
+            if net_config:
+                req.add_entry(
+                    net_type,
+                    net_config,
+                    addresses)
+            else:
+                # There is a network address with no corresponding hostname.
+                # Add the ip to the hostname cert to allow for this.
+ req.add_hostname_cn_ip(addresses) + except NoNetworkBinding: + log("Skipping request for certificate for ip in {} space, no " + "local address found".format(net_type), WARNING) + return req.get_request() + + +def create_ip_cert_links(ssl_dir, custom_hostname_link=None): + """Create symlinks for SAN records + + :param ssl_dir: str Directory to create symlinks in + :param custom_hostname_link: str Additional link to be created + """ + hostname = get_hostname(unit_get('private-address')) + hostname_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(hostname)) + hostname_key = os.path.join( + ssl_dir, + 'key_{}'.format(hostname)) + # Add links to hostname cert, used if os-hostname vars not set + for net_type in [INTERNAL, ADMIN, PUBLIC]: + try: + addr = resolve_address(endpoint_type=net_type) + cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) + key = os.path.join(ssl_dir, 'key_{}'.format(addr)) + if os.path.isfile(hostname_cert) and not os.path.isfile(cert): + os.symlink(hostname_cert, cert) + os.symlink(hostname_key, key) + except NoNetworkBinding: + log("Skipping creating cert symlink for ip in {} space, no " + "local address found".format(net_type), WARNING) + if custom_hostname_link: + custom_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(custom_hostname_link)) + custom_key = os.path.join( + ssl_dir, + 'key_{}'.format(custom_hostname_link)) + if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert): + os.symlink(hostname_cert, custom_cert) + os.symlink(hostname_key, custom_key) + + +def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): + """Install the certs passed into the ssl dir and append the chain if + provided. + + :param ssl_dir: str Directory to create symlinks in + :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}} + :param chain: str Chain to be appended to certs + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. Defaults to 'root' + :type group: str + """ + for cn, bundle in certs.items(): + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + cert_data = bundle['cert'] + if chain: + # Append chain file so that clients that trust the root CA will + # trust certs signed by an intermediate in the chain + cert_data = cert_data + os.linesep + chain + write_file( + path=os.path.join(ssl_dir, cert_filename), owner=user, group=group, + content=cert_data, perms=0o640) + write_file( + path=os.path.join(ssl_dir, key_filename), owner=user, group=group, + content=bundle['key'], perms=0o640) + + +def process_certificates(service_name, relation_id, unit, + custom_hostname_link=None, user='root', group='root'): + """Process the certificates supplied down the relation + + :param service_name: str Name of service the certifcates are for. + :param relation_id: str Relation id providing the certs + :param unit: str Unit providing the certs + :param custom_hostname_link: str Name of custom link to create + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. 
Defaults to 'root' + :type group: str + :returns: True if certificates processed for local unit or False + :rtype: bool + """ + data = relation_get(rid=relation_id, unit=unit) + ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) + mkdir(path=ssl_dir) + name = local_unit().replace('/', '_') + certs = data.get('{}.processed_requests'.format(name)) + chain = data.get('chain') + ca = data.get('ca') + if certs: + certs = json.loads(certs) + install_ca_cert(ca.encode()) + install_certs(ssl_dir, certs, chain, user=user, group=group) + create_ip_cert_links( + ssl_dir, + custom_hostname_link=custom_hostname_link) + return True + return False + + +def get_requests_for_local_unit(relation_name=None): + """Extract any certificates data targeted at this unit down relation_name. + + :param relation_name: str Name of relation to check for data. + :returns: List of bundles of certificates. + :rtype: List of dicts + """ + local_name = local_unit().replace('/', '_') + raw_certs_key = '{}.processed_requests'.format(local_name) + relation_name = relation_name or 'certificates' + bundles = [] + for rid in relation_ids(relation_name): + for unit in related_units(rid): + data = relation_get(rid=rid, unit=unit) + if data.get(raw_certs_key): + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': json.loads(data[raw_certs_key])}) + return bundles + + +def get_bundle_for_cn(cn, relation_name=None): + """Extract certificates for the given cn. + + :param cn: str Canonical Name on certificate. + :param relation_name: str Relation to check for certificates down. + :returns: Dictionary of certificate data, + :rtype: dict. + """ + entries = get_requests_for_local_unit(relation_name) + cert_bundle = {} + for entry in entries: + for _cn, bundle in entry['certs'].items(): + if _cn == cn: + cert_bundle = { + 'cert': bundle['cert'], + 'key': bundle['key'], + 'chain': entry['chain'], + 'ca': entry['ca']} + break + if cert_bundle: + break + return cert_bundle diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/context.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/context.py new file mode 100644 index 0000000000000000000000000000000000000000..42abccf7cb9eabc063bc8d87f016e59f4dc8f898 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,3177 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
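+
+# Illustrative sketch (not part of the upstream file): the generators in this
+# module subclass OSContextGenerator and implement __call__() to return a
+# dict of variables for template rendering; 'my-relation' and 'my-setting'
+# are placeholders:
+#
+#     class MyContext(OSContextGenerator):
+#         interfaces = ['my-relation']
+#
+#         def __call__(self):
+#             return {'setting': config('my-setting')}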
+ +import collections +import copy +import enum +import glob +import hashlib +import json +import math +import os +import re +import socket +import time + +from base64 import b64decode +from subprocess import check_call, CalledProcessError + +import six + +from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( + _config_ini as config_ini +) + +from charmhelpers.fetch import ( + apt_install, + filter_installed_packages, +) +from charmhelpers.core.hookenv import ( + NoNetworkBinding, + config, + is_relation_made, + local_unit, + log, + relation_get, + relation_ids, + related_units, + relation_set, + unit_get, + unit_private_ip, + charm_name, + DEBUG, + INFO, + ERROR, + status_set, + network_get_primary_address, + WARNING, +) + +from charmhelpers.core.sysctl import create as sysctl_create +from charmhelpers.core.strutils import bool_from_string +from charmhelpers.contrib.openstack.exceptions import OSContextError + +from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, + list_nics, + get_nic_hwaddr, + mkdir, + write_file, + pwgen, + lsb_release, + CompareHostReleases, + is_container, +) +from charmhelpers.contrib.hahelpers.cluster import ( + determine_apache_port, + determine_api_port, + https, + is_clustered, +) +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, + install_ca_cert, +) +from charmhelpers.contrib.openstack.neutron import ( + neutron_plugin_attribute, + parse_data_port_mappings, +) +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + INTERNAL, + ADMIN, + PUBLIC, + ADDRESS_MAP, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv4_addr, + get_ipv6_addr, + get_netmask_for_address, + format_ipv6_addr, + is_bridge_member, + is_ipv6_disabled, + get_relation_ip, +) +from charmhelpers.contrib.openstack.utils import ( + config_flags_parser, + get_os_codename_install_source, + enable_memcache, + CompareOpenStackReleases, + os_release, +) +from charmhelpers.core.unitdata import kv + +try: + from sriov_netplan_shim import pci +except ImportError: + # The use of the function and contexts that require the pci module is + # optional. + pass + +try: + import psutil +except ImportError: + if six.PY2: + apt_install('python-psutil', fatal=True) + else: + apt_install('python3-psutil', fatal=True) + import psutil + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] +HAPROXY_RUN_DIR = '/var/run/haproxy/' +DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" + + +def ensure_packages(packages): + """Install but do not upgrade required plugin packages.""" + required = filter_installed_packages(packages) + if required: + apt_install(required, fatal=True) + + +def context_complete(ctxt): + _missing = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + _missing.append(k) + + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level=INFO) + return False + + return True + + +class OSContextGenerator(object): + """Base class for all context generators.""" + interfaces = [] + related = False + complete = False + missing_data = [] + + def __call__(self): + raise NotImplementedError + + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. 
+ """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log('Missing required data: %s' % ' '.join(self.missing_data), + level=INFO) + else: + self.complete = True + return self.complete + + def get_related(self): + """Check if any of the context interfaces have relation ids. + Set self.related and return True if one of the interfaces + has relation ids. + """ + # Fresh start + self.related = False + try: + for interface in self.interfaces: + if relation_ids(interface): + self.related = True + return self.related + except AttributeError as e: + log("{} {}" + "".format(self, e), 'INFO') + return self.related + + +class SharedDBContext(OSContextGenerator): + interfaces = ['shared-db'] + + def __init__(self, database=None, user=None, relation_prefix=None, + ssl_dir=None, relation_id=None): + """Allows inspecting relation for settings prefixed with + relation_prefix. This is useful for parsing access for multiple + databases returned via the shared-db interface (eg, nova_password, + quantum_password) + """ + self.relation_prefix = relation_prefix + self.database = database + self.user = user + self.ssl_dir = ssl_dir + self.rel_name = self.interfaces[0] + self.relation_id = relation_id + + def __call__(self): + self.database = self.database or config('database') + self.user = self.user or config('database-user') + if None in [self.database, self.user]: + log("Could not generate shared_db context. Missing required charm " + "config options. (database name and user)", level=ERROR) + raise OSContextError + + ctxt = {} + + # NOTE(jamespage) if mysql charm provides a network upon which + # access to the database should be made, reconfigure relation + # with the service units local address and defer execution + access_network = relation_get('access-network') + if access_network is not None: + if self.relation_prefix is not None: + hostname_key = "{}_hostname".format(self.relation_prefix) + else: + hostname_key = "hostname" + access_hostname = get_address_in_network( + access_network, + unit_get('private-address')) + set_hostname = relation_get(attribute=hostname_key, + unit=local_unit()) + if set_hostname != access_hostname: + relation_set(relation_settings={hostname_key: access_hostname}) + return None # Defer any further hook execution for now.... + + password_setting = 'password' + if self.relation_prefix: + password_setting = self.relation_prefix + '_password' + + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.interfaces[0]) + + rel = (get_os_codename_install_source(config('openstack-origin')) or + 'icehouse') + for rid in rids: + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + host = rdata.get('db_host') + host = format_ipv6_addr(host) or host + ctxt = { + 'database_host': host, + 'database': self.database, + 'database_user': self.user, + 'database_password': rdata.get(password_setting), + 'database_type': 'mysql+pymysql' + } + # Port is being introduced with LP Bug #1876188 + # but it not currently required and may not be set in all + # cases, particularly in classic charms. 
+ port = rdata.get('db_port') + if port: + ctxt['database_port'] = port + if CompareOpenStackReleases(rel) < 'queens': + ctxt['database_type'] = 'mysql' + if self.context_complete(ctxt): + db_ssl(rdata, ctxt, self.ssl_dir) + return ctxt + return {} + + +class PostgresqlDBContext(OSContextGenerator): + interfaces = ['pgsql-db'] + + def __init__(self, database=None): + self.database = database + + def __call__(self): + self.database = self.database or config('database') + if self.database is None: + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. (database name)', level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.interfaces[0]): + self.related = True + for unit in related_units(rid): + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} + if self.context_complete(ctxt): + return ctxt + + return {} + + +def db_ssl(rdata, ctxt, ssl_dir): + if 'ssl_ca' in rdata and ssl_dir: + ca_path = os.path.join(ssl_dir, 'db-client.ca') + with open(ca_path, 'wb') as fh: + fh.write(b64decode(rdata['ssl_ca'])) + + ctxt['database_ssl_ca'] = ca_path + elif 'ssl_ca' in rdata: + log("Charm not setup for ssl support but ssl ca found", level=INFO) + return ctxt + + if 'ssl_cert' in rdata: + cert_path = os.path.join( + ssl_dir, 'db-client.cert') + if not os.path.exists(cert_path): + log("Waiting 1m for ssl client cert validity", level=INFO) + time.sleep(60) + + with open(cert_path, 'wb') as fh: + fh.write(b64decode(rdata['ssl_cert'])) + + ctxt['database_ssl_cert'] = cert_path + key_path = os.path.join(ssl_dir, 'db-client.key') + with open(key_path, 'wb') as fh: + fh.write(b64decode(rdata['ssl_key'])) + + ctxt['database_ssl_key'] = key_path + + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + + def __init__(self, + service=None, + service_user=None, + rel_name='identity-service'): + self.service = service + self.service_user = service_user + self.rel_name = rel_name + self.interfaces = [self.rel_name] + + def _setup_pki_cache(self): + if self.service and self.service_user: + # This is required for pki token signing if we don't want /tmp to + # be used. + cachedir = '/var/cache/%s' % (self.service) + if not os.path.isdir(cachedir): + log("Creating service cache dir %s" % (cachedir), level=DEBUG) + mkdir(path=cachedir, owner=self.service_user, + group=self.service_user, perms=0o700) + + return cachedir + return None + + def _get_pkg_name(self, python_name='keystonemiddleware'): + """Get corresponding distro installed package for python + package name. + + :param python_name: nameof the python package + :type: string + """ + pkg_names = map(lambda x: x + python_name, ('python3-', 'python-')) + + for pkg in pkg_names: + if not filter_installed_packages((pkg,)): + return pkg + + return None + + def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): + """Build Jinja2 context for full rendering of [keystone_authtoken] + section with variable names included. Re-constructed from former + template 'section-keystone-auth-mitaka'. 
+ + :param ctxt: Jinja2 context returned from self.__call__() + :type: dict + :param keystonemiddleware_os_rel: OpenStack release name of + keystonemiddleware package installed + """ + c = collections.OrderedDict((('auth_type', 'password'),)) + + # 'www_authenticate_uri' replaced 'auth_uri' since Stein, + # see keystonemiddleware upstream sources for more info + if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': + c.update(( + ('www_authenticate_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + else: + c.update(( + ('auth_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + + c.update(( + ('auth_url', "{}://{}:{}/v3".format( + ctxt.get('auth_protocol', ''), + ctxt.get('auth_host', ''), + ctxt.get('auth_port', ''))), + ('project_domain_name', ctxt.get('admin_domain_name', '')), + ('user_domain_name', ctxt.get('admin_domain_name', '')), + ('project_name', ctxt.get('admin_tenant_name', '')), + ('username', ctxt.get('admin_user', '')), + ('password', ctxt.get('admin_password', '')), + ('signing_dir', ctxt.get('signing_dir', '')),)) + + return c + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + keystonemiddleware_os_release = None + if self._get_pkg_name(): + keystonemiddleware_os_release = os_release(self._get_pkg_name()) + + cachedir = self._setup_pki_cache() + if cachedir: + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + serv_host = rdata.get('service_host') + serv_host = format_ipv6_addr(serv_host) or serv_host + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('service_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version}) + + if float(api_version) > 2: + ctxt.update({ + 'admin_domain_name': rdata.get('service_domain'), + 'service_project_id': rdata.get('service_tenant_id'), + 'service_domain_id': rdata.get('service_domain_id')}) + + # we keep all veriables in ctxt for compatibility and + # add nested dictionary for keystone_authtoken generic + # templating + if keystonemiddleware_os_release: + ctxt['keystone_authtoken'] = \ + self._get_keystone_authtoken_ctxt( + ctxt, keystonemiddleware_os_release) + + if self.context_complete(ctxt): + # NOTE(jamespage) this is required for >= icehouse + # so a missing value just indicates keystone needs + # upgrading + ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + ctxt['admin_domain_id'] = rdata.get('service_domain_id') + return ctxt + + return {} + + +class IdentityCredentialsContext(IdentityServiceContext): + '''Context for identity-credentials interface type''' + + def __init__(self, + service=None, + service_user=None, + rel_name='identity-credentials'): + super(IdentityCredentialsContext, self).__init__(service, + service_user, + 
rel_name) + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + cachedir = self._setup_pki_cache() + if cachedir: + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + credentials_host = rdata.get('credentials_host') + credentials_host = ( + format_ipv6_addr(credentials_host) or credentials_host + ) + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('credentials_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({ + 'service_port': rdata.get('credentials_port'), + 'service_host': credentials_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('credentials_project'), + 'admin_tenant_id': rdata.get('credentials_project_id'), + 'admin_user': rdata.get('credentials_username'), + 'admin_password': rdata.get('credentials_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version + }) + + if float(api_version) > 2: + ctxt.update({'admin_domain_name': + rdata.get('domain')}) + + if self.context_complete(ctxt): + return ctxt + + return {} + + +class NovaVendorMetadataContext(OSContextGenerator): + """Context used for configuring nova vendor metadata on nova.conf file.""" + + def __init__(self, os_release_pkg, interfaces=None): + """Initialize the NovaVendorMetadataContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + :param interfaces: list of string values to be used as the Context's + relation interfaces. + :type interfaces: List[str] + """ + self.os_release_pkg = os_release_pkg + if interfaces is not None: + self.interfaces = interfaces + + def __call__(self): + cmp_os_release = CompareOpenStackReleases( + os_release(self.os_release_pkg)) + ctxt = {'vendor_data': False} + + vdata_providers = [] + vdata = config('vendor-data') + vdata_url = config('vendor-data-url') + + if vdata: + try: + # validate the JSON. If invalid, we do not set anything here + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data'] = True + # Mitaka does not support DynamicJSON + # so vendordata_providers is not needed + if cmp_os_release > 'mitaka': + vdata_providers.append('StaticJSON') + + if vdata_url: + if cmp_os_release > 'mitaka': + ctxt['vendor_data_url'] = vdata_url + vdata_providers.append('DynamicJSON') + else: + log('Dynamic vendor data unsupported' + ' for {}.'.format(cmp_os_release), level=ERROR) + if vdata_providers: + ctxt['vendordata_providers'] = ','.join(vdata_providers) + + return ctxt + + +class NovaVendorMetadataJSONContext(OSContextGenerator): + """Context used for writing nova vendor metadata json file.""" + + def __init__(self, os_release_pkg): + """Initialize the NovaVendorMetadataJSONContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + """ + self.os_release_pkg = os_release_pkg + + def __call__(self): + ctxt = {'vendor_data_json': '{}'} + + vdata = config('vendor-data') + if vdata: + try: + # validate the JSON. If invalid, we return empty. 
+ json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data_json'] = vdata + + return ctxt + + +class AMQPContext(OSContextGenerator): + + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, + relation_id=None): + self.ssl_dir = ssl_dir + self.rel_name = rel_name + self.relation_prefix = relation_prefix + self.interfaces = [rel_name] + self.relation_id = relation_id + + def __call__(self): + log('Generating template context for amqp', level=DEBUG) + conf = config() + if self.relation_prefix: + user_setting = '%s-rabbit-user' % (self.relation_prefix) + vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) + else: + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' + + try: + username = conf[user_setting] + vhost = conf[vhost_setting] + except KeyError as e: + log('Could not generate shared_db context. Missing required charm ' + 'config options: %s.' % e, level=ERROR) + raise OSContextError + + ctxt = {} + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.rel_name) + for rid in rids: + ha_vip_only = False + self.related = True + transport_hosts = None + rabbitmq_port = '5672' + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + ctxt['clustered'] = True + vip = relation_get('vip', rid=rid, unit=unit) + vip = format_ipv6_addr(vip) or vip + ctxt['rabbitmq_host'] = vip + transport_hosts = [vip] + else: + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + ctxt['rabbitmq_host'] = host + transport_hosts = [host] + + ctxt.update({ + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + }) + + ssl_port = relation_get('ssl_port', rid=rid, unit=unit) + if ssl_port: + ctxt['rabbit_ssl_port'] = ssl_port + rabbitmq_port = ssl_port + + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) + if ssl_ca: + ctxt['rabbit_ssl_ca'] = ssl_ca + + if relation_get('ha_queues', rid=rid, unit=unit) is not None: + ctxt['rabbitmq_ha_queues'] = True + + ha_vip_only = relation_get('ha-vip-only', + rid=rid, unit=unit) is not None + + if self.context_complete(ctxt): + if 'rabbit_ssl_ca' in ctxt: + if not self.ssl_dir: + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) + break + + ca_path = os.path.join( + self.ssl_dir, 'rabbit-client-ca.pem') + with open(ca_path, 'wb') as fh: + fh.write(b64decode(ctxt['rabbit_ssl_ca'])) + ctxt['rabbit_ssl_ca'] = ca_path + + # Sufficient information found = break out! 
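The SSL handling just above writes the base64-encoded CA delivered over the relation to disk and swaps the context value for the file path. A standalone sketch (`/tmp` stands in for the charm's `ssl_dir`):

```python
import os
from base64 import b64decode, b64encode

def materialise_rabbit_ca(ctxt, ssl_dir):
    # Write the relation-supplied CA out and point the context at the file.
    ca_path = os.path.join(ssl_dir, 'rabbit-client-ca.pem')
    with open(ca_path, 'wb') as fh:
        fh.write(b64decode(ctxt['rabbit_ssl_ca']))
    ctxt['rabbit_ssl_ca'] = ca_path
    return ctxt

ctxt = {'rabbit_ssl_ca': b64encode(b'-----FAKE PEM-----').decode()}
print(materialise_rabbit_ca(ctxt, '/tmp'))
# {'rabbit_ssl_ca': '/tmp/rabbit-client-ca.pem'}
```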
+ break + + # Used for active/active rabbitmq >= grizzly + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): + rabbitmq_hosts = [] + for unit in related_units(rid): + host = relation_get('private-address', rid=rid, unit=unit) + if not relation_get('password', rid=rid, unit=unit): + log( + ("Skipping {} password not sent which indicates " + "unit is not ready.".format(host)), + level=DEBUG) + continue + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) + + rabbitmq_hosts = sorted(rabbitmq_hosts) + ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) + transport_hosts = rabbitmq_hosts + + if transport_hosts: + transport_url_hosts = ','.join([ + "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], + ctxt['rabbitmq_password'], + host_, + rabbitmq_port) + for host_ in transport_hosts]) + ctxt['transport_url'] = "rabbit://{}/{}".format( + transport_url_hosts, vhost) + + oslo_messaging_flags = conf.get('oslo-messaging-flags', None) + if oslo_messaging_flags: + ctxt['oslo_messaging_flags'] = config_flags_parser( + oslo_messaging_flags) + + oslo_messaging_driver = conf.get( + 'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER) + if oslo_messaging_driver: + ctxt['oslo_messaging_driver'] = oslo_messaging_driver + + notification_format = conf.get('notification-format', None) + if notification_format: + ctxt['notification_format'] = notification_format + + notification_topics = conf.get('notification-topics', None) + if notification_topics: + ctxt['notification_topics'] = notification_topics + + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) + if send_notifications_to_logs: + ctxt['send_notifications_to_logs'] = send_notifications_to_logs + + if not self.complete: + return {} + + return ctxt + + +class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" + interfaces = ['ceph'] + + def __call__(self): + if not relation_ids('ceph'): + return {} + + log('Generating template context for ceph', level=DEBUG) + mon_hosts = [] + ctxt = { + 'use_syslog': str(config('use-syslog')).lower() + } + for rid in relation_ids('ceph'): + for unit in related_units(rid): + if not ctxt.get('auth'): + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) + if not ctxt.get('key'): + ctxt['key'] = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('rbd_features'): + default_features = relation_get('rbd-features', rid=rid, unit=unit) + if default_features is not None: + ctxt['rbd_features'] = default_features + + ceph_addrs = relation_get('ceph-public-address', rid=rid, + unit=unit) + if ceph_addrs: + for addr in ceph_addrs.split(' '): + mon_hosts.append(format_ipv6_addr(addr) or addr) + else: + priv_addr = relation_get('private-address', rid=rid, + unit=unit) + mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) + + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + + if not self.context_complete(ctxt): + return {} + + ensure_packages(['ceph-common']) + return ctxt + + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + We set `key` and `auth` to None here, by default, to ensure + that the context will always evaluate to incomplete until the + Ceph relation has actually sent these details; otherwise, + there is a potential race condition between the relation + appearing and the first unit actually setting this data on the + relation. 
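The `transport_url` assembled above follows the oslo.messaging URL format: comma-separated `user:password@host:port` authorities followed by the virtual host. With illustrative values in place of relation data:

```python
# Illustrative values; the real code pulls these from the amqp relation.
user, password, vhost, port = 'openstack', 's3cr3t', 'nova', '5672'
hosts = sorted(['10.5.0.12', '10.5.0.11'])

transport_url = 'rabbit://{}/{}'.format(
    ','.join('{}:{}@{}:{}'.format(user, password, host, port)
             for host in hosts),
    vhost)
print(transport_url)
# rabbit://openstack:s3cr3t@10.5.0.11:5672,openstack:s3cr3t@10.5.0.12:5672/nova
```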
+ + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if 'auth' not in ctxt or 'key' not in ctxt: + return False + return super(CephContext, self).context_complete(ctxt) + + +class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. + + :side effect: mkdir is called on HAPROXY_RUN_DIR + """ + interfaces = ['cluster'] + + def __init__(self, singlenode_mode=False, + address_types=ADDRESS_TYPES): + self.address_types = address_types + self.singlenode_mode = singlenode_mode + + def __call__(self): + if not os.path.isdir(HAPROXY_RUN_DIR): + mkdir(path=HAPROXY_RUN_DIR) + if not relation_ids('cluster') and not self.singlenode_mode: + return {} + + l_unit = local_unit().replace('/', '-') + cluster_hosts = collections.OrderedDict() + + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in self.address_types: + cfg_opt = 'os-{}-network'.format(addr_type) + # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather + # than 'internal' + if addr_type == 'internal': + _addr_map_type = INTERNAL + else: + _addr_map_type = addr_type + # Network spaces aware + laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'], + config(cfg_opt)) + if laddr: + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = { + 'network': "{}/{}".format(laddr, + netmask), + 'backends': collections.OrderedDict([(l_unit, + laddr)]) + } + for rid in relation_ids('cluster'): + for unit in sorted(related_units(rid)): + # API Charms will need to set {addr_type}-address with + # get_relation_ip(addr_type) + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) add backend based on get_relation_ip - this + # will either be the only backend or the fallback if no acls + # match in the frontend + # Network spaces aware + addr = get_relation_ip('cluster') + cluster_hosts[addr] = {} + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = { + 'network': "{}/{}".format(addr, netmask), + 'backends': collections.OrderedDict([(l_unit, + addr)]) + } + for rid in relation_ids('cluster'): + for unit in sorted(related_units(rid)): + # API Charms will need to set their private-address with + # get_relation_ip('cluster') + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr + + ctxt = { + 'frontends': cluster_hosts, + 'default_backend': addr + } + + if config('haproxy-server-timeout'): + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + + if config('haproxy-client-timeout'): + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') + + if config('haproxy-queue-timeout'): + ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') + + if config('haproxy-connect-timeout'): + ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') + + if config('prefer-ipv6'): + ctxt['local_host'] = 'ip6-localhost' + ctxt['haproxy_host'] = '::' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + + ctxt['ipv6_enabled'] = not is_ipv6_disabled() + + ctxt['stat_port'] = '8888' + + db = kv() + 
ctxt['stat_password'] = db.get('stat-password') + if not ctxt['stat_password']: + ctxt['stat_password'] = db.set('stat-password', + pwgen(32)) + db.flush() + + for frontend in cluster_hosts: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + + return ctxt + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) + return {} + + +class ImageServiceContext(OSContextGenerator): + interfaces = ['image-service'] + + def __call__(self): + """Obtains the glance API server from the image-service relation. + Useful in nova and cinder (currently). + """ + log('Generating template context for image-service.', level=DEBUG) + rids = relation_ids('image-service') + if not rids: + return {} + + for rid in rids: + for unit in related_units(rid): + api_server = relation_get('glance-api-server', + rid=rid, unit=unit) + if api_server: + return {'glance_api_servers': api_server} + + log("ImageService context is incomplete. Missing required relation " + "data.", level=INFO) + return {} + + +class ApacheSSLContext(OSContextGenerator): + """Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. Generated context + looks something like:: + + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } + + The endpoints list consists of a tuples mapping external ports + to internal ports. + """ + interfaces = ['https'] + + # charms should inherit this context and set external ports + # and service namespace accordingly. + external_ports = [] + service_namespace = None + user = group = 'root' + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] + check_call(cmd) + + def configure_cert(self, cn=None): + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + mkdir(path=ssl_dir) + cert, key = get_cert(cn) + if cert and key: + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' + + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert), owner=self.user, + group=self.group, perms=0o640) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key), owner=self.user, + group=self.group, perms=0o640) + + def configure_ca(self): + ca_cert = get_ca_cert() + if ca_cert: + install_ca_cert(b64decode(ca_cert)) + + def canonical_names(self): + """Figure out which canonical names clients will access this service. + """ + cns = [] + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + rdata = relation_get(rid=r_id, unit=unit) + for k in rdata: + if k.startswith('ssl_key_'): + cns.append(k.lstrip('ssl_key_')) + + return sorted(list(set(cns))) + + def get_network_addresses(self): + """For each network configured, return corresponding address and + hostnamr or vip (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, hostname_in_net_a), + (address_in_net_b, hostname_in_net_b), + ...] + + or, if no hostnames(s) available: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] + + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] 
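The `stat-password` handling at the start of this context illustrates a common charm pattern: generate a secret once, persist it in the unit kv store, and reuse it on every hook run. A sketch with a plain dict standing in for charmhelpers' `kv()` and `secrets.token_urlsafe` standing in for `pwgen`:

```python
import secrets

db = {}  # stands in for charmhelpers.core.unitdata.kv()

def stat_password(db):
    password = db.get('stat-password')
    if not password:
        password = secrets.token_urlsafe(24)  # upstream uses pwgen(32)
        db['stat-password'] = password        # kv() would also need flush()
    return password

assert stat_password(db) == stat_password(db)  # stable across invocations
```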
+ """ + addresses = [] + for net_type in [INTERNAL, ADMIN, PUBLIC]: + net_config = config(ADDRESS_MAP[net_type]['config']) + # NOTE(jamespage): Fallback must always be private address + # as this is used to bind services on the + # local unit. + fallback = unit_get("private-address") + if net_config: + addr = get_address_in_network(net_config, + fallback) + else: + try: + addr = network_get_primary_address( + ADDRESS_MAP[net_type]['binding'] + ) + except (NotImplementedError, NoNetworkBinding): + addr = fallback + + endpoint = resolve_address(net_type) + addresses.append((addr, endpoint)) + + return sorted(set(addresses)) + + def __call__(self): + if isinstance(self.external_ports, six.string_types): + self.external_ports = [self.external_ports] + + if not self.external_ports or not https(): + return {} + + use_keystone_ca = True + for rid in relation_ids('certificates'): + if related_units(rid): + use_keystone_ca = False + + if use_keystone_ca: + self.configure_ca() + + self.enable_modules() + + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} + + if use_keystone_ca: + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in config (currently assumed that ca + # uses ip for cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) + + addresses = self.get_network_addresses() + for address, endpoint in addresses: + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) + return ctxt + + +class NeutronContext(OSContextGenerator): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + for pkgs in self.packages: + ensure_packages(pkgs) + + def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return ovs_ctxt + + def nuage_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nuage_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'vsp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nuage_ctxt + + def nvp_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nvp_ctxt + + def n1kv_ctxt(self): + driver = 
neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + n1kv_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags + + return n1kv_ctxt + + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return calico_ctxt + + def neutron_ctxt(self): + if https(): + proto = 'https' + else: + proto = 'http' + + if is_clustered(): + host = config('vip') + else: + host = unit_get('private-address') + + ctxt = {'network_manager': self.network_manager, + 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} + return ctxt + + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + + def midonet_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + midonet_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + mido_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'midonet', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': midonet_config} + + return mido_ctxt + + def __call__(self): + if self.network_manager not in ['quantum', 'neutron']: + return {} + + if not self.plugin: + return {} + + ctxt = self.neutron_ctxt() + + if self.plugin == 'ovs': + ctxt.update(self.ovs_ctxt()) + elif self.plugin in ['nvp', 'nsx']: + ctxt.update(self.nvp_ctxt()) + elif self.plugin == 'n1kv': + ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) + elif self.plugin == 'vsp': + ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) + elif self.plugin == 'midonet': + ctxt.update(self.midonet_ctxt()) + + alchemy_flags = config('neutron-alchemy-flags') + if alchemy_flags: + flags = config_flags_parser(alchemy_flags) + ctxt['neutron_alchemy_flags'] = flags + + return ctxt + + +class NeutronPortContext(OSContextGenerator): + + def resolve_ports(self, ports): + """Resolve NICs not yet bound to bridge(s) + + If hwaddress provided then returns resolved hwaddress otherwise NIC. 
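`resolve_ports()` below tells MAC addresses apart from interface names with a case-insensitive regex, and only resolves known, address-less, non-bridged NICs. The classification step in isolation:

```python
import re

# Same pattern as in resolve_ports below.
mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)

for entry in ['52:54:00:AB:cd:ef', 'eth0', 'br-ex']:
    kind = 'MAC' if re.match(mac_regex, entry) else 'interface name'
    print('{}: treated as {}'.format(entry, kind))
# 52:54:00:AB:cd:ef: treated as MAC
# eth0: treated as interface name
# br-ex: treated as interface name
```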
+        """
+        if not ports:
+            return None
+
+        hwaddr_to_nic = {}
+        hwaddr_to_ip = {}
+        extant_nics = list_nics()
+
+        for nic in extant_nics:
+            # Ignore virtual interfaces (bond masters will be identified from
+            # their slaves)
+            if not is_phy_iface(nic):
+                continue
+
+            _nic = get_bond_master(nic)
+            if _nic:
+                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
+                    level=DEBUG)
+                nic = _nic
+
+            hwaddr = get_nic_hwaddr(nic)
+            hwaddr_to_nic[hwaddr] = nic
+            addresses = get_ipv4_addr(nic, fatal=False)
+            addresses += get_ipv6_addr(iface=nic, fatal=False)
+            hwaddr_to_ip[hwaddr] = addresses
+
+        resolved = []
+        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
+        for entry in ports:
+            if re.match(mac_regex, entry):
+                # NIC is in known NICs and does NOT have an IP address
+                if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
+                    # If the nic is part of a bridge then don't use it
+                    if is_bridge_member(hwaddr_to_nic[entry]):
+                        continue
+
+                    # Entry is a MAC address for a valid interface that doesn't
+                    # have an IP address assigned yet.
+                    resolved.append(hwaddr_to_nic[entry])
+            elif entry in extant_nics:
+                # If the passed entry is not a MAC address and the interface
+                # exists, assume it's a valid interface, and that the user put
+                # it there on purpose (we can trust it to be the real external
+                # network).
+                resolved.append(entry)
+
+        # Ensure no duplicates
+        return list(set(resolved))
+
+
+class OSConfigFlagContext(OSContextGenerator):
+    """Provides support for user-defined config flags.
+
+    Users can define a comma-separated list of key=value pairs
+    in the charm configuration and apply them at any point in
+    any file by using a template flag.
+
+    Sometimes users might want config flags inserted within a
+    specific section so this class allows users to specify the
+    template flag name, allowing for multiple template flags
+    (sections) within the same context.
+
+    NOTE: the value of config-flags may be a comma-separated list of
+          key=value pairs and some OpenStack config files support
+          comma-separated lists as values.
+    """
+
+    def __init__(self, charm_flag='config-flags',
+                 template_flag='user_config_flags'):
+        """
+        :param charm_flag: config flags in charm configuration.
+        :param template_flag: insert point for user-defined flags in template
+                              file.
+        """
+        super(OSConfigFlagContext, self).__init__()
+        self._charm_flag = charm_flag
+        self._template_flag = template_flag
+
+    def __call__(self):
+        config_flags = config(self._charm_flag)
+        if not config_flags:
+            return {}
+
+        return {self._template_flag:
+                config_flags_parser(config_flags)}
+
+
+class LibvirtConfigFlagsContext(OSContextGenerator):
+    """
+    This context provides support for extending
+    the libvirt section through user-defined flags.
+    """
+    def __call__(self):
+        ctxt = {}
+        libvirt_flags = config('libvirt-flags')
+        if libvirt_flags:
+            ctxt['libvirt_flags'] = config_flags_parser(
+                libvirt_flags)
+        return ctxt
+
+
+class SubordinateConfigContext(OSContextGenerator):
+
+    """
+    Responsible for inspecting relations to subordinates that
+    may be exporting required config via a JSON blob.
+
+    The subordinate interface allows subordinates to export their
+    configuration requirements to the principal for multiple config
+    files and multiple services. I.e., a subordinate that has interfaces
+    to both glance and nova may export the following YAML blob as JSON::
+
+        glance:
+            /etc/glance/glance-api.conf:
+                sections:
+                    DEFAULT:
+                        - [key1, value1]
+            /etc/glance/glance-registry.conf:
+                MYSECTION:
+                    - [key2, value2]
+        nova:
+            /etc/nova/nova.conf:
+                sections:
+                    DEFAULT:
+                        - [key3, value3]
+
+
+    It is then up to the principal charms to subscribe this context to
+    the service+config file it is interested in. Configuration data will
+    be available in the template context, in glance's case, as::
+
+        ctxt = {
+            ... other context ...
+            'subordinate_configuration': {
+                'DEFAULT': {
+                    'key1': 'value1',
+                },
+                'MYSECTION': {
+                    'key2': 'value2',
+                },
+            }
+        }
+    """
+
+    def __init__(self, service, config_file, interface):
+        """
+        :param service     : Service name key to query in any subordinate
+                             data found
+        :param config_file : Service's config file to query sections
+        :param interface   : Subordinate interface to inspect
+        """
+        self.config_file = config_file
+        if isinstance(service, list):
+            self.services = service
+        else:
+            self.services = [service]
+        if isinstance(interface, list):
+            self.interfaces = interface
+        else:
+            self.interfaces = [interface]
+
+    def __call__(self):
+        ctxt = {'sections': {}}
+        rids = []
+        for interface in self.interfaces:
+            rids.extend(relation_ids(interface))
+        for rid in rids:
+            for unit in related_units(rid):
+                sub_config = relation_get('subordinate_configuration',
+                                          rid=rid, unit=unit)
+                if sub_config and sub_config != '':
+                    try:
+                        sub_config = json.loads(sub_config)
+                    except Exception:
+                        log('Could not parse JSON from '
+                            'subordinate_configuration setting from %s'
+                            % rid, level=ERROR)
+                        continue
+
+                    for service in self.services:
+                        if service not in sub_config:
+                            log('Found subordinate_configuration on %s but it '
+                                'contained nothing for %s service'
+                                % (rid, service), level=INFO)
+                            continue
+
+                        sub_config = sub_config[service]
+                        if self.config_file not in sub_config:
+                            log('Found subordinate_configuration on %s but it '
+                                'contained nothing for %s'
+                                % (rid, self.config_file), level=INFO)
+                            continue
+
+                        sub_config = sub_config[self.config_file]
+                        for k, v in six.iteritems(sub_config):
+                            if k == 'sections':
+                                for section, config_list in six.iteritems(v):
+                                    log("adding section '%s'" % (section),
+                                        level=DEBUG)
+                                    if ctxt[k].get(section):
+                                        ctxt[k][section].extend(config_list)
+                                    else:
+                                        ctxt[k][section] = config_list
+                            else:
+                                ctxt[k] = v
+        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
+        return ctxt
+
+
+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+
+        return ctxt
+
+
+class SyslogContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {'use_syslog': config('use-syslog')}
+        return ctxt
+
+
+class BindHostContext(OSContextGenerator):
+
+    def __call__(self):
+        if config('prefer-ipv6'):
+            return {'bind_host': '::'}
+        else:
+            return {'bind_host': '0.0.0.0'}
+
+
+MAX_DEFAULT_WORKERS = 4
+DEFAULT_MULTIPLIER = 2
+
+
+def _calculate_workers():
+    '''
+    Determine the number of worker processes based on the CPU
+    count of the unit containing the application.
+
+    Workers will be limited to MAX_DEFAULT_WORKERS in
+    container environments where no worker-multiplier configuration
+    option has been set.
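On the wire, a subordinate serialises the structure from the `SubordinateConfigContext` docstring above as a single JSON value on the relation; the principal parses it back and merges the per-file sections. A round-trip sketch:

```python
import json

# What a subordinate might set as 'subordinate_configuration' (illustrative).
sub_config = {
    'glance': {
        '/etc/glance/glance-api.conf': {
            'sections': {
                'DEFAULT': [['key1', 'value1']],
            },
        },
    },
}
blob = json.dumps(sub_config)

# Principal side: recover the sections for one service + config file.
sections = json.loads(blob)['glance']['/etc/glance/glance-api.conf']['sections']
print(sections)   # {'DEFAULT': [['key1', 'value1']]}
```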
+ + @returns int: number of worker processes to use + ''' + multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER + count = int(_num_cpus() * multiplier) + if multiplier > 0 and count == 0: + count = 1 + + if config('worker-multiplier') is None and is_container(): + # NOTE(jamespage): Limit unconfigured worker-multiplier + # to MAX_DEFAULT_WORKERS to avoid insane + # worker configuration in LXD containers + # on large servers + # Reference: https://pad.lv/1665270 + count = min(count, MAX_DEFAULT_WORKERS) + + return count + + +def _num_cpus(): + ''' + Compatibility wrapper for calculating the number of CPU's + a unit has. + + @returns: int: number of CPU cores detected + ''' + try: + return psutil.cpu_count() + except AttributeError: + return psutil.NUM_CPUS + + +class WorkerConfigContext(OSContextGenerator): + + def __call__(self): + ctxt = {"workers": _calculate_workers()} + return ctxt + + +class WSGIWorkerConfigContext(WorkerConfigContext): + + def __init__(self, name=None, script=None, admin_script=None, + public_script=None, user=None, group=None, + process_weight=1.00, + admin_process_weight=0.25, public_process_weight=0.75): + self.service_name = name + self.user = user or name + self.group = group or name + self.script = script + self.admin_script = admin_script + self.public_script = public_script + self.process_weight = process_weight + self.admin_process_weight = admin_process_weight + self.public_process_weight = public_process_weight + + def __call__(self): + total_processes = _calculate_workers() + ctxt = { + "service_name": self.service_name, + "user": self.user, + "group": self.group, + "script": self.script, + "admin_script": self.admin_script, + "public_script": self.public_script, + "processes": int(math.ceil(self.process_weight * total_processes)), + "admin_processes": int(math.ceil(self.admin_process_weight * + total_processes)), + "public_processes": int(math.ceil(self.public_process_weight * + total_processes)), + "threads": 1, + } + return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) + + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', + amqp_relation='amqp'): + """ + :param zmq_relation: Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = {'notifications': 'False'} + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = "True" + + return ctxt + + +class SysctlContext(OSContextGenerator): + """This context check if the 'sysctl' option exists on configuration + then creates a file with the loaded contents""" + def __call__(self): + sysctl_dict = config('sysctl') + if sysctl_dict: + sysctl_create(sysctl_dict, + '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) + return {'sysctl': sysctl_dict} + + +class NeutronAPIContext(OSContextGenerator): + ''' + Inspects current neutron-plugin-api relation for neutron settings. Return + defaults if it is not present. 
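`_calculate_workers()` above reduces to a small arithmetic rule. The same logic with the `config()` and `is_container()` lookups replaced by plain arguments:

```python
MAX_DEFAULT_WORKERS = 4
DEFAULT_MULTIPLIER = 2

def calculate_workers(num_cpus, multiplier=None, in_container=False):
    effective = multiplier or DEFAULT_MULTIPLIER
    count = int(num_cpus * effective)
    if effective > 0 and count == 0:
        count = 1
    if multiplier is None and in_container:
        # Cap unconfigured deployments in containers (LP: #1665270).
        count = min(count, MAX_DEFAULT_WORKERS)
    return count

print(calculate_workers(16, in_container=True))   # 4: capped default
print(calculate_workers(16, multiplier=0.25))     # 4: explicit multiplier
print(calculate_workers(1, multiplier=0.25))      # 1: rounded up from 0
```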
+ ''' + interfaces = ['neutron-plugin-api'] + + def __call__(self): + self.neutron_defaults = { + 'l2_population': { + 'rel_key': 'l2-population', + 'default': False, + }, + 'overlay_network_type': { + 'rel_key': 'overlay-network-type', + 'default': 'gre', + }, + 'neutron_security_groups': { + 'rel_key': 'neutron-security-groups', + 'default': False, + }, + 'network_device_mtu': { + 'rel_key': 'network-device-mtu', + 'default': None, + }, + 'enable_dvr': { + 'rel_key': 'enable-dvr', + 'default': False, + }, + 'enable_l3ha': { + 'rel_key': 'enable-l3ha', + 'default': False, + }, + 'dns_domain': { + 'rel_key': 'dns-domain', + 'default': None, + }, + 'polling_interval': { + 'rel_key': 'polling-interval', + 'default': 2, + }, + 'rpc_response_timeout': { + 'rel_key': 'rpc-response-timeout', + 'default': 60, + }, + 'report_interval': { + 'rel_key': 'report-interval', + 'default': 30, + }, + 'enable_qos': { + 'rel_key': 'enable-qos', + 'default': False, + }, + 'enable_nsg_logging': { + 'rel_key': 'enable-nsg-logging', + 'default': False, + }, + 'enable_nfg_logging': { + 'rel_key': 'enable-nfg-logging', + 'default': False, + }, + 'enable_port_forwarding': { + 'rel_key': 'enable-port-forwarding', + 'default': False, + }, + 'global_physnet_mtu': { + 'rel_key': 'global-physnet-mtu', + 'default': 1500, + }, + 'physical_network_mtus': { + 'rel_key': 'physical-network-mtus', + 'default': None, + }, + } + ctxt = self.get_neutron_options({}) + for rid in relation_ids('neutron-plugin-api'): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + # The l2-population key is used by the context as a way of + # checking if the api service on the other end is sending data + # in a recent format. + if 'l2-population' in rdata: + ctxt.update(self.get_neutron_options(rdata)) + + extension_drivers = [] + + if ctxt['enable_qos']: + extension_drivers.append('qos') + + if ctxt['enable_nsg_logging']: + extension_drivers.append('log') + + ctxt['extension_drivers'] = ','.join(extension_drivers) + + l3_extension_plugins = [] + + if ctxt['enable_port_forwarding']: + l3_extension_plugins.append('port_forwarding') + + ctxt['l3_extension_plugins'] = l3_extension_plugins + + return ctxt + + def get_neutron_options(self, rdata): + settings = {} + for nkey in self.neutron_defaults.keys(): + defv = self.neutron_defaults[nkey]['default'] + rkey = self.neutron_defaults[nkey]['rel_key'] + if rkey in rdata.keys(): + if type(defv) is bool: + settings[nkey] = bool_from_string(rdata[rkey]) + else: + settings[nkey] = rdata[rkey] + else: + settings[nkey] = defv + return settings + + +class ExternalPortContext(NeutronPortContext): + + def __call__(self): + ctxt = {} + ports = config('ext-port') + if ports: + ports = [p.strip() for p in ports.split()] + ports = self.resolve_ports(ports) + if ports: + ctxt = {"ext_port": ports[0]} + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + if mtu: + ctxt['ext_port_mtu'] = mtu + + return ctxt + + +class DataPortContext(NeutronPortContext): + + def __call__(self): + ports = config('data-port') + if ports: + # Map of {bridge:port/mac} + portmap = parse_data_port_mappings(ports) + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. + resolved = self.resolve_ports(ports) + # Rebuild port index using resolved and filtered ports. 
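`get_neutron_options()` above resolves each setting from relation data when present, converting booleans from their string form, and falls back to the declared default otherwise. In miniature, with a stand-in for charmhelpers' `bool_from_string()`:

```python
defaults = {
    'l2_population': {'rel_key': 'l2-population', 'default': False},
    'overlay_network_type': {'rel_key': 'overlay-network-type',
                             'default': 'gre'},
}

def get_neutron_options(rdata):
    settings = {}
    for nkey, spec in defaults.items():
        if spec['rel_key'] in rdata:
            raw = rdata[spec['rel_key']]
            if isinstance(spec['default'], bool):
                # stand-in for charmhelpers' bool_from_string()
                settings[nkey] = raw.strip().lower() in ('y', 'yes', 'true',
                                                         't', 'on')
            else:
                settings[nkey] = raw
        else:
            settings[nkey] = spec['default']
    return settings

print(get_neutron_options({'l2-population': 'True'}))
# {'l2_population': True, 'overlay_network_type': 'gre'}
```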
+ normalized = {get_nic_hwaddr(port): port for port in resolved + if port not in ports} + normalized.update({port: port for port in resolved + if port in ports}) + if resolved: + return {normalized[port]: bridge for port, bridge in + six.iteritems(portmap) if port in normalized.keys()} + + return None + + +class PhyNICMTUContext(DataPortContext): + + def __call__(self): + ctxt = {} + mappings = super(PhyNICMTUContext, self).__call__() + if mappings and mappings.keys(): + ports = sorted(mappings.keys()) + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + all_ports = set() + # If any of ports is a vlan device, its underlying device must have + # mtu applied first. + for port in ports: + for lport in glob.glob("/sys/class/net/%s/lower_*" % port): + lport = os.path.basename(lport) + all_ports.add(lport.split('_')[1]) + + all_ports = list(all_ports) + all_ports.extend(ports) + if mtu: + ctxt["devs"] = '\\n'.join(all_ports) + ctxt['mtu'] = mtu + + return ctxt + + +class NetworkServiceContext(OSContextGenerator): + + def __init__(self, rel_name='quantum-network-service'): + self.rel_name = rel_name + self.interfaces = [rel_name] + + def __call__(self): + for rid in relation_ids(self.rel_name): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + ctxt = { + 'keystone_host': rdata.get('keystone_host'), + 'service_port': rdata.get('service_port'), + 'auth_port': rdata.get('auth_port'), + 'service_tenant': rdata.get('service_tenant'), + 'service_username': rdata.get('service_username'), + 'service_password': rdata.get('service_password'), + 'quantum_host': rdata.get('quantum_host'), + 'quantum_port': rdata.get('quantum_port'), + 'quantum_url': rdata.get('quantum_url'), + 'region': rdata.get('region'), + 'service_protocol': + rdata.get('service_protocol') or 'http', + 'auth_protocol': + rdata.get('auth_protocol') or 'http', + 'api_version': + rdata.get('api_version') or '2.0', + } + if self.context_complete(ctxt): + return ctxt + return {} + + +class InternalEndpointContext(OSContextGenerator): + """Internal endpoint context. + + This context provides the endpoint type used for communication between + services e.g. between Nova and Cinder internally. Openstack uses Public + endpoints by default so this allows admins to optionally use internal + endpoints. + """ + def __call__(self): + return {'use_internal_endpoints': config('use-internal-endpoints')} + + +class VolumeAPIContext(InternalEndpointContext): + """Volume API context. + + This context provides information regarding the volume endpoint to use + when communicating between services. It determines which version of the + API is appropriate for use. + + This value will be determined in the resulting context dictionary + returned from calling the VolumeAPIContext object. Information provided + by this context is as follows: + + volume_api_version: the volume api version to use, currently + 'v2' or 'v3' + volume_catalog_info: the information to use for a cinder client + configuration that consumes API endpoints from the keystone + catalog. This is defined as the type:name:endpoint_type string. + """ + # FIXME(wolsen) This implementation is based on the provider being able + # to specify the package version to check but does not guarantee that the + # volume service api version selected is available. In practice, it is + # quite likely the volume service *is* providing the v3 volume service. + # This should be resolved when the service-discovery spec is implemented. 
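The index rebuild in `DataPortContext` above accepts either interface names or MAC addresses in the `data-port` mapping and keys the resulting bridge map by concrete interface. A dry run with stand-ins for the NIC helpers:

```python
# portmap: config-supplied {port-or-mac: bridge}; 'resolved' is what
# resolve_ports() returned; hwaddr stands in for get_nic_hwaddr().
portmap = {'eth0': 'br-ex', 'aa:bb:cc:dd:ee:ff': 'br-data'}
resolved = ['eth0', 'eth3']                    # eth3 matched the MAC entry
hwaddr = {'eth3': 'aa:bb:cc:dd:ee:ff'}

normalized = {hwaddr[port]: port for port in resolved
              if port not in portmap}
normalized.update({port: port for port in resolved if port in portmap})

bridge_map = {normalized[port]: bridge for port, bridge in portmap.items()
              if port in normalized}
print(bridge_map)   # {'eth0': 'br-ex', 'eth3': 'br-data'}
```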
+ def __init__(self, pkg): + """ + Creates a new VolumeAPIContext for use in determining which version + of the Volume API should be used for communication. A package codename + should be supplied for determining the currently installed OpenStack + version. + + :param pkg: the package codename to use in order to determine the + component version (e.g. nova-common). See + charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more. + """ + super(VolumeAPIContext, self).__init__() + self._ctxt = None + if not pkg: + raise ValueError('package name must be provided in order to ' + 'determine current OpenStack version.') + self.pkg = pkg + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """Determines the Volume API endpoint information. + + Determines the appropriate version of the API that should be used + as well as the catalog_info string that would be supplied. Returns + a dict containing the volume_api_version and the volume_catalog_info. + """ + rel = os_release(self.pkg) + version = '2' + if CompareOpenStackReleases(rel) >= 'pike': + version = '3' + + service_type = 'volumev{version}'.format(version=version) + service_name = 'cinderv{version}'.format(version=version) + endpoint_type = 'publicURL' + if config('use-internal-endpoints'): + endpoint_type = 'internalURL' + catalog_info = '{type}:{name}:{endpoint}'.format( + type=service_type, name=service_name, endpoint=endpoint_type) + + return { + 'volume_api_version': version, + 'volume_catalog_info': catalog_info, + } + + def __call__(self): + return self.ctxt + + +class AppArmorContext(OSContextGenerator): + """Base class for apparmor contexts.""" + + def __init__(self, profile_name=None): + self._ctxt = None + self.aa_profile = profile_name + self.aa_utils_packages = ['apparmor-utils'] + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """ + Validate aa-profile-mode settings is disable, enforce, or complain. + + :return ctxt: Dictionary of the apparmor profile or None + """ + if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: + ctxt = {'aa_profile_mode': config('aa-profile-mode'), + 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} + if self.aa_profile: + ctxt['aa_profile'] = self.aa_profile + else: + ctxt = None + return ctxt + + def __call__(self): + return self.ctxt + + def install_aa_utils(self): + """ + Install packages required for apparmor configuration. + """ + log("Installing apparmor utils.") + ensure_packages(self.aa_utils_packages) + + def manually_disable_aa_profile(self): + """ + Manually disable an apparmor profile. + + If aa-profile-mode is set to disabled (default) this is required as the + template has been written but apparmor is yet unaware of the profile + and aa-disable aa-profile fails. Without this the profile would kick + into enforce mode on the next service restart. + + """ + profile_path = '/etc/apparmor.d' + disable_path = '/etc/apparmor.d/disable' + if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): + os.symlink(os.path.join(profile_path, self.aa_profile), + os.path.join(disable_path, self.aa_profile)) + + def setup_aa_profile(self): + """ + Setup an apparmor profile. + The ctxt dictionary will contain the apparmor profile mode and + the apparmor profile name. 
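`_determine_ctxt()` above boils down to two decisions: API v3 from Pike onward, and the endpoint type from `use-internal-endpoints`. A worked example for a Pike or newer cloud with internal endpoints enabled:

```python
release_is_pike_or_later = True
use_internal_endpoints = True

version = '3' if release_is_pike_or_later else '2'
catalog_info = '{}:{}:{}'.format(
    'volumev{}'.format(version),
    'cinderv{}'.format(version),
    'internalURL' if use_internal_endpoints else 'publicURL')
print({'volume_api_version': version, 'volume_catalog_info': catalog_info})
# {'volume_api_version': '3',
#  'volume_catalog_info': 'volumev3:cinderv3:internalURL'}
```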
+ Makes calls out to aa-disable, aa-complain, or aa-enforce to setup + the apparmor profile. + """ + self() + if not self.ctxt: + log("Not enabling apparmor Profile") + return + self.install_aa_utils() + cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])] + cmd.append(self.ctxt['aa_profile']) + log("Setting up the apparmor profile for {} in {} mode." + "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) + try: + check_call(cmd) + except CalledProcessError as e: + # If aa-profile-mode is set to disabled (default) manual + # disabling is required as the template has been written but + # apparmor is yet unaware of the profile and aa-disable aa-profile + # fails. If aa-disable learns to read profile files first this can + # be removed. + if self.ctxt['aa_profile_mode'] == 'disable': + log("Manually disabling the apparmor profile for {}." + "".format(self.ctxt['aa_profile'])) + self.manually_disable_aa_profile() + return + status_set('blocked', "Apparmor profile {} failed to be set to {}." + "".format(self.ctxt['aa_profile'], + self.ctxt['aa_profile_mode'])) + raise e + + +class MemcacheContext(OSContextGenerator): + """Memcache context + + This context provides options for configuring a local memcache client and + server for both IPv4 and IPv6 + """ + + def __init__(self, package=None): + """ + @param package: Package to examine to extrapolate OpenStack release. + Used when charms have no openstack-origin config + option (ie subordinates) + """ + self.package = package + + def __call__(self): + ctxt = {} + ctxt['use_memcache'] = enable_memcache(package=self.package) + if ctxt['use_memcache']: + # Trusty version of memcached does not support ::1 as a listen + # address so use host file entry instead + release = lsb_release()['DISTRIB_CODENAME'].lower() + if is_ipv6_disabled(): + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '127.0.0.1' + else: + ctxt['memcache_server'] = 'localhost' + ctxt['memcache_server_formatted'] = '127.0.0.1' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = '{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + else: + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + return ctxt + + +class EnsureDirContext(OSContextGenerator): + ''' + Serves as a generic context to create a directory as a side-effect. + + Useful for software that supports drop-in files (.d) in conjunction + with config option-based templates. Examples include: + * OpenStack oslo.policy drop-in files; + * systemd drop-in config files; + * other software that supports overriding defaults with .d files + + Another use-case is when a subordinate generates a configuration for + primary to render in a separate directory. + + Some software requires a user to create a target directory to be + scanned for drop-in files with a specific format. This is why this + context is needed to do that before rendering a template. + ''' + + def __init__(self, dirname, **kwargs): + '''Used merely to ensure that a given directory exists.''' + self.dirname = dirname + self.kwargs = kwargs + + def __call__(self): + mkdir(self.dirname, **self.kwargs) + return {} + + +class VersionsContext(OSContextGenerator): + """Context to return the openstack and operating system versions. 
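`MemcacheContext` above picks listen addresses by IPv6 availability and Ubuntu release (trusty's memcached cannot listen on `::1`). Its decision table as a small function:

```python
def memcache_addresses(ipv6_disabled, newer_than_trusty):
    if ipv6_disabled:
        server = '127.0.0.1' if newer_than_trusty else 'localhost'
        return server, '127.0.0.1:11211'
    server = '::1' if newer_than_trusty else 'ip6-localhost'
    return server, 'inet6:[::1]:11211'

print(memcache_addresses(False, True))   # ('::1', 'inet6:[::1]:11211')
print(memcache_addresses(True, False))   # ('localhost', '127.0.0.1:11211')
```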
+ + """ + def __init__(self, pkg='python-keystone'): + """Initialise context. + + :param pkg: Package to extrapolate openstack version from. + :type pkg: str + """ + self.pkg = pkg + + def __call__(self): + ostack = os_release(self.pkg) + osystem = lsb_release()['DISTRIB_CODENAME'].lower() + return { + 'openstack_release': ostack, + 'operating_system_release': osystem} + + +class LogrotateContext(OSContextGenerator): + """Common context generator for logrotate.""" + + def __init__(self, location, interval, count): + """ + :param location: Absolute path for the logrotate config file + :type location: str + :param interval: The interval for the rotations. Valid values are + 'daily', 'weekly', 'monthly', 'yearly' + :type interval: str + :param count: The logrotate count option configures the 'count' times + the log files are being rotated before being + :type count: int + """ + self.location = location + self.interval = interval + self.count = 'rotate {}'.format(count) + + def __call__(self): + ctxt = { + 'logrotate_logs_location': self.location, + 'logrotate_interval': self.interval, + 'logrotate_count': self.count, + } + return ctxt + + +class HostInfoContext(OSContextGenerator): + """Context to provide host information.""" + + def __init__(self, use_fqdn_hint_cb=None): + """Initialize HostInfoContext + + :param use_fqdn_hint_cb: Callback whose return value used to populate + `use_fqdn_hint` + :type use_fqdn_hint_cb: Callable[[], bool] + """ + # Store callback used to get hint for whether FQDN should be used + + # Depending on the workload a charm manages, the use of FQDN vs. + # shortname may be a deploy-time decision, i.e. behaviour can not + # change on charm upgrade or post-deployment configuration change. + + # The hint is passed on as a flag in the context to allow the decision + # to be made in the Jinja2 configuration template. + self.use_fqdn_hint_cb = use_fqdn_hint_cb + + def _get_canonical_name(self, name=None): + """Get the official FQDN of the host + + The implementation of ``socket.getfqdn()`` in the standard Python + library does not exhaust all methods of getting the official name + of a host ref Python issue https://bugs.python.org/issue5004 + + This function mimics the behaviour of a call to ``hostname -f`` to + get the official FQDN but returns an empty string if it is + unsuccessful. + + :param name: Shortname to get FQDN on + :type name: Optional[str] + :returns: The official FQDN for host or empty string ('') + :rtype: str + """ + name = name or socket.gethostname() + fqdn = '' + + if six.PY2: + exc = socket.error + else: + exc = OSError + + try: + addrs = socket.getaddrinfo( + name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) + except exc: + pass + else: + for addr in addrs: + if addr[3]: + if '.' in addr[3]: + fqdn = addr[3] + break + return fqdn + + def __call__(self): + name = socket.gethostname() + ctxt = { + 'host_fqdn': self._get_canonical_name(name) or name, + 'host': name, + 'use_fqdn_hint': ( + self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) + } + return ctxt + + +def validate_ovs_use_veth(*args, **kwargs): + """Validate OVS use veth setting for dhcp agents + + The ovs_use_veth setting is considered immutable as it will break existing + deployments. Historically, we set ovs_use_veth=True in dhcp_agent.ini. It + turns out this is no longer necessary. Ideally, all new deployments would + have this set to False. + + This function validates that the config value does not conflict with + previously deployed settings in dhcp_agent.ini. 
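`_get_canonical_name()` above mirrors `hostname -f` via `getaddrinfo` with `AI_CANONNAME`, preferring the first canonical name that contains a dot. Reduced to a standalone Python 3 helper:

```python
import socket

def canonical_name(name=None):
    name = name or socket.gethostname()
    try:
        addrs = socket.getaddrinfo(name, None, 0, socket.SOCK_DGRAM, 0,
                                   socket.AI_CANONNAME)
    except OSError:
        return ''
    for addr in addrs:
        if addr[3] and '.' in addr[3]:   # addr[3] is the canonical name
            return addr[3]
    return ''

print(canonical_name() or '(no FQDN resolvable)')
```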
+
+    See LP Bug#1831935 for details.
+
+    :returns: Status state and message
+    :rtype: Union[(None, None), (string, string)]
+    """
+    existing_ovs_use_veth = (
+        DHCPAgentContext.get_existing_ovs_use_veth())
+    config_ovs_use_veth = DHCPAgentContext.parse_ovs_use_veth()
+
+    # Check settings are set and not None
+    if existing_ovs_use_veth is not None and config_ovs_use_veth is not None:
+        # Check for mismatch between existing config ini and juju config
+        if existing_ovs_use_veth != config_ovs_use_veth:
+            # Stop the line to avoid breakage
+            msg = (
+                "The existing setting for dhcp_agent.ini ovs_use_veth, {}, "
+                "does not match the juju config setting, {}. This may lead to "
+                "VMs being unable to receive a DHCP IP. Either change the "
+                "juju config setting or dhcp agents may need to be recreated."
+                .format(existing_ovs_use_veth, config_ovs_use_veth))
+            log(msg, ERROR)
+            return (
+                "blocked",
+                "Mismatched existing and configured ovs-use-veth. See log.")
+
+    # Everything is OK
+    return None, None
+
+
+class DHCPAgentContext(OSContextGenerator):
+
+    def __call__(self):
+        """Return the DHCPAgentContext.
+
+        Return all DHCP agent INI related configuration from the ovs unit
+        this charm is attached to (as a subordinate) and the 'dns_domain'
+        from the neutron-plugin-api relations (if one is set).
+
+        :returns: Dictionary context
+        :rtype: Dict
+        """
+
+        ctxt = {}
+        dnsmasq_flags = config('dnsmasq-flags')
+        if dnsmasq_flags:
+            ctxt['dnsmasq_flags'] = config_flags_parser(dnsmasq_flags)
+        ctxt['dns_servers'] = config('dns-servers')
+
+        neutron_api_settings = NeutronAPIContext()()
+
+        ctxt['debug'] = config('debug')
+        ctxt['instance_mtu'] = config('instance-mtu')
+        ctxt['ovs_use_veth'] = self.get_ovs_use_veth()
+
+        ctxt['enable_metadata_network'] = config('enable-metadata-network')
+        ctxt['enable_isolated_metadata'] = config('enable-isolated-metadata')
+
+        if neutron_api_settings.get('dns_domain'):
+            ctxt['dns_domain'] = neutron_api_settings.get('dns_domain')
+
+        # Override user supplied config for these plugins as these settings are
+        # mandatory
+        if config('plugin') in ['nvp', 'nsx', 'n1kv']:
+            ctxt['enable_metadata_network'] = True
+            ctxt['enable_isolated_metadata'] = True
+
+        return ctxt
+
+    @staticmethod
+    def get_existing_ovs_use_veth():
+        """Return existing ovs_use_veth setting from dhcp_agent.ini.
+
+        :returns: Boolean value of existing ovs_use_veth setting or None
+        :rtype: Optional[Bool]
+        """
+        DHCP_AGENT_INI = "/etc/neutron/dhcp_agent.ini"
+        existing_ovs_use_veth = None
+        # If there is a dhcp_agent.ini file read the current setting
+        if os.path.isfile(DHCP_AGENT_INI):
+            # config_ini does the right thing and returns None if the setting is
+            # commented.
+            existing_ovs_use_veth = (
+                config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth"))
+        # Convert to Bool if necessary
+        if isinstance(existing_ovs_use_veth, six.string_types):
+            return bool_from_string(existing_ovs_use_veth)
+        return existing_ovs_use_veth
+
+    @staticmethod
+    def parse_ovs_use_veth():
+        """Parse the ovs-use-veth config setting.
+
+        Parse the string config setting for ovs-use-veth and return a boolean
+        or None.
+
+        bool_from_string will raise a ValueError if the string is not falsy or
+        truthy.
+
+        :raises: ValueError for invalid input
+        :returns: Boolean value of ovs-use-veth or None
+        :rtype: Optional[Bool]
+        """
+        _config = config("ovs-use-veth")
+        # An unset parameter returns None. Just in case we will also check for
+        # an empty string: "". Ironically, (the problem we are trying to avoid)
+        # "False" returns True and "" returns False.
+ if _config is None or not _config: + # Return None + return + # bool_from_string handles many variations of true and false strings + # as well as upper and lowercases including: + # ['y', 'yes', 'true', 't', 'on', 'n', 'no', 'false', 'f', 'off'] + return bool_from_string(_config) + + def get_ovs_use_veth(self): + """Return correct ovs_use_veth setting for use in dhcp_agent.ini. + + Get the right value from config or existing dhcp_agent.ini file. + Existing has precedence. Attempt to default to "False" without + disrupting existing deployments. Handle existing deployments and + upgrades safely. See LP Bug#1831935 + + :returns: Value to use for ovs_use_veth setting + :rtype: Bool + """ + _existing = self.get_existing_ovs_use_veth() + if _existing is not None: + return _existing + + _config = self.parse_ovs_use_veth() + if _config is None: + # New better default + return False + else: + return _config + + +EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac']) + + +def resolve_pci_from_mapping_config(config_key): + """Resolve local PCI devices from MAC addresses in mapping config. + + Note that this function keeps record of mac->PCI address lookups + in the local unit db as the devices will disappaear from the system + once bound. + + :param config_key: Configuration option key to parse data from + :type config_key: str + :returns: PCI device address to Tuple(entity, mac) map + :rtype: collections.OrderedDict[str,Tuple[str,str]] + """ + devices = pci.PCINetDevices() + resolved_devices = collections.OrderedDict() + db = kv() + # Note that ``parse_data_port_mappings`` returns Dict regardless of input + for mac, entity in parse_data_port_mappings(config(config_key)).items(): + pcidev = devices.get_device_from_mac(mac) + if pcidev: + # NOTE: store mac->pci allocation as post binding + # it disappears from PCIDevices. + db.set(mac, pcidev.pci_address) + db.flush() + + pci_address = db.get(mac) + if pci_address: + resolved_devices[pci_address] = EntityMac(entity, mac) + + return resolved_devices + + +class DPDKDeviceContext(OSContextGenerator): + + def __init__(self, driver_key=None, bridges_key=None, bonds_key=None): + """Initialize DPDKDeviceContext. + + :param driver_key: Key to use when retrieving driver config. + :type driver_key: str + :param bridges_key: Key to use when retrieving bridge config. + :type bridges_key: str + :param bonds_key: Key to use when retrieving bonds config. + :type bonds_key: str + """ + self.driver_key = driver_key or 'dpdk-driver' + self.bridges_key = bridges_key or 'data-port' + self.bonds_key = bonds_key or 'dpdk-bond-mappings' + + def __call__(self): + """Populate context. + + :returns: context + :rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]] + """ + driver = config(self.driver_key) + if driver is None: + return {} + # Resolve PCI devices for both directly used devices (_bridges) + # and devices for use in dpdk bonds (_bonds) + pci_devices = resolve_pci_from_mapping_config(self.bridges_key) + pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) + return {'devices': pci_devices, + 'driver': driver} + + +class OVSDPDKDeviceContext(OSContextGenerator): + + def __init__(self, bridges_key=None, bonds_key=None): + """Initialize OVSDPDKDeviceContext. + + :param bridges_key: Key to use when retrieving bridge config. + :type bridges_key: str + :param bonds_key: Key to use when retrieving bonds config. 
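`get_ovs_use_veth()` above encodes a strict precedence: a value already present in `dhcp_agent.ini` wins, then explicit charm config, then the safe default of `False`. The rule in isolation:

```python
def effective_ovs_use_veth(existing, configured):
    # existing: value read from dhcp_agent.ini (or None); configured:
    # parsed ovs-use-veth charm config (or None if unset).
    if existing is not None:
        return existing          # never flip a deployed setting
    if configured is None:
        return False             # new deployments default to False
    return configured

print(effective_ovs_use_veth(None, None))    # False
print(effective_ovs_use_veth(True, False))   # True: existing wins
```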
+ :type bonds_key: str + """ + self.bridges_key = bridges_key or 'data-port' + self.bonds_key = bonds_key or 'dpdk-bond-mappings' + + @staticmethod + def _parse_cpu_list(cpulist): + """Parses a linux cpulist for a numa node + + :returns: list of cores + :rtype: List[int] + """ + cores = [] + ranges = cpulist.split(',') + for cpu_range in ranges: + if "-" in cpu_range: + cpu_min_max = cpu_range.split('-') + cores += range(int(cpu_min_max[0]), + int(cpu_min_max[1]) + 1) + else: + cores.append(int(cpu_range)) + return cores + + def _numa_node_cores(self): + """Get map of numa node -> cpu core + + :returns: map of numa node -> cpu core + :rtype: Dict[str,List[int]] + """ + nodes = {} + node_regex = '/sys/devices/system/node/node*' + for node in glob.glob(node_regex): + index = node.lstrip('/sys/devices/system/node/node') + with open(os.path.join(node, 'cpulist')) as cpulist: + nodes[index] = self._parse_cpu_list(cpulist.read().strip()) + return nodes + + def cpu_mask(self): + """Get hex formatted CPU mask + + The mask is based on using the first config:dpdk-socket-cores + cores of each NUMA node in the unit. + :returns: hex formatted CPU mask + :rtype: str + """ + num_cores = config('dpdk-socket-cores') + mask = 0 + for cores in self._numa_node_cores().values(): + for core in cores[:num_cores]: + mask = mask | 1 << core + return format(mask, '#04x') + + def socket_memory(self): + """Formatted list of socket memory configuration per NUMA node + + :returns: socket memory configuration per NUMA node + :rtype: str + """ + sm_size = config('dpdk-socket-memory') + node_regex = '/sys/devices/system/node/node*' + mem_list = [str(sm_size) for _ in glob.glob(node_regex)] + if mem_list: + return ','.join(mem_list) + else: + return str(sm_size) + + def devices(self): + """List of PCI devices for use by DPDK + + :returns: List of PCI devices for use by DPDK + :rtype: collections.OrderedDict[str,str] + """ + pci_devices = resolve_pci_from_mapping_config(self.bridges_key) + pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) + return pci_devices + + def _formatted_whitelist(self, flag): + """Flag formatted list of devices to whitelist + + :param flag: flag format to use + :type flag: str + :rtype: str + """ + whitelist = [] + for device in self.devices(): + whitelist.append(flag.format(device=device)) + return ' '.join(whitelist) + + def device_whitelist(self): + """Formatted list of devices to whitelist for dpdk + + using the old style '-w' flag + + :returns: devices to whitelist prefixed by '-w ' + :rtype: str + """ + return self._formatted_whitelist('-w {device}') + + def pci_whitelist(self): + """Formatted list of devices to whitelist for dpdk + + using the new style '--pci-whitelist' flag + + :returns: devices to whitelist prefixed by '--pci-whitelist ' + :rtype: str + """ + return self._formatted_whitelist('--pci-whitelist {device}') + + def __call__(self): + """Populate context. + + :returns: context + :rtype: Dict[str,Union[bool,str]] + """ + ctxt = {} + whitelist = self.device_whitelist() + if whitelist: + ctxt['dpdk_enabled'] = config('enable-dpdk') + ctxt['device_whitelist'] = self.device_whitelist() + ctxt['socket_memory'] = self.socket_memory() + ctxt['cpu_mask'] = self.cpu_mask() + return ctxt + + +class BridgePortInterfaceMap(object): + """Build a map of bridge ports and interaces from charm configuration. + + NOTE: the handling of this detail in the charm is pre-deprecated. 
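Kernel cpulist files mix single cores and ranges; `_parse_cpu_list()` above expands them and `cpu_mask()` folds the chosen cores into a hex mask. Both steps, self-contained:

```python
def parse_cpu_list(cpulist):
    cores = []
    for cpu_range in cpulist.split(','):
        if '-' in cpu_range:
            lo, hi = cpu_range.split('-')
            cores += range(int(lo), int(hi) + 1)
        else:
            cores.append(int(cpu_range))
    return cores

cores = parse_cpu_list('0-3,8,10-11')
print(cores)                       # [0, 1, 2, 3, 8, 10, 11]

mask = 0
for core in cores[:2]:             # e.g. dpdk-socket-cores = 2
    mask |= 1 << core
print(format(mask, '#04x'))        # 0x03
```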
+ + The long term goal is for network connectivity detail to be modelled in + the server provisioning layer (such as MAAS) which in turn will provide + a Netplan YAML description that will be used to drive Open vSwitch. + + Until we get to that reality the charm will need to configure this + detail based on application level configuration options. + + There is an established way of mapping interfaces to ports and bridges + in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we + will carry that forward. + + The relationship between bridge, port and interface(s). + +--------+ + | bridge | + +--------+ + | + +----------------+ + | port aka. bond | + +----------------+ + | | + +-+ +-+ + |i| |i| + |n| |n| + |t| |t| + |0| |N| + +-+ +-+ + """ + class interface_type(enum.Enum): + """Supported interface types. + + Supported interface types can be found in the ``iface_types`` column + in the ``Open_vSwitch`` table on a running system. + """ + dpdk = 'dpdk' + internal = 'internal' + system = 'system' + + def __str__(self): + """Return string representation of value. + + :returns: string representation of value. + :rtype: str + """ + return self.value + + def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, + global_mtu=None): + """Initialize map. + + :param bridges_key: Name of bridge:interface/port map config key + (default: 'data-port') + :type bridges_key: Optional[str] + :param bonds_key: Name of port-name:interface map config key + (default: 'dpdk-bond-mappings') + :type bonds_key: Optional[str] + :param enable_dpdk_key: Name of DPDK toggle config key + (default: 'enable-dpdk') + :type enable_dpdk_key: Optional[str] + :param global_mtu: Set an MTU on all interfaces at map initialization. + + The default is to have Open vSwitch get this from the underlying + interface as set up by bare metal provisioning. + + Note that you can augment the MTU on an individual interface basis + like this: + + ifdatamap = bpi.get_ifdatamap(bridge, port) + ifdatamap = { + port: { + **ifdata, + **{'mtu-request': my_individual_mtu_map[port]}, + } + for port, ifdata in ifdatamap.items() + } + :type global_mtu: Optional[int] + """ + bridges_key = bridges_key or 'data-port' + bonds_key = bonds_key or 'dpdk-bond-mappings' + enable_dpdk_key = enable_dpdk_key or 'enable-dpdk' + self._map = collections.defaultdict( + lambda: collections.defaultdict(dict)) + self._ifname_mac_map = collections.defaultdict(list) + self._mac_ifname_map = {} + self._mac_pci_address_map = {} + + # First we iterate over the list of physical interfaces visible to the + # system and update interface name to mac and mac to interface name map + for ifname in list_nics(): + if not is_phy_iface(ifname): + continue + mac = get_nic_hwaddr(ifname) + self._ifname_mac_map[ifname] = [mac] + self._mac_ifname_map[mac] = ifname + + # check if interface is part of a linux bond + _bond_name = get_bond_master(ifname) + if _bond_name and _bond_name != ifname: + log('Add linux bond "{}" to map for physical interface "{}" ' + 'with mac "{}".'.format(_bond_name, ifname, mac), + level=DEBUG) + # for bonds we want to be able to get a list of the mac + # addresses for the physical interfaces the bond is made up of.
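+ # Illustrative (assumed MACs): a bond0 made up of eno1 and eno2 would + # end up in the map as: + # _ifname_mac_map == {'eno1': ['aa:..:01'], 'eno2': ['aa:..:02'], + # 'bond0': ['aa:..:01', 'aa:..:02']}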
+ if self._ifname_mac_map.get(_bond_name): + self._ifname_mac_map[_bond_name].append(mac) + else: + self._ifname_mac_map[_bond_name] = [mac] + + # In light of the pre-deprecation notice in the docstring of this + # class we will expose the ability to configure OVS bonds as a + # DPDK-only feature, but generally use the data structures internally. + if config(enable_dpdk_key): + # resolve PCI address of interfaces listed in the bridges and bonds + # charm configuration options. Note that for already bound + # interfaces the helper will retrieve MAC address from the unit + # KV store as the information is no longer available in sysfs. + _pci_bridge_mac = resolve_pci_from_mapping_config( + bridges_key) + _pci_bond_mac = resolve_pci_from_mapping_config( + bonds_key) + + for pci_address, bridge_mac in _pci_bridge_mac.items(): + if bridge_mac.mac in self._mac_ifname_map: + # if we already have the interface name in our map it is + # visible to the system and therefore not bound to DPDK + continue + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[ifname] = [bridge_mac.mac] + self._mac_ifname_map[bridge_mac.mac] = ifname + self._mac_pci_address_map[bridge_mac.mac] = pci_address + + for pci_address, bond_mac in _pci_bond_mac.items(): + # for bonds we want to be able to get a list of macs from + # the bond name and also get at the interface name made up + # of the hash of the PCI address + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac) + self._mac_ifname_map[bond_mac.mac] = ifname + self._mac_pci_address_map[bond_mac.mac] = pci_address + + config_bridges = config(bridges_key) or '' + for bridge, ifname_or_mac in ( + pair.split(':', 1) + for pair in config_bridges.split()): + if ':' in ifname_or_mac: + try: + ifname = self.ifname_from_mac(ifname_or_mac) + except KeyError: + # The interface is destined for a different unit in the + # deployment. + continue + macs = [ifname_or_mac] + else: + ifname = ifname_or_mac + macs = self.macs_from_ifname(ifname_or_mac) + + portname = ifname + for mac in macs: + try: + pci_address = self.pci_address_from_mac(mac) + iftype = self.interface_type.dpdk + ifname = self.ifname_from_mac(mac) + except KeyError: + pci_address = None + iftype = self.interface_type.system + + self.add_interface( + bridge, portname, ifname, iftype, pci_address, global_mtu) + + if not macs: + # We have not mapped the interface and it is probably some sort + # of virtual interface. Our user has put it in the config with + # a purpose so let's carry out their wish. LP: #1884743 + log('Add unmapped interface from config: name "{}" bridge "{}"' + .format(ifname, bridge), + level=DEBUG) + self.add_interface( + bridge, ifname, ifname, self.interface_type.system, None, + global_mtu) + + def __getitem__(self, key): + """Provide a Dict-like interface, get value of item. + + :param key: Key to look up value from. + :type key: any + :returns: Value + :rtype: any + """ + return self._map.__getitem__(key) + + def __iter__(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.__iter__() + + def __len__(self): + """Provide a Dict-like interface, measure the length of internal map. + + :returns: Length + :rtype: int + """ + return len(self._map) + + def items(self): + """Provide a Dict-like interface, iterate over items.
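+ + For this map the items are (bridge, ports) pairs, for example + (illustrative): ('br-ex', {'bond0': {'eno1': {'type': 'system'}}}).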
+ + :returns: Key Value pairs + :rtype: Iterator[any, any] + """ + return self._map.items() + + def keys(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.keys() + + def ifname_from_mac(self, mac): + """ + :returns: Name of interface + :rtype: str + :raises: KeyError + """ + return (get_bond_master(self._mac_ifname_map[mac]) or + self._mac_ifname_map[mac]) + + def macs_from_ifname(self, ifname): + """ + :returns: List of hardware address (MAC) of interface + :rtype: List[str] + :raises: KeyError + """ + return self._ifname_mac_map[ifname] + + def pci_address_from_mac(self, mac): + """ + :param mac: Hardware address (MAC) of interface + :type mac: str + :returns: PCI address of device associated with mac + :rtype: str + :raises: KeyError + """ + return self._mac_pci_address_map[mac] + + def add_interface(self, bridge, port, ifname, iftype, + pci_address, mtu_request): + """Add an interface to the map. + + :param bridge: Name of bridge on which the bond will be added + :type bridge: str + :param port: Name of port which will represent the bond on bridge + :type port: str + :param ifname: Name of interface that will make up the bonded port + :type ifname: str + :param iftype: Type of interface + :type iftype: BridgeBondMap.interface_type + :param pci_address: PCI address of interface + :type pci_address: Optional[str] + :param mtu_request: MTU to request for interface + :type mtu_request: Optional[int] + """ + self._map[bridge][port][ifname] = { + 'type': str(iftype), + } + if pci_address: + self._map[bridge][port][ifname].update({ + 'pci-address': pci_address, + }) + if mtu_request is not None: + self._map[bridge][port][ifname].update({ + 'mtu-request': str(mtu_request) + }) + + def get_ifdatamap(self, bridge, port): + """Get structure suitable for charmhelpers.contrib.network.ovs helpers. + + :param bridge: Name of bridge on which the port will be added + :type bridge: str + :param port: Name of port which will represent one or more interfaces + :type port: str + """ + for _bridge, _ports in self.items(): + for _port, _interfaces in _ports.items(): + if _bridge == bridge and _port == port: + ifdatamap = {} + for name, data in _interfaces.items(): + ifdatamap.update({ + name: { + 'type': data['type'], + }, + }) + if data.get('mtu-request') is not None: + ifdatamap[name].update({ + 'mtu_request': data['mtu-request'], + }) + if data.get('pci-address'): + ifdatamap[name].update({ + 'options': { + 'dpdk-devargs': data['pci-address'], + }, + }) + return ifdatamap + + +class BondConfig(object): + """Container and helpers for bond configuration options. + + Data is put into a dictionary and a convenient config get interface is + provided. + """ + + DEFAULT_LACP_CONFIG = { + 'mode': 'balance-tcp', + 'lacp': 'active', + 'lacp-time': 'fast' + } + ALL_BONDS = 'ALL_BONDS' + + BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp'] + BOND_LACP = ['active', 'passive', 'off'] + BOND_LACP_TIME = ['fast', 'slow'] + + def __init__(self, config_key=None): + """Parse specified configuration option. 
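+ + An illustrative (assumed) value for the ``dpdk-bond-config`` option is + ':balance-slb:off:slow dpdk-bond0:balance-tcp:active:fast', where the + entry with an empty bond name sets the default for all bonds.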
+ + :param config_key: Configuration key to retrieve data from + (default: ``dpdk-bond-config``) + :type config_key: Optional[str] + """ + self.config_key = config_key or 'dpdk-bond-config' + + self.lacp_config = { + self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG) + } + + lacp_config = config(self.config_key) + if lacp_config: + lacp_config_map = lacp_config.split() + for entry in lacp_config_map: + bond, entry = entry.partition(':')[0:3:2] + if not bond: + bond = self.ALL_BONDS + + mode, entry = entry.partition(':')[0:3:2] + if not mode: + mode = self.DEFAULT_LACP_CONFIG['mode'] + assert mode in self.BOND_MODES, \ + "Bond mode {} is invalid".format(mode) + + lacp, entry = entry.partition(':')[0:3:2] + if not lacp: + lacp = self.DEFAULT_LACP_CONFIG['lacp'] + assert lacp in self.BOND_LACP, \ + "Bond lacp {} is invalid".format(lacp) + + lacp_time, entry = entry.partition(':')[0:3:2] + if not lacp_time: + lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time'] + assert lacp_time in self.BOND_LACP_TIME, \ + "Bond lacp-time {} is invalid".format(lacp_time) + + self.lacp_config[bond] = { + 'mode': mode, + 'lacp': lacp, + 'lacp-time': lacp_time + } + + def get_bond_config(self, bond): + """Get the LACP configuration for a bond + + :param bond: the bond name + :return: a dictionary with the configuration of the bond + :rtype: Dict[str,Dict[str,str]] + """ + return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS]) + + def get_ovs_portdata(self, bond): + """Get structure suitable for charmhelpers.contrib.network.ovs helpers. + + :param bond: the bond name + :return: a dictionary with the configuration of the bond + :rtype: Dict[str,Union[str,Dict[str,str]]] + """ + bond_config = self.get_bond_config(bond) + return { + 'bond_mode': bond_config['mode'], + 'lacp': bond_config['lacp'], + 'other_config': { + 'lacp-time': bond_config['lacp-time'], + }, + } + + +class SRIOVContext(OSContextGenerator): + """Provide context for configuring SR-IOV devices.""" + + class sriov_config_mode(enum.Enum): + """Mode in which SR-IOV is configured. + + The configuration option identified by the ``numvfs_key`` parameter + is overloaded and defines in which mode the charm should interpret + the other SR-IOV-related configuration options. + """ + auto = 'auto' + blanket = 'blanket' + explicit = 'explicit' + + def _determine_numvfs(self, device, sriov_numvfs): + """Determine number of Virtual Functions (VFs) configured for device. + + :param device: Object describing a PCI Network interface card (NIC). + :type device: sriov_netplan_shim.pci.PCINetDevice + :param sriov_numvfs: Number of VFs requested for blanket configuration. + :type sriov_numvfs: int + :returns: Number of VFs to configure for device + :rtype: Optional[int] + """ + + def _get_capped_numvfs(requested): + """Get a number of VFs that does not exceed individual card limits. + + Depending on make and model of NIC the number of VFs supported + varies. Requesting more VFs than a card supports would be a fatal + error, so cap the requested number at the total number of VFs each + individual card supports. + + :param requested: Number of VFs requested + :type requested: int + :returns: Number of VFs allowed + :rtype: int + """ + actual = min(int(requested), int(device.sriov_totalvfs)) + if actual < int(requested): + log('Requested VFs ({}) too high for device {}.
Falling back ' + 'to value supported by device: {}' + .format(requested, device.interface_name, + device.sriov_totalvfs), + level=WARNING) + return actual + + if self._sriov_config_mode == self.sriov_config_mode.auto: + # auto-mode + # + # If device mapping configuration is present, return information + # on cards with mapping. + # + # If no device mapping configuration is present, return information + # for all cards. + # + # The maximum number of VFs supported by card will be used. + if (self._sriov_mapped_devices and + device.interface_name not in self._sriov_mapped_devices): + log('SR-IOV configured in auto mode: No device mapping for {}' + .format(device.interface_name), + level=DEBUG) + return + return _get_capped_numvfs(device.sriov_totalvfs) + elif self._sriov_config_mode == self.sriov_config_mode.blanket: + # blanket-mode + # + # User has specified a number of VFs that should apply to all + # cards with support for VFs. + return _get_capped_numvfs(sriov_numvfs) + elif self._sriov_config_mode == self.sriov_config_mode.explicit: + # explicit-mode + # + # User has given a list of interface names and associated number of + # VFs + if device.interface_name not in self._sriov_config_devices: + log('SR-IOV configured in explicit mode: No device:numvfs ' + 'pair for device {}, skipping.' + .format(device.interface_name), + level=DEBUG) + return + return _get_capped_numvfs( + self._sriov_config_devices[device.interface_name]) + else: + raise RuntimeError('This should not be reached') + + def __init__(self, numvfs_key=None, device_mappings_key=None): + """Initialize map from PCI devices and configuration options. + + :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs') + :type numvfs_key: Optional[str] + :param device_mappings_key: Config key for device mappings + (default: 'sriov-device-mappings') + :type device_mappings_key: Optional[str] + :raises: RuntimeError + """ + numvfs_key = numvfs_key or 'sriov-numvfs' + device_mappings_key = device_mappings_key or 'sriov-device-mappings' + + devices = pci.PCINetDevices() + charm_config = config() + sriov_numvfs = charm_config.get(numvfs_key) or '' + sriov_device_mappings = charm_config.get(device_mappings_key) or '' + + # create list of devices from sriov_device_mappings config option + self._sriov_mapped_devices = [ + pair.split(':', 1)[1] + for pair in sriov_device_mappings.split() + ] + + # create map of device:numvfs from sriov_numvfs config option + self._sriov_config_devices = { + ifname: numvfs for ifname, numvfs in ( + pair.split(':', 1) for pair in sriov_numvfs.split() + if ':' in sriov_numvfs) + } + + # determine configuration mode from contents of sriov_numvfs + if sriov_numvfs == 'auto': + self._sriov_config_mode = self.sriov_config_mode.auto + elif sriov_numvfs.isdigit(): + self._sriov_config_mode = self.sriov_config_mode.blanket + elif ':' in sriov_numvfs: + self._sriov_config_mode = self.sriov_config_mode.explicit + else: + raise RuntimeError('Unable to determine mode of SR-IOV ' + 'configuration.') + + self._map = { + device.interface_name: self._determine_numvfs(device, sriov_numvfs) + for device in devices.pci_devices + if device.sriov and + self._determine_numvfs(device, sriov_numvfs) is not None + } + + def __call__(self): + """Provide SR-IOV context. + + :returns: Map interface name: min(configured, max) virtual functions.
+ Example: + { + 'eth0': 16, + 'eth1': 32, + 'eth2': 64, + } + :rtype: Dict[str,int] + """ + return self._map diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/exceptions.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..f85ae4f4cdbb6567cbdd896338bf88fbf3c9c0ec --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/exceptions.py @@ -0,0 +1,21 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class OSContextError(Exception): + """Raised when an error occurs during context generation. + + This exception is principally used in contrib.openstack.context + """ + pass diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/files/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/files/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9df5f746fbdf5491c640a77df907b71817cbc5af --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/files/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/files/check_haproxy.sh new file mode 100755 index 0000000000000000000000000000000000000000..1df55db4816ec51d6732d68ea0a1e25e6f7b116e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/files/check_haproxy.sh @@ -0,0 +1,34 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. 
+# Author: Tom Haddon + +CRITICAL=0 +NOTACTIVE='' +LOGFILE=/var/log/nagios/check_haproxy.log +AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR==1{print $3}') + +typeset -i N_INSTANCES=0 +for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) +do + N_INSTANCES=N_INSTANCES+1 + output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK') + if [ $? != 0 ]; then + date >> $LOGFILE + echo $output >> $LOGFILE + /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1 + CRITICAL=1 + NOTACTIVE="${NOTACTIVE} $appserver" + fi +done + +if [ $CRITICAL = 1 ]; then + echo "CRITICAL:${NOTACTIVE}" + exit 2 +fi + +echo "OK: All haproxy instances ($N_INSTANCES) looking good" +exit 0 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh new file mode 100755 index 0000000000000000000000000000000000000000..91ce0246e66115994c3f518b36448f70100ecfc7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh @@ -0,0 +1,30 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. +# Author: Tom Haddon + +# These should be config options at some stage +CURRQthrsh=0 +MAXQthrsh=100 + +AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR==1{print $3}') + +HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v) + +for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}') +do + CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3) + MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4) + + if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then + echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ" + exit 2 + fi +done + +echo "OK: All haproxy queue depths looking good" +exit 0 + diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ha/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ha/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b088de84e4b288b551603816fc10eebfa7b1503 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ha/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ha/utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ha/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a5cbdf535d495a09a0b91f41fdda09862e34140d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ha/utils.py @@ -0,0 +1,348 @@ +# Copyright 2014-2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2016 Canonical Ltd. +# +# Authors: +# Openstack Charmers < +# + +""" +Helpers for high availability. +""" + +import hashlib +import json + +import re + +from charmhelpers.core.hookenv import ( + expected_related_units, + log, + relation_set, + charm_name, + config, + status_set, + DEBUG, +) + +from charmhelpers.core.host import ( + lsb_release +) + +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + is_ipv6, +) + +from charmhelpers.contrib.network.ip import ( + get_iface_for_address, + get_netmask_for_address, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config +) + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), +) + +VIP_GROUP_NAME = 'grp_{service}_vips' +DNSHA_GROUP_NAME = 'grp_{service}_hostnames' + + +class DNSHAException(Exception): + """Raised when an error occurs setting up DNS HA + """ + + pass + + +def update_dns_ha_resource_params(resources, resource_params, + relation_id=None, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration and + update resource dictionaries for the HA relation. + + @param resources: Pointer to dictionary of resources. + Usually instantiated in ha_joined(). + @param resource_params: Pointer to dictionary of resource parameters. + Usually instantiated in ha_joined() + @param relation_id: Relation ID of the ha relation + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + _relation_data = {'resources': {}, 'resource_params': {}} + update_hacluster_dns_ha(charm_name(), + _relation_data, + crm_ocf) + resources.update(_relation_data['resources']) + resource_params.update(_relation_data['resource_params']) + relation_set(relation_id=relation_id, groups=_relation_data['groups']) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + + :raises DNSHAException: if release is < 16.04 + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check juju goal-state if ha relation is expected, check for VIP or dns-ha + settings which indicate the unit should expect to be related to hacluster. 
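+ + For example, a unit with the 'vip' or 'dns-ha' option set, or whose + application has expected related units on the 'ha' relation, is + expected to be in HA.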
+ + @returns boolean + """ + ha_related_units = [] + try: + ha_related_units = list(expected_related_units(reltype='ha')) + except (NotImplementedError, KeyError): + pass + return len(ha_related_units) > 0 or config('vip') or config('dns-ha') + + +def generate_ha_relation_data(service, + extra_settings=None, + haproxy_enabled=True): + """ Generate relation data for ha relation + + Based on configuration options and unit interfaces, generate a json + encoded dict of relation data items for the hacluster relation, + providing configuration for DNS HA or VIPs + haproxy clone sets. + + Example of supplying additional settings:: + + COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' + AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' + AGENT_CA_PARAMS = 'op monitor interval="5s"' + + ha_console_settings = { + 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, + 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, + 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, + 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}} + generate_ha_relation_data('nova', extra_settings=ha_console_settings) + + + @param service: Name of the service being configured + @param extra_settings: Dict of additional resource data + @returns dict: json encoded data for use with relation_set + """ + _relation_data = {'resources': {}, 'resource_params': {}} + + if haproxy_enabled: + _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"' + _haproxy_res = 'res_{}_haproxy'.format(service) + _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} + _relation_data['resource_params'] = { + _haproxy_res: '{} op monitor interval="5s"'.format(_meta) + } + _relation_data['init_services'] = {_haproxy_res: 'haproxy'} + _relation_data['clones'] = { + 'cl_{}_haproxy'.format(service): _haproxy_res + } + + if extra_settings: + for k, v in extra_settings.items(): + if _relation_data.get(k): + _relation_data[k].update(v) + else: + _relation_data[k] = v + + if config('dns-ha'): + update_hacluster_dns_ha(service, _relation_data) + else: + update_hacluster_vip(service, _relation_data) + + return { + 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) + for k, v in _relation_data.items() if v + } + + +def update_hacluster_dns_ha(service, relation_data, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + # Validate the charm environment for DNS HA + assert_charm_supports_dns_ha() + + settings = ['os-admin-hostname', 'os-internal-hostname', + 'os-public-hostname', 'os-access-hostname'] + + # Check which DNS settings are set and update dictionaries + hostname_group = [] + for setting in settings: + hostname = config(setting) + if hostname is None: + log('DNS HA: Hostname setting {} is None. Ignoring.' + ''.format(setting), + DEBUG) + continue + m = re.search('os-(.+?)-hostname', setting) + if m: + endpoint_type = m.group(1) + # resolve_address's ADDRESS_MAP uses 'int' not 'internal' + if endpoint_type == 'internal': + endpoint_type = 'int' + else: + msg = ('Unexpected DNS hostname setting: {}.
' + 'Cannot determine endpoint_type name' + ''.format(setting)) + status_set('blocked', msg) + raise DNSHAException(msg) + + hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) + if hostname_key in hostname_group: + log('DNS HA: Resource {}: {} already exists in ' + 'hostname group - skipping'.format(hostname_key, hostname), + DEBUG) + continue + + hostname_group.append(hostname_key) + relation_data['resources'][hostname_key] = crm_ocf + relation_data['resource_params'][hostname_key] = ( + 'params fqdn="{}" ip_address="{}"' + .format(hostname, resolve_address(endpoint_type=endpoint_type, + override=False))) + + if len(hostname_group) >= 1: + log('DNS HA: Hostname group is set with {} as members. ' + 'Informing the ha relation'.format(' '.join(hostname_group)), + DEBUG) + relation_data['groups'] = { + DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) + } + else: + msg = 'DNS HA: Hostname group has no members.' + status_set('blocked', msg) + raise DNSHAException(msg) + + +def get_vip_settings(vip): + """Calculate which nic is on the correct network for the given vip. + + If nic or netmask discovery fails then fall back to using charm supplied + config. If fallback is used this is indicated via the fallback variable. + + @param vip: VIP to lookup nic and cidr for. + @returns (str, str, bool): eg (iface, netmask, fallback) + """ + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + fallback = False + if iface is None: + iface = config('vip_iface') + fallback = True + if netmask is None: + netmask = config('vip_cidr') + fallback = True + return iface, netmask, fallback + + +def update_hacluster_vip(service, relation_data): + """ Configure VIP resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + """ + cluster_config = get_hacluster_config() + vip_group = [] + vips_to_delete = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + + iface, netmask, fallback = get_vip_settings(vip) + + vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"' + if iface is not None: + # NOTE(jamespage): Delete old VIP resources + # Old style naming encoding iface in name + # does not work well in environments where + # interface/subnet wiring is not consistent + vip_key = 'res_{}_{}_vip'.format(service, iface) + if vip_key in vips_to_delete: + vip_key = '{}_{}'.format(vip_key, vip_params) + vips_to_delete.append(vip_key) + + vip_key = 'res_{}_{}_vip'.format( + service, + hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) + + relation_data['resources'][vip_key] = res_vip + # NOTE(jamespage): + # Use option provided vip params if these were used + # instead of auto-detected values + if fallback: + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask, + vip_monitoring=vip_monitoring)) + else: + # NOTE(jamespage): + # let heartbeat figure out which interface and + # netmask to configure, which works nicely + # when network interface naming is not + # consistent across units.
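+ # Illustrative (assumed VIP 10.5.0.100) rendered value: + # params ip="10.5.0.100" op monitor timeout="20s" interval="10s" depth="0"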
+ relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + vip_monitoring=vip_monitoring)) + + vip_group.append(vip_key) + + if vips_to_delete: + try: + relation_data['delete_resources'].extend(vips_to_delete) + except KeyError: + relation_data['delete_resources'] = vips_to_delete + + if len(vip_group) >= 1: + key = VIP_GROUP_NAME.format(service=service) + try: + relation_data['groups'][key] = ' '.join(vip_group) + except KeyError: + relation_data['groups'] = { + key: ' '.join(vip_group) + } diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ip.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ip.py new file mode 100644 index 0000000000000000000000000000000000000000..723aebc172e94a5b00c385d9861dd4d45c1bc753 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ip.py @@ -0,0 +1,197 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + NoNetworkBinding, + config, + unit_get, + service_name, + network_get_primary_address, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, + get_ipv6_addr, + resolve_network_cidr, +) +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' +ACCESS = 'access' + +ADDRESS_MAP = { + PUBLIC: { + 'binding': 'public', + 'config': 'os-public-network', + 'fallback': 'public-address', + 'override': 'os-public-hostname', + }, + INTERNAL: { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, + ADMIN: { + 'binding': 'admin', + 'config': 'os-admin-network', + 'fallback': 'private-address', + 'override': 'os-admin-hostname', + }, + ACCESS: { + 'binding': 'access', + 'config': 'access-network', + 'fallback': 'private-address', + 'override': 'os-access-hostname', + }, +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + """Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param endpoint_type: str endpoint type to resolve. + :returns: str base URL for services on the current service unit. + """ + scheme = _get_scheme(configs) + + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + + return '%s://%s' % (scheme, address) + + +def _get_scheme(configs): + """Returns the scheme to use for the url (either http or https) + depending upon whether https is in the configs value. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context.
+ :returns: either 'http' or 'https' depending on whether https is + configured within the configs context. + """ + scheme = 'http' + if configs and 'https' in configs.complete_contexts(): + scheme = 'https' + return scheme + + +def _get_address_override(endpoint_type=PUBLIC): + """Returns any address overrides that the user has defined based on the + endpoint type. + + Note: this function allows for the service name to be inserted into the + address if the user specifies {service_name}.somehost.org. + + :param endpoint_type: the type of endpoint to retrieve the override + value for. + :returns: any endpoint address or hostname that the user has overridden + or None if an override is not present. + """ + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if not addr_override: + return None + else: + return addr_override.format(service_name=service_name()) + + +def resolve_address(endpoint_type=PUBLIC, override=True): + """Return unit address depending on net config. + + If unit is clustered with vip(s) and has net splits defined, return vip on + correct network. If clustered with no nets defined, return primary vip. + + If not clustered, return unit address ensuring address is on configured net + split if one is configured, or a Juju 2.0 extra-binding has been used. + + :param endpoint_type: Network endpoint type + :param override: Accept hostname overrides or not + """ + resolved_address = None + if override: + resolved_address = _get_address_override(endpoint_type) + if resolved_address: + return resolved_address + + vips = config('vip') + if vips: + vips = vips.split() + + net_type = ADDRESS_MAP[endpoint_type]['config'] + net_addr = config(net_type) + net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] + binding = ADDRESS_MAP[endpoint_type]['binding'] + clustered = is_clustered() + + if clustered and vips: + if net_addr: + for vip in vips: + if is_address_in_network(net_addr, vip): + resolved_address = vip + break + else: + # NOTE: endeavour to check vips against network space + # bindings + try: + bound_cidr = resolve_network_cidr( + network_get_primary_address(binding) + ) + for vip in vips: + if is_address_in_network(bound_cidr, vip): + resolved_address = vip + break + except (NotImplementedError, NoNetworkBinding): + # If no net-splits configured and no support for extra + # bindings/network spaces, we expect a single vip + resolved_address = vips[0] + else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr(exc_list=vips)[0] + else: + fallback_addr = unit_get(net_fallback) + + if net_addr: + resolved_address = get_address_in_network(net_addr, fallback_addr) + else: + # NOTE: only try to use extra bindings if legacy network + # configuration is not in use + try: + resolved_address = network_get_primary_address(binding) + except (NotImplementedError, NoNetworkBinding): + resolved_address = fallback_addr + + if resolved_address is None: + raise ValueError("Unable to resolve a suitable IP address based on " + "charm state and configuration.
(net_type=%s, " + "clustered=%s)" % (net_type, clustered)) + + return resolved_address + + +def get_vip_in_network(network): + matching_vip = None + vips = config('vip') + if vips: + for vip in vips.split(): + if is_address_in_network(network, vip): + matching_vip = vip + return matching_vip diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/keystone.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/keystone.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e02ccd90155710901e444482b589aa264158e6 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/keystone.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +from charmhelpers.fetch import apt_install +from charmhelpers.contrib.openstack.context import IdentityServiceContext +from charmhelpers.core.hookenv import ( + log, + ERROR, +) + + +def get_api_suffix(api_version): + """Return the formatted api suffix for the given version + @param api_version: version of the keystone endpoint + @returns the api suffix formatted according to the given api + version + """ + return 'v2.0' if api_version in (2, "2", "2.0") else 'v3' + + +def format_endpoint(schema, addr, port, api_version): + """Return a formatted keystone endpoint + @param schema: http or https + @param addr: ipv4/ipv6 host of the keystone service + @param port: port of the keystone service + @param api_version: 2 or 3 + @returns a fully formatted keystone endpoint + """ + return '{}://{}:{}/{}/'.format(schema, addr, port, + get_api_suffix(api_version)) + + +def get_keystone_manager(endpoint, api_version, **kwargs): + """Return a keystonemanager for the correct API version + + @param endpoint: the keystone endpoint to point client at + @param api_version: version of the keystone api the client should use + @param kwargs: token or username/tenant/password information + @returns keystonemanager class used for interrogating keystone + """ + if api_version == 2: + return KeystoneManager2(endpoint, **kwargs) + if api_version == 3: + return KeystoneManager3(endpoint, **kwargs) + raise ValueError('No manager found for api version {}'.format(api_version)) + + +def get_keystone_manager_from_identity_service_context(): + """Return a keystonemanager generated from an + instance of charmhelpers.contrib.openstack.context.IdentityServiceContext + @returns keystonemanager instance + """ + context = IdentityServiceContext()() + if not context: + msg = "Identity service context cannot be generated" + log(msg, level=ERROR) + raise ValueError(msg) + + endpoint = format_endpoint(context['service_protocol'], + context['service_host'], + context['service_port'], + context['api_version']) + + if context['api_version'] in (2, "2.0"): + api_version = 2 + else: + api_version = 3 + + return get_keystone_manager(endpoint, api_version, +
username=context['admin_user'], + password=context['admin_password'], + tenant_name=context['admin_tenant_name']) + + +class KeystoneManager(object): + + def resolve_service_id(self, service_name=None, service_type=None): + """Find the service_id of a given service""" + services = [s._info for s in self.api.services.list()] + + service_name = service_name.lower() + for s in services: + name = s['name'].lower() + if service_type and service_name: + if (service_name == name and service_type == s['type']): + return s['id'] + elif service_name and service_name == name: + return s['id'] + elif service_type and service_type == s['type']: + return s['id'] + return None + + def service_exists(self, service_name=None, service_type=None): + """Determine if the given service exists on the service list""" + return self.resolve_service_id(service_name, service_type) is not None + + +class KeystoneManager2(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + + self.api_version = 2 + + token = kwargs.get("token", None) + if token: + api = client.Client(endpoint=endpoint, token=token) + else: + auth = v2.Password(username=kwargs.get("username"), + password=kwargs.get("password"), + tenant_name=kwargs.get("tenant_name"), + auth_url=endpoint) + sess = session.Session(auth=auth) + api = client.Client(session=sess) + + self.api = api + + +class KeystoneManager3(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + + self.api_version = 3 + + token = kwargs.get("token", None) + if token: + auth = token_endpoint.Token(endpoint=endpoint, + token=token) + sess = session.Session(auth=auth) + else: + auth = v3.Password(auth_url=endpoint, + user_id=kwargs.get("username"), + password=kwargs.get("password"), + project_id=kwargs.get("tenant_name")) + sess = session.Session(auth=auth) + + self.api = client.Client(session=sess) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/neutron.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/neutron.py new file mode 100644 index 0000000000000000000000000000000000000000..fb5607f3e73159d90236b2d7a4051aa82119e889 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/neutron.py @@ -0,0 +1,359 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Various utilities for dealing with Neutron and the renaming from Quantum. + +import six +from subprocess import check_output + +from charmhelpers.core.hookenv import ( + config, + log, + ERROR, +) + +from charmhelpers.contrib.openstack.utils import ( + os_release, + CompareOpenStackReleases, +) + + +def headers_package(): + """Ensures correct linux-headers for running kernel are installed, + for building DKMS package""" + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + return 'linux-headers-%s' % kver + + +QUANTUM_CONF_DIR = '/etc/quantum' + + +def kernel_version(): + """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + kver = kver.split('.') + return (int(kver[0]), int(kver[1])) + + +def determine_dkms_package(): + """ Determine which DKMS package should be used based on kernel version """ + # NOTE: 3.13 kernels have support for GRE and VXLAN native + if kernel_version() >= (3, 13): + return [] + else: + return [headers_package(), 'openvswitch-datapath-dkms'] + + +# legacy + + +def quantum_plugins(): + return { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/' + 'ovs_quantum_plugin.ini', + 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' + 'OVSQuantumPluginV2', + 'contexts': [], + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['quantum-plugin-openvswitch-agent']], + 'server_packages': ['quantum-server', + 'quantum-plugin-openvswitch'], + 'server_services': ['quantum-server'] + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' + 'QuantumPlugin.NvpPluginV2', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['quantum-server', + 'quantum-plugin-nicira'], + 'server_services': ['quantum-server'] + } + } + + +NEUTRON_CONF_DIR = '/etc/neutron' + + +def neutron_plugins(): + release = os_release('nova-common') + plugins = { + 'ovs': { + 'config': '/etc/neutron/plugins/openvswitch/' + 'ovs_neutron_plugin.ini', + 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + 'contexts': [], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['neutron-plugin-openvswitch-agent']], + 'server_packages': ['neutron-server', + 'neutron-plugin-openvswitch'], + 'server_services': ['neutron-server'] + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
+ 'NeutronPlugin.NvpPluginV2', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-nicira'], + 'server_services': ['neutron-server'] + }, + 'nsx': { + 'config': '/etc/neutron/plugins/vmware/nsx.ini', + 'driver': 'vmware', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-vmware'], + 'server_services': ['neutron-server'] + }, + 'n1kv': { + 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', + 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'contexts': [], + 'services': [], + 'packages': [determine_dkms_package(), + ['neutron-plugin-cisco']], + 'server_packages': ['neutron-server', + 'neutron-plugin-cisco'], + 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd'], + 'packages': [determine_dkms_package(), + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd']], + 'server_packages': ['neutron-server', 'calico-control', 'etcd'], + 'server_services': ['neutron-server', 'etcd'] + }, + 'vsp': { + 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', + 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], + 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' + '.plumgrid_plugin.NeutronPluginPLUMgridV2'), + 'contexts': [], + 'services': [], + 'packages': ['plumgrid-lxc', + 'iovisor-dkms'], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] + }, + 'midonet': { + 'config': '/etc/neutron/plugins/midonet/midonet.ini', + 'driver': 'midonet.neutron.plugin.MidonetPluginV2', + 'contexts': [], + 'services': [], + 'packages': [determine_dkms_package()], + 'server_packages': ['neutron-server', + 'python-neutron-plugin-midonet'], + 'server_services': ['neutron-server'] + } + } + if CompareOpenStackReleases(release) >= 'icehouse': + # NOTE: patch in ml2 plugin for icehouse onwards + plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['ovs']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + # NOTE: patch in vmware renames nvp->nsx for icehouse onwards + plugins['nvp'] = plugins['nsx'] + if CompareOpenStackReleases(release) >= 'kilo': + plugins['midonet']['driver'] = ( + 'neutron.plugins.midonet.plugin.MidonetPluginV2') + if CompareOpenStackReleases(release) >= 'liberty': + plugins['midonet']['driver'] = ( + 'midonet.neutron.plugin_v1.MidonetPluginV2') + plugins['midonet']['server_packages'].remove( + 'python-neutron-plugin-midonet') + plugins['midonet']['server_packages'].append( + 'python-networking-midonet') + plugins['plumgrid']['driver'] = ( + 'networking_plumgrid.neutron.plugins' + '.plugin.NeutronPluginPLUMgridV2') + plugins['plumgrid']['server_packages'].remove( + 'neutron-plugin-plumgrid') + if CompareOpenStackReleases(release) >= 'mitaka': + plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') + plugins['nsx']['server_packages'].append('python-vmware-nsx') + plugins['nsx']['config'] = 
'/etc/neutron/nsx.ini' + plugins['vsp']['driver'] = ( + 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') + if CompareOpenStackReleases(release) >= 'newton': + plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['vsp']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + return plugins + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log("Network manager '%s' does not support plugins." % (manager), + level=ERROR) + raise Exception + + try: + _plugin = plugins[plugin] + except KeyError: + log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) + raise Exception + + try: + return _plugin[attr] + except KeyError: + return None + + +def network_manager(): + ''' + Deals with the renaming of Quantum to Neutron in H and any situations + that require compatibility (eg, deploying H with network-manager=quantum, + upgrading from G). + ''' + release = os_release('nova-common') + manager = config('network-manager').lower() + + if manager not in ['quantum', 'neutron']: + return manager + + if release in ['essex']: + # E does not support neutron + log('Neutron networking not supported in Essex.', level=ERROR) + raise Exception + elif release in ['folsom', 'grizzly']: + # neutron is named quantum in F and G + return 'quantum' + else: + # ensure accurate naming for all releases post-H + return 'neutron' + + +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. + """ + parsed = {} + if mappings: + mappings = mappings.split() + for m in mappings: + p = m.partition(':') + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue + else: + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() + + return parsed + + +def parse_bridge_mappings(mappings): + """Parse bridge mappings. + + Mappings must be a space-delimited list of provider:bridge mappings. + + Returns dict of the form {provider:bridge}. + """ + return parse_mappings(mappings) + + +def parse_data_port_mappings(mappings, default_bridge='br-data'): + """Parse data port mappings. + + Mappings must be a space-delimited list of bridge:port. + + Returns dict of the form {port:bridge} where ports may be mac addresses or + interface names. + """ + + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be + # proposed, since it may be a mac address which will differ + # across units, thus allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) + if not _mappings or list(_mappings.values()) == ['']: + if not mappings: + return {} + + # For backwards-compatibility we need to support port-only provided in + # config. + _mappings = {mappings.split()[0]: default_bridge} + + ports = _mappings.keys() + if len(set(ports)) != len(ports): + raise Exception("It is not allowed to have the same port configured " + "on more than one bridge") + + return _mappings + + +def parse_vlan_range_mappings(mappings): + """Parse vlan range mappings. + + Mappings must be a space-delimited list of provider:start:end mappings. + + The start:end range is optional and may be omitted.
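+ + For example (illustrative), 'phys1:100:200 phys2' yields + {'phys1': ('100', '200'), 'phys2': ('',)}.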
+
+    Returns dict of the form {provider: (start, end)}.
+    """
+    _mappings = parse_mappings(mappings)
+    if not _mappings:
+        return {}
+
+    mappings = {}
+    for p, r in six.iteritems(_mappings):
+        mappings[p] = tuple(r.split(':'))
+
+    return mappings
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/policyd.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/policyd.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2bb21e9db926bd2c4de8ab3e8d10d0837af563a
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/policyd.py
@@ -0,0 +1,801 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import contextlib
+import os
+import six
+import shutil
+import yaml
+import zipfile
+
+import charmhelpers
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as ch_host
+
+# Features provided by this module:
+
+"""
+Policy.d helper functions
+=========================
+
+The functions in this module are designed, as a set, to provide an easy-to-use
+set of hooks for classic charms to add override YAML files to the service's
+/etc/<service>/policy.d/ directory.
+
+(For charms.openstack charms, a mixin class is provided for this
+functionality).
+
+In order to "hook" this functionality into a (classic) charm, two functions are
+provided:
+
+    maybe_do_policyd_overrides(openstack_release,
+                               service,
+                               blacklist_paths=None,
+                               blacklist_keys=None,
+                               template_function=None,
+                               restart_handler=None)
+
+    maybe_do_policyd_overrides_on_config_changed(openstack_release,
+                                                 service,
+                                                 blacklist_paths=None,
+                                                 blacklist_keys=None,
+                                                 template_function=None,
+                                                 restart_handler=None)
+
+(See the docstrings for details on the parameters)
+
+The functions should be called from the install and upgrade hooks in the charm.
+The `maybe_do_policyd_overrides_on_config_changed` function is designed to be
+called on the config-changed hook, in that it does an additional check to
+ensure that a policy.d override already applied in the install or upgrade
+hooks isn't repeated.
+
+In order to *enable* this functionality, the charm's install, config_changed,
+and upgrade_charm hooks need to be modified, and a new config option (see
+below) needs to be added. The README for the charm should also be updated.
+
+Examples from the keystone charm are:
+
+@hooks.hook('install.real')
+@harden()
+def install():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+
+@hooks.hook('config-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@harden()
+def config_changed():
+    ...
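+    # (sketch note, not from the keystone charm itself: this module marks
+    # maybe_do_policyd_overrides_on_config_changed as deprecated further down,
+    # so new code would call maybe_do_policyd_overrides(...,
+    # config_changed=True) instead)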
+ # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides_on_config_changed(os_release('keystone'), + 'keystone') + +@hooks.hook('upgrade-charm') +@restart_on_change(restart_map(), stopstart=True) +@harden() +def upgrade_charm(): + ... + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides(os_release('keystone'), 'keystone') + +Status Line +=========== + +The workload status code in charm-helpers has been modified to detect if +policy.d override code has been incorporated into the charm by checking for the +new config variable (in the config.yaml). If it has been, then the workload +status line will automatically show "PO:" at the beginning of the workload +status for that unit/service if the config option is set. If the policy +override is broken, the "PO (broken):" will be shown. No changes to the charm +(apart from those already mentioned) are needed to enable this functionality. +(charms.openstack charms also get this functionality, but please see that +library for further details). +""" + +# The config.yaml for the charm should contain the following for the config +# option: + +""" + use-policyd-override: + type: boolean + default: False + description: | + If True then use the resource file named 'policyd-override' to install + override YAML files in the service's policy.d directory. The resource + file should be a ZIP file containing at least one yaml file with a .yaml + or .yml extension. If False then remove the overrides. +""" + +# The metadata.yaml for the charm should contain the following: +""" +resources: + policyd-override: + type: file + filename: policyd-override.zip + description: The policy.d overrides file +""" + +# The README for the charm should contain the following: +""" +Policy Overrides +---------------- + +This feature allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the OpenStack service supports +should be clearly and unambiguously understood before trying to override, or +add to, the default policies that the service uses. The charm also has some +policy defaults. They should also be understood before being overridden. + +> **Caution**: It is possible to break the system (for tenants and other + services) if policies are incorrectly applied to the service. + +Policy overrides are YAML files that contain rules that will add to, or +override, existing policy rules in the service. The `policy.d` directory is +a place to put the YAML override files. This charm owns the +`/etc/keystone/policy.d` directory, and as such, any manual changes to it will +be overwritten on charm upgrades. + +Overrides are provided to the charm using a Juju resource called +`policyd-override`. The resource is a ZIP file. This file, say +`overrides.zip`, is attached to the charm by: + + + juju attach-resource policyd-override=overrides.zip + +The policy override is enabled in the charm using: + + juju config use-policyd-override=true + +When `use-policyd-override` is `True` the status line of the charm will be +prefixed with `PO:` indicating that policies have been overridden. If the +installation of the policy override YAML files failed for any reason then the +status line will be prefixed with `PO (broken):`. The log file for the charm +will indicate the reason. No policy override files are installed if the `PO +(broken):` is shown. 
The status line indicates that the overrides are broken, +not that the policy for the service has failed. The policy will be the defaults +for the charm and service. + +Policy overrides on one service may affect the functionality of another +service. Therefore, it may be necessary to provide policy overrides for +multiple service charms to achieve a consistent set of policies across the +OpenStack system. The charms for the other services that may need overrides +should be checked to ensure that they support overrides before proceeding. +""" + +POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl'] +POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl'] +POLICYD_RESOURCE_NAME = "policyd-override" +POLICYD_CONFIG_NAME = "use-policyd-override" +POLICYD_SUCCESS_FILENAME = "policyd-override-success" +POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO +POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin") + + +class BadPolicyZipFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +class BadPolicyYamlFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +if six.PY2: + BadZipFile = zipfile.BadZipfile +else: + BadZipFile = zipfile.BadZipFile + + +def is_policyd_override_valid_on_this_release(openstack_release): + """Check that the charm is running on at least Ubuntu Xenial, and at + least the queens release. + + :param openstack_release: the release codename that is installed. + :type openstack_release: str + :returns: True if okay + :rtype: bool + """ + # NOTE(ajkavanagh) circular import! This is because the status message + # generation code in utils has to call into this module, but this function + # needs the CompareOpenStackReleases() function. The only way to solve + # this is either to put ALL of this module into utils, or refactor one or + # other of the CompareOpenStackReleases or status message generation code + # into a 3rd module. + import charmhelpers.contrib.openstack.utils as ch_utils + return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' + + +def maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None, + user=None, + group=None, + config_changed=False): + """If the config option is set, get the resource file and process it to + enable the policy.d overrides for the service passed. + + The param `openstack_release` is required as the policyd overrides feature + is only supported on openstack_release "queens" or later, and on ubuntu + "xenial" or later. Prior to these versions, this feature is a NOP. + + The optional template_function is a function that accepts a string and has + an opportunity to modify the loaded file prior to it being read by + yaml.safe_load(). This allows the charm to perform "templating" using + charm derived data. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The param restart_handler is an optional Callable that is called to perform + the service restart if the policy.d file is changed. 
This should normally
+    be None as oslo.policy automatically picks up changes in the policy.d
+    directory. However, for any service where this is buggy, a
+    restart_handler can be used to force the policy.d files to be read.
+
+    If the config_changed param is True, then the handling is slightly
+    different: It will only perform the policyd overrides if the config is True
+    and the success file doesn't exist. Otherwise, it does nothing as the
+    resource file has already been processed.
+
+    :param openstack_release: The openstack release that is installed.
+    :type openstack_release: str
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+        yaml files
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the string
+        prior to being processed as a Yaml document.
+    :type template_function: Union[None, Callable[[str], str]]
+    :param restart_handler: The function to call if the service should be
+        restarted.
+    :type restart_handler: Union[None, Callable[[], None]]
+    :param user: The user to create/write files/directories as
+    :type user: Union[None, str]
+    :param group: the group to create/write files/directories as
+    :type group: Union[None, str]
+    :param config_changed: Set to True for config_changed hook.
+    :type config_changed: bool
+    """
+    _user = service if user is None else user
+    _group = service if group is None else group
+    if not is_policyd_override_valid_on_this_release(openstack_release):
+        return
+    hookenv.log("Running maybe_do_policyd_overrides",
+                level=POLICYD_LOG_LEVEL_DEFAULT)
+    config = hookenv.config()
+    try:
+        if not config.get(POLICYD_CONFIG_NAME, False):
+            clean_policyd_dir_for(service,
+                                  blacklist_paths,
+                                  user=_user,
+                                  group=_group)
+            if (os.path.isfile(_policy_success_file()) and
+                    restart_handler is not None and
+                    callable(restart_handler)):
+                restart_handler()
+            remove_policy_success_file()
+            return
+    except Exception as e:
+        hookenv.log("... ERROR: Exception is: {}".format(str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        import traceback
+        hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT)
+        return
+    # if the policyd overrides have been performed when doing config_changed
+    # just return
+    if config_changed and is_policy_success_file_set():
+        hookenv.log("... already setup, so skipping.",
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        return
+    # from now on it should succeed; if it doesn't then status line will show
+    # broken.
+    resource_filename = get_policy_resource_filename()
+    restart = process_policy_resource_file(
+        resource_filename, service, blacklist_paths, blacklist_keys,
+        template_function)
+    if restart and restart_handler is not None and callable(restart_handler):
+        restart_handler()
+
+
+@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead")
+def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs):
+    """This function is designed to be called from the config changed hook.
+
+    DEPRECATED: please use maybe_do_policyd_overrides() with the param
+    `config_changed` as `True`.
+
+    See maybe_do_policyd_overrides() for more details on the params.
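+
+    A minimal sketch of the equivalent replacement call ('keystone' is only
+    an illustrative service name, as in the module docstring above):
+
+        maybe_do_policyd_overrides(
+            os_release('keystone'), 'keystone', config_changed=True)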
+ """ + if 'config_changed' not in kwargs.keys(): + kwargs['config_changed'] = True + return maybe_do_policyd_overrides(*args, **kwargs) + + +def get_policy_resource_filename(): + """Function to extract the policy resource filename + + :returns: The filename of the resource, if set, otherwise, if an error + occurs, then None is returned. + :rtype: Union[str, None] + """ + try: + return hookenv.resource_get(POLICYD_RESOURCE_NAME) + except Exception: + return None + + +@contextlib.contextmanager +def open_and_filter_yaml_files(filepath, has_subdirs=False): + """Validate that the filepath provided is a zip file and contains at least + one (.yaml|.yml) file, and that the files are not duplicated when the zip + file is flattened. Note that the yaml files are not checked. This is the + first stage in validating the policy zipfile; individual yaml files are not + checked for validity or black listed keys. + + If the has_subdirs param is True, then the files are flattened to the first + directory, and the files in the root are ignored. + + An example of use is: + + with open_and_filter_yaml_files(some_path) as zfp, g: + for zipinfo in g: + # do something with zipinfo ... + + :param filepath: a filepath object that can be opened by zipfile + :type filepath: Union[AnyStr, os.PathLike[AntStr]] + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: (zfp handle, + a generator of the (name, filename, ZipInfo object) tuples) as a + tuple. + :rtype: ContextManager[(zipfile.ZipFile, + Generator[(name, str, str, zipfile.ZipInfo)])] + :raises: zipfile.BadZipFile + :raises: BadPolicyZipFile if duplicated yaml or missing + :raises: IOError if the filepath is not found + """ + with zipfile.ZipFile(filepath, 'r') as zfp: + # first pass through; check for duplicates and at least one yaml file. + names = collections.defaultdict(int) + yamlfiles = _yamlfiles(zfp, has_subdirs) + for name, _, _, _ in yamlfiles: + names[name] += 1 + # There must be at least 1 yaml file. + if len(names.keys()) == 0: + raise BadPolicyZipFile("contains no yaml files with {} extensions." + .format(", ".join(POLICYD_VALID_EXTS))) + # There must be no duplicates + duplicates = [n for n, c in names.items() if c > 1] + if duplicates: + raise BadPolicyZipFile("{} have duplicates in the zip file." + .format(", ".join(duplicates))) + # Finally, let's yield the generator + yield (zfp, yamlfiles) + + +def _yamlfiles(zipfile, has_subdirs=False): + """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) + and the infolist item from a zipfile. + + If the `has_subdirs` param is True, the the only yaml files that have a + directory component are read, and then first part of the directory + component is kept, along with the filename in the name. e.g. an entry with + a filename of: + + compute/someotherdir/override.yaml + + is returned as: + + compute/override, yaml, override.yaml, + + This is to help with the special, additional, processing that the dashboard + charm requires. + + :param zipfile: the zipfile to read zipinfo items from + :type zipfile: zipfile.ZipFile + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: generator of (name, ext, filename, info item) for each + self-identified yaml file. 
+    :rtype: List[(str, str, str, zipfile.ZipInfo)]
+    """
+    files = []
+    for infolist_item in zipfile.infolist():
+        try:
+            if infolist_item.is_dir():
+                continue
+        except AttributeError:
+            # fallback to "old" way to determine dir entry for pre-py36
+            if infolist_item.filename.endswith('/'):
+                continue
+        _dir, name_ext = os.path.split(infolist_item.filename)
+        name, ext = os.path.splitext(name_ext)
+        if has_subdirs and _dir != "":
+            name = os.path.join(_dir.split(os.path.sep)[0], name)
+        ext = ext.lower()
+        if ext and ext in POLICYD_VALID_EXTS:
+            files.append((name, ext, name_ext, infolist_item))
+    return files
+
+
+def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
+    """Read, validate and return the (first) yaml document from the stream.
+
+    The doc is read, and checked for a yaml file. Then the top-level keys are
+    checked against the blacklist_keys provided. If there are problems then an
+    Exception is raised. Otherwise the yaml document is returned as a Python
+    object that can be dumped back as a yaml file on the system.
+
+    The yaml file must only consist of a str:str mapping, and if not then the
+    yaml file is rejected.
+
+    :param stream_or_doc: the file object to read the yaml from
+    :type stream_or_doc: Union[AnyStr, IO[AnyStr]]
+    :param blacklist_keys: Any keys, which if in the yaml file, should cause
+        an error.
+    :type blacklist_keys: Union[None, List[str]]
+    :returns: the yaml file as a python document
+    :rtype: Dict[str, str]
+    :raises: yaml.YAMLError if there is a problem with the document
+    :raises: BadPolicyYamlFile if file doesn't look right or there are
+        blacklisted keys in the file.
+    """
+    blacklist_keys = blacklist_keys or []
+    # extend (not append): POLICYD_ALWAYS_BLACKLISTED_KEYS is a tuple of key
+    # names, so each key must be added individually for the intersection
+    # check below to catch it.
+    blacklist_keys.extend(POLICYD_ALWAYS_BLACKLISTED_KEYS)
+    doc = yaml.safe_load(stream_or_doc)
+    if not isinstance(doc, dict):
+        raise BadPolicyYamlFile("doesn't look like a policy file?")
+    keys = set(doc.keys())
+    blacklisted_keys_present = keys.intersection(blacklist_keys)
+    if blacklisted_keys_present:
+        raise BadPolicyYamlFile("blacklisted keys {} present."
+                                .format(", ".join(blacklisted_keys_present)))
+    if not all(isinstance(k, six.string_types) for k in keys):
+        raise BadPolicyYamlFile("keys in yaml aren't all strings?")
+    # check that the dictionary looks like a mapping of str to str
+    if not all(isinstance(v, six.string_types) for v in doc.values()):
+        raise BadPolicyYamlFile("values in yaml aren't all strings?")
+    return doc
+
+
+def policyd_dir_for(service):
+    """Return the policy directory for the named service.
+
+    :param service: the service name
+    :type service: str
+    :returns: the policy.d override directory.
+    :rtype: os.PathLike[str]
+    """
+    return os.path.join("/", "etc", service, "policy.d")
+
+
+def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None):
+    """Clean out the policyd directory except for items that should be kept.
+
+    The keep_paths, if used, should be set to the full path of the files that
+    should be kept in the policyd directory for the service. Note that the
+    service name is passed in, and then the policyd_dir_for() function is used.
+    This is so that a coding error doesn't result in a sudden deletion of the
+    charm (say).
+
+    :param service: the service name to use to construct the policy.d dir.
+    :type service: str
+    :param keep_paths: optional list of paths to not delete.
+ :type keep_paths: Union[None, List[str]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + """ + _user = service if user is None else user + _group = service if group is None else group + keep_paths = keep_paths or [] + path = policyd_dir_for(service) + hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) + if not os.path.exists(path): + ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) + _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir + for direntry in _scanner(path): + # see if the path should be kept. + if direntry.path in keep_paths: + continue + # we remove any directories; it's ours and there shouldn't be any + if direntry.is_dir(): + shutil.rmtree(direntry.path) + else: + os.remove(direntry.path) + + +def maybe_create_directory_for(path, user, group): + """For the filename 'path', ensure that the directory for that path exists. + + Note that if the directory already exists then the permissions are NOT + changed. + + :param path: the filename including the path to it. + :type path: str + :param user: the user to create the directory as + :param group: the group to create the directory as + """ + _dir, _ = os.path.split(path) + if not os.path.exists(_dir): + ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) + + +@contextlib.contextmanager +def _fallback_scandir(path): + """Fallback os.scandir implementation. + + provide a fallback implementation of os.scandir if this module ever gets + used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for + directory. + + :param path: the path to list the directories for + :type path: str + :returns: Generator that provides _FBDirectory objects + :rtype: ContextManager[_FBDirectory] + """ + for f in os.listdir(path): + yield _FBDirectory(f) + + +class _FBDirectory(object): + """Mock a scandir Directory object with enough to use in + clean_policyd_dir_for + """ + + def __init__(self, path): + self.path = path + + def is_dir(self): + return os.path.isdir(self.path) + + +def path_for_policy_file(service, name): + """Return the full path for a policy.d file that will be written to the + service's policy.d directory. + + It is constructed using policyd_dir_for(), the name and the ".yaml" + extension. + + For horizon, for example, it's a bit more complicated. The name param is + actually "override_service_dir/a_name", where target_service needs to be + one the allowed horizon override services. This translation and check is + done in the _yamlfiles() function. + + :param service: the service name + :type service: str + :param name: the name for the policy override + :type name: str + :returns: the full path name for the file + :rtype: os.PathLike[str] + """ + return os.path.join(policyd_dir_for(service), name + ".yaml") + + +def _policy_success_file(): + """Return the file name for a successful drop of policy.d overrides + + :returns: the path name for the file. 
+ :rtype: str + """ + return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME) + + +def remove_policy_success_file(): + """Remove the file that indicates successful policyd override.""" + try: + os.remove(_policy_success_file()) + except Exception: + pass + + +def set_policy_success_file(): + """Set the file that indicates successful policyd override.""" + open(_policy_success_file(), "w").close() + + +def is_policy_success_file_set(): + """Returns True if the policy success file has been set. + + This indicates that policies are overridden and working properly. + + :returns: True if the policy file is set + :rtype: bool + """ + return os.path.isfile(_policy_success_file()) + + +def policyd_status_message_prefix(): + """Return the prefix str for the status line. + + "PO:" indicating that the policy overrides are in place, or "PO (broken):" + if the policy is supposed to be working but there is no success file. + + :returns: the prefix + :rtype: str + """ + if is_policy_success_file_set(): + return "PO:" + return "PO (broken):" + + +def process_policy_resource_file(resource_file, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + preserve_topdir=False, + preprocess_filename=None, + user=None, + group=None): + """Process the resource file (which should contain at least one yaml file) + and write those files to the service's policy.d directory. + + The optional template_function is a function that accepts a python + string and has an opportunity to modify the document + prior to it being read by the yaml.safe_load() function and written to + disk. Note that this function does *not* say how the templating is done - + this is up to the charm to implement its chosen method. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The yaml filename can be modified in two ways. If the `preserve_topdir` + param is True, then files will be flattened to the top dir. This allows + for creating sets of files that can be grouped into a single level tree + structure. + + Secondly, if the `preprocess_filename` param is not None and callable() + then the name is passed to that function for preprocessing before being + converted to the end location. This is to allow munging of the filename + prior to being tested for a blacklist path. + + If any error occurs, then the policy.d directory is cleared, the error is + written to the log, and the status line will eventually show as failed. + + :param resource_file: The zipped file to open and extract yaml files form. + :type resource_file: Union[AnyStr, os.PathLike[AnyStr]] + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the yaml + document. 
+ :type template_function: Union[None, Callable[[AnyStr], AnyStr]] + :param preserve_topdir: Keep the toplevel subdir + :type preserve_topdir: bool + :param preprocess_filename: Optional function to use to process filenames + extracted from the resource file. + :type preprocess_filename: Union[None, Callable[[AnyStr]. AnyStr]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + :returns: True if the processing was successful, False if not. + :rtype: boolean + """ + hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) + blacklist_paths = blacklist_paths or [] + completed = False + _preprocess = None + if preprocess_filename is not None and callable(preprocess_filename): + _preprocess = preprocess_filename + _user = service if user is None else user + _group = service if group is None else group + try: + with open_and_filter_yaml_files( + resource_file, preserve_topdir) as (zfp, gen): + # first clear out the policy.d directory and clear success + remove_policy_success_file() + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) + for name, ext, filename, zipinfo in gen: + # See if the name should be preprocessed. + if _preprocess is not None: + name = _preprocess(name) + # construct a name for the output file. + yaml_filename = path_for_policy_file(service, name) + if yaml_filename in blacklist_paths: + raise BadPolicyZipFile("policy.d name {} is blacklisted" + .format(yaml_filename)) + with zfp.open(zipinfo) as fp: + doc = fp.read() + # if template_function is not None, then offer the document + # to the template function + if ext in POLICYD_TEMPLATE_EXTS: + if (template_function is None or not + callable(template_function)): + raise BadPolicyZipFile( + "Template {} but no template_function is " + "available".format(filename)) + doc = template_function(doc) + yaml_doc = read_and_validate_yaml(doc, blacklist_keys) + # we may have to create the directory + maybe_create_directory_for(yaml_filename, _user, _group) + ch_host.write_file(yaml_filename, + yaml.dump(yaml_doc).encode('utf-8'), + _user, + _group) + # Every thing worked, so we mark up a success. + completed = True + except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except IOError as e: + # technically this shouldn't happen; it would be a programming error as + # the filename comes from Juju and thus, should exist. + hookenv.log( + "File {} failed with IOError. 
This really shouldn't happen" + " -- error: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except Exception as e: + import traceback + hookenv.log("General Exception({}) during policyd processing" + .format(str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + hookenv.log(traceback.format_exc()) + finally: + if not completed: + hookenv.log("Processing {} failed: cleaning policy.d directory" + .format(resource_file), + level=POLICYD_LOG_LEVEL_DEFAULT) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) + else: + # touch the success filename + hookenv.log("policy.d overrides installed.", + level=POLICYD_LOG_LEVEL_DEFAULT) + set_policy_success_file() + return completed diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ssh_migrations.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ssh_migrations.py new file mode 100644 index 0000000000000000000000000000000000000000..96b9f71d42d1c81539f78b8e1c4761f81d84c304 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/ssh_migrations.py @@ -0,0 +1,412 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess + +from charmhelpers.core.hookenv import ( + ERROR, + log, + relation_get, +) +from charmhelpers.contrib.network.ip import ( + is_ipv6, + ns_query, +) +from charmhelpers.contrib.openstack.utils import ( + get_hostname, + get_host_ip, + is_ip, +) + +NOVA_SSH_DIR = '/etc/nova/compute_ssh/' + + +def ssh_directory_for_unit(application_name, user=None): + """Return the directory used to store ssh assets for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified directory path. + :rtype: str + """ + if user: + application_name = "{}_{}".format(application_name, user) + _dir = os.path.join(NOVA_SSH_DIR, application_name) + for d in [NOVA_SSH_DIR, _dir]: + if not os.path.isdir(d): + os.mkdir(d) + for f in ['authorized_keys', 'known_hosts']: + f = os.path.join(_dir, f) + if not os.path.isfile(f): + open(f, 'w').close() + return _dir + + +def known_hosts(application_name, user=None): + """Return the known hosts file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'known_hosts') + + +def authorized_keys(application_name, user=None): + """Return the authorized keys file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. 
+ :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'authorized_keys') + + +def ssh_known_host_key(host, application_name, user=None): + """Return the first entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Host key + :rtype: str or None + """ + cmd = [ + 'ssh-keygen', + '-f', known_hosts(application_name, user), + '-H', + '-F', + host] + try: + # The first line of output is like '# Host xx found: line 1 type RSA', + # which should be excluded. + output = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + # RC of 1 seems to be legitimate for most ssh-keygen -F calls. + if e.returncode == 1: + output = e.output + else: + raise + output = output.strip() + + if output: + # Bug #1500589 cmd has 0 rc on precise if entry not present + lines = output.split('\n') + if len(lines) >= 1: + return lines[0] + + return None + + +def remove_known_host(host, application_name, user=None): + """Remove the entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + log('Removing SSH known host entry for compute host at %s' % host) + cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host] + subprocess.check_call(cmd) + + +def is_same_key(key_1, key_2): + """Extract the key from two host entries and compare them. + + :param key_1: Host key + :type key_1: str + :param key_2: Host key + :type key_2: str + """ + # The key format get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp' + # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare + # the part start with 'ssh-rsa' followed with '= ', because the hash + # value in the beginning will change each time. + k_1 = key_1.split('= ')[1] + k_2 = key_2.split('= ')[1] + return k_1 == k_2 + + +def add_known_host(host, application_name, user=None): + """Add the given host key to the known hosts file. + + :param host: host name + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] + try: + remote_key = subprocess.check_output(cmd).strip() + except Exception as e: + log('Could not obtain SSH host key from %s' % host, level=ERROR) + raise e + + current_key = ssh_known_host_key(host, application_name, user) + if current_key and remote_key: + if is_same_key(remote_key, current_key): + log('Known host key for compute host %s up to date.' % host) + return + else: + remove_known_host(host, application_name, user) + + log('Adding SSH host key to known hosts for compute node at %s.' % host) + with open(known_hosts(application_name, user), 'a') as out: + out.write("{}\n".format(remote_key)) + + +def ssh_authorized_key_exists(public_key, application_name, user=None): + """Check if given key is in the authorized_key file. + + :param public_key: Public key. 
+ :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Whether given key is in the authorized_key file. + :rtype: boolean + """ + with open(authorized_keys(application_name, user)) as keys: + return ('%s' % public_key) in keys.read() + + +def add_authorized_key(public_key, application_name, user=None): + """Add given key to the authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + with open(authorized_keys(application_name, user), 'a') as keys: + keys.write("{}\n".format(public_key)) + + +def ssh_compute_add_host_and_key(public_key, hostname, private_address, + application_name, user=None): + """Add a compute nodes ssh details to local cache. + + Collect various hostname variations and add the corresponding host keys to + the local known hosts file. Finally, add the supplied public key to the + authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param hostname: Hostname to collect host keys from. + :type hostname: str + :param private_address:aCorresponding private address for hostname + :type private_address: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + # If remote compute node hands us a hostname, ensure we have a + # known hosts entry for its IP, hostname and FQDN. + hosts = [private_address] + + if not is_ipv6(private_address): + if hostname: + hosts.append(hostname) + + if is_ip(private_address): + hn = get_hostname(private_address) + if hn: + hosts.append(hn) + short = hn.split('.')[0] + if ns_query(short): + hosts.append(short) + else: + hosts.append(get_host_ip(private_address)) + short = private_address.split('.')[0] + if ns_query(short): + hosts.append(short) + + for host in list(set(hosts)): + add_known_host(host, application_name, user) + + if not ssh_authorized_key_exists(public_key, application_name, user): + log('Saving SSH authorized key for compute host at %s.' % + private_address) + add_authorized_key(public_key, application_name, user) + + +def ssh_compute_add(public_key, application_name, rid=None, unit=None, + user=None): + """Add a compute nodes ssh details to local cache. + + Collect various hostname variations and add the corresponding host keys to + the local known hosts file. Finally, add the supplied public key to the + authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param rid: Relation id of the relation between this charm and the app. If + none is supplied it is assumed its the relation relating to + the current hook context. + :type rid: str + :param unit: Unit to add ssh asserts for if none is supplied it is assumed + its the unit relating to the current hook context. + :type unit: str + :param user: The user that the ssh asserts are for. 
+    :type user: str
+    """
+    relation_data = relation_get(rid=rid, unit=unit)
+    ssh_compute_add_host_and_key(
+        public_key,
+        relation_data.get('hostname'),
+        relation_data.get('private-address'),
+        application_name,
+        user=user)
+
+
+def ssh_known_hosts_lines(application_name, user=None):
+    """Return contents of known_hosts file for given application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh asserts are for.
+    :type user: str
+    """
+    known_hosts_list = []
+    with open(known_hosts(application_name, user)) as hosts:
+        for hosts_line in hosts:
+            if hosts_line.rstrip():
+                known_hosts_list.append(hosts_line.rstrip())
+    return(known_hosts_list)
+
+
+def ssh_authorized_keys_lines(application_name, user=None):
+    """Return contents of authorized_keys file for given application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh asserts are for.
+    :type user: str
+    """
+    authorized_keys_list = []
+
+    with open(authorized_keys(application_name, user)) as keys:
+        for authkey_line in keys:
+            if authkey_line.rstrip():
+                authorized_keys_list.append(authkey_line.rstrip())
+    return(authorized_keys_list)
+
+
+def ssh_compute_remove(public_key, application_name, user=None):
+    """Remove given public key from authorized_keys file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh asserts are for.
+    :type user: str
+    """
+    if not (os.path.isfile(authorized_keys(application_name, user)) or
+            os.path.isfile(known_hosts(application_name, user))):
+        return
+
+    # pass the user through so the correct authorized_keys file is read
+    keys = ssh_authorized_keys_lines(application_name, user=user)
+    keys = [k.strip() for k in keys]
+
+    if public_key not in keys:
+        return
+
+    # filter out every occurrence of the key; removing entries from a list
+    # while iterating over it can skip adjacent duplicates
+    keys = [k for k in keys if k != public_key]
+
+    with open(authorized_keys(application_name, user), 'w') as _keys:
+        keys = '\n'.join(keys)
+        if not keys.endswith('\n'):
+            keys += '\n'
+        _keys.write(keys)
+
+
+def get_ssh_settings(application_name, user=None):
+    """Retrieve the known host entries and public keys for application
+
+    Retrieve the known host entries and public keys for all units of the
+    given application related to this application, for the app + user
+    combination.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh asserts are for.
+    :type user: str
+    :returns: Public keys + host keys for all units for app + user combination.
+ :rtype: dict + """ + settings = {} + keys = {} + prefix = '' + if user: + prefix = '{}_'.format(user) + + for i, line in enumerate(ssh_known_hosts_lines( + application_name=application_name, user=user)): + settings['{}known_hosts_{}'.format(prefix, i)] = line + if settings: + settings['{}known_hosts_max_index'.format(prefix)] = len( + settings.keys()) + + for i, line in enumerate(ssh_authorized_keys_lines( + application_name=application_name, user=user)): + keys['{}authorized_keys_{}'.format(prefix, i)] = line + if keys: + keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys()) + settings.update(keys) + return settings + + +def get_all_user_ssh_settings(application_name): + """Retrieve the known host entries and public keys for application + + Retrieve the known host entries and public keys for application for all + units of the given application related to this application for root user + and nova user. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :returns: Public keys + host keys for all units for app + user combination. + :rtype: dict + """ + settings = get_ssh_settings(application_name) + settings.update(get_ssh_settings(application_name, user='nova')) + return settings diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9df5f746fbdf5491c640a77df907b71817cbc5af --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/ceph.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/ceph.conf new file mode 100644 index 0000000000000000000000000000000000000000..a11ce8ab85654a4d838e448f36fe3698b59f9531 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/ceph.conf @@ -0,0 +1,24 @@ +############################################################################### +# [ WARNING ] +# ceph configuration file maintained by Juju +# local changes may be overwritten. 
+############################################################################### +[global] +{% if auth -%} +auth_supported = {{ auth }} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ mon_hosts }} +{% endif -%} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} +{% if rbd_features %} +rbd default features = {{ rbd_features }} +{% endif %} + +[client] +{% if rbd_client_cache_settings -%} +{% for key, value in rbd_client_cache_settings.items() -%} +{{ key }} = {{ value }} +{% endfor -%} +{%- endif %} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/git.upstart b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/git.upstart new file mode 100644 index 0000000000000000000000000000000000000000..4bed404bc01087c4dec6a44a56d17ed122e1d1e3 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/git.upstart @@ -0,0 +1,17 @@ +description "{{ service_description }}" +author "Juju {{ service_name }} Charm " + +start on runlevel [2345] +stop on runlevel [!2345] + +respawn + +exec start-stop-daemon --start --chuid {{ user_name }} \ + --chdir {{ start_dir }} --name {{ process_name }} \ + --exec {{ executable_name }} -- \ + {% for config_file in config_files -%} + --config-file={{ config_file }} \ + {% endfor -%} + {% if log_file -%} + --log-file={{ log_file }} + {% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/haproxy.cfg new file mode 100644 index 0000000000000000000000000000000000000000..d36af2aa86f91b2ee1249594fffd106843dd0676 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -0,0 +1,77 @@ +global + log /var/lib/haproxy/dev/log local0 + log /var/lib/haproxy/dev/log local1 notice + maxconn 20000 + user haproxy + group haproxy + spread-checks 0 + stats socket /var/run/haproxy/admin.sock mode 600 level admin + stats timeout 2m + +defaults + log global + mode tcp + option tcplog + option dontlognull + retries 3 +{%- if haproxy_queue_timeout %} + timeout queue {{ haproxy_queue_timeout }} +{%- else %} + timeout queue 9000 +{%- endif %} +{%- if haproxy_connect_timeout %} + timeout connect {{ haproxy_connect_timeout }} +{%- else %} + timeout connect 9000 +{%- endif %} +{%- if haproxy_client_timeout %} + timeout client {{ haproxy_client_timeout }} +{%- else %} + timeout client 90000 +{%- endif %} +{%- if haproxy_server_timeout %} + timeout server {{ haproxy_server_timeout }} +{%- else %} + timeout server 90000 +{%- endif %} + +listen stats + bind {{ local_host }}:{{ stat_port }} + mode http + stats enable + stats hide-version + stats realm Haproxy\ Statistics + stats uri / + stats auth admin:{{ stat_password }} + +{% if frontends -%} +{% for service, ports in service_ports.items() -%} +frontend tcp-in_{{ service }} + bind *:{{ ports[0] }} + {% if ipv6_enabled -%} + bind :::{{ ports[0] }} + {% endif -%} + {% for frontend in frontends -%} + acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} + use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} + {% endfor -%} + default_backend {{ service }}_{{ default_backend }} + +{% for frontend in frontends -%} +backend {{ service }}_{{ frontend }} 
+ balance leastconn + {% if backend_options -%} + {% if backend_options[service] -%} + {% for option in backend_options[service] -%} + {% for key, value in option.items() -%} + {{ key }} {{ value }} + {% endfor -%} + {% endfor -%} + {% endif -%} + {% endif -%} + {% for unit, address in frontends[frontend]['backends'].items() -%} + server {{ unit }} {{ address }}:{{ ports[1] }} check + {% endfor %} +{% endfor -%} +{% endfor -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/logrotate b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/logrotate new file mode 100644 index 0000000000000000000000000000000000000000..b2900d09a4ec2d04152ed7ce25bdc7346c349675 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/logrotate @@ -0,0 +1,9 @@ +/var/log/{{ logrotate_logs_location }}/*.log { + {{ logrotate_interval }} + {{ logrotate_count }} + compress + delaycompress + missingok + notifempty + copytruncate +} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/memcached.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/memcached.conf new file mode 100644 index 0000000000000000000000000000000000000000..26cb037c72beccdb1a4f9269844abdbac2406369 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/memcached.conf @@ -0,0 +1,53 @@ +############################################################################### +# [ WARNING ] +# memcached configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### + +# memcached default config file +# 2003 - Jay Bonci +# This configuration file is read by the start-memcached script provided as +# part of the Debian GNU/Linux distribution. + +# Run memcached as a daemon. This command is implied, and is not needed for the +# daemon to run. See the README.Debian that comes with this package for more +# information. +-d + +# Log memcached's output to /var/log/memcached +logfile /var/log/memcached.log + +# Be verbose +# -v + +# Be even more verbose (print client commands as well) +# -vv + +# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default +# Note that the daemon will grow to this size, but does not start out holding this much +# memory +-m 64 + +# Default connection port is 11211 +-p {{ memcache_port }} + +# Run the daemon as root. The start-memcached will default to running as root if no +# -u command is present in this config file +-u memcache + +# Specify which IP address to listen on. The default is to listen on all IP addresses +# This parameter is one of the only security measures that memcached has, so make sure +# it's listening on a firewalled interface. +-l {{ memcache_server }} + +# Limit the number of simultaneous incoming connections. The daemon default is 1024 +# -c 1024 + +# Lock down all paged memory. 
Consult with the README and homepage before you do this
+# -k
+
+# Return error when memory is exhausted (rather than removing items)
+# -M
+
+# Maximize core file limit
+# -r
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/openstack_https_frontend
new file mode 100644
index 0000000000000000000000000000000000000000..f614b3fa71928b615a1f058c6928d3c98bed9575
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/openstack_https_frontend
@@ -0,0 +1,29 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+    ServerName {{ endpoint }}
+    SSLEngine on
+    SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
+    SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8
+    SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+    ProxyPass / http://localhost:{{ int }}/
+    ProxyPassReverse / http://localhost:{{ int }}/
+    ProxyPreserveHost on
+    RequestHeader set X-Forwarded-Proto "https"
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+    Order deny,allow
+    Allow from all
+</Proxy>
+<Location />
+    Order allow,deny
+    Allow from all
+</Location>
+{% endif -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
new file mode 100644
index 0000000000000000000000000000000000000000..f614b3fa71928b615a1f058c6928d3c98bed9575
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
@@ -0,0 +1,29 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+    ServerName {{ endpoint }}
+    SSLEngine on
+    SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
+    SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8
+    SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+    ProxyPass / http://localhost:{{ int }}/
+    ProxyPassReverse / http://localhost:{{ int }}/
+    ProxyPreserveHost on
+    RequestHeader set X-Forwarded-Proto "https"
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+    Order deny,allow
+    Allow from all
+</Proxy>
+<Location />
+    Order allow,deny
+    Allow from all
+</Location>
+{% endif -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
new file mode 100644
index 0000000000000000000000000000000000000000..5dcebe7c863728b040337616b492296f536b3ef3
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
@@ -0,0 +1,12 @@
+{% if auth_host -%}
+[keystone_authtoken] +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_plugin = password +project_domain_id = default +user_domain_id = default +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy new file mode 100644 index 0000000000000000000000000000000000000000..9356b2be4e7dabe089b4d7d39793146bb92f3f48 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy @@ -0,0 +1,10 @@ +{% if auth_host -%} +[keystone_authtoken] +# Juno specific config (Bug #1557223) +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} +identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +admin_tenant_name = {{ admin_tenant_name }} +admin_user = {{ admin_user }} +admin_password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka new file mode 100644 index 0000000000000000000000000000000000000000..c281868b16a885cd01a234984974af0349a5d242 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -0,0 +1,22 @@ +{% if auth_host -%} +[keystone_authtoken] +auth_type = password +{% if api_version == "3" -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3 +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3 +project_domain_name = {{ admin_domain_name }} +user_domain_name = {{ admin_domain_name }} +{% else -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +project_domain_name = default +user_domain_name = default +{% endif -%} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% if use_memcache == true %} +memcached_servers = {{ memcache_url }} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only new file mode 100644 index 0000000000000000000000000000000000000000..d26a91fe1f00e2b12d094be727a53eddc87b829d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only @@ -0,0 +1,9 @@ +{% if auth_host -%} +[keystone_authtoken] +{% for option_name, option_value in keystone_authtoken.items() -%} +{{ option_name }} = {{ option_value }} +{% endfor -%} +{% if use_memcache == true %} +memcached_servers = {{ memcache_url }} +{% endif -%} +{% endif 
-%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-cache b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-cache new file mode 100644 index 0000000000000000000000000000000000000000..e056a32aafe32413cefb40b9414d064f2b0c8fd2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-cache @@ -0,0 +1,6 @@ +[cache] +{% if memcache_url %} +enabled = true +backend = oslo_cache.memcache_pool +memcache_servers = {{ memcache_url }} +{% endif %} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit new file mode 100644 index 0000000000000000000000000000000000000000..bed2216aba7217022ded17dec4cdb0871f513b40 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata new file mode 100644 index 0000000000000000000000000000000000000000..365f43757719b2de3c601ea6a9752dac8a8b3545 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-middleware new file mode 100644 index 0000000000000000000000000000000000000000..dd73230a42aa037582989979c1bc8132d30b9b38 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-middleware @@ -0,0 +1,5 @@ +[oslo_middleware] + +# Bug #1758675 +enable_proxy_headers_parsing = true + diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-notifications new file mode 100644 index 0000000000000000000000000000000000000000..71c7eb068eace94e8986d7a868bded236f75128c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -0,0 +1,15 @@ +{% if transport_url -%} +[oslo_messaging_notifications] +driver = {{ oslo_messaging_driver }} +transport_url = {{ transport_url }} +{% if send_notifications_to_logs %} 
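+{#- 'driver' is a multi-valued option in oslo.messaging, so the extra
+    'driver = log' emitted below adds the log driver alongside the one
+    configured above rather than replacing it. -#}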
+driver = log +{% endif %} +{% if notification_topics -%} +topics = {{ notification_topics }} +{% endif -%} +{% if notification_format -%} +[notifications] +notification_format = {{ notification_format }} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-placement b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-placement new file mode 100644 index 0000000000000000000000000000000000000000..97724bdb5af6e0352d0b920600d7cbbc8318fa7c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-placement @@ -0,0 +1,19 @@ +[placement] +{% if auth_host -%} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_type = password +{% if api_version == "3" -%} +project_domain_name = {{ admin_domain_name }} +user_domain_name = {{ admin_domain_name }} +{% else -%} +project_domain_name = default +user_domain_name = default +{% endif -%} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +{% endif -%} +{% if region -%} +os_region_name = {{ region }} +{% endif -%} +randomize_allocation_candidates = true diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo new file mode 100644 index 0000000000000000000000000000000000000000..b444c9c99bd179eaa0be31af462576e4ca9c45b5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo @@ -0,0 +1,22 @@ +{% if rabbitmq_host or rabbitmq_hosts -%} +[oslo_messaging_rabbit] +rabbit_userid = {{ rabbitmq_user }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +rabbit_password = {{ rabbitmq_password }} +{% if rabbitmq_hosts -%} +rabbit_hosts = {{ rabbitmq_hosts }} +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +rabbit_durable_queues = False +{% endif -%} +{% else -%} +rabbit_host = {{ rabbitmq_host }} +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +rabbit_port = {{ rabbit_ssl_port }} +{% if rabbit_ssl_ca -%} +kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} +{% endif -%} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-zeromq b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-zeromq new file mode 100644 index 0000000000000000000000000000000000000000..95f1a76ce87f9babdcba58da79c68d34c2776eea --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/section-zeromq @@ -0,0 +1,14 @@ +{% if zmq_host -%} +# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) +rpc_backend = zmq +rpc_zmq_host = {{ zmq_host }} +{% if zmq_redis_address -%} +rpc_zmq_matchmaker = redis +matchmaker_heartbeat_freq = 15 +matchmaker_heartbeat_ttl = 30 +[matchmaker_redis] +host = {{ zmq_redis_address }} +{% else -%} +rpc_zmq_matchmaker = ring +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/vendor_data.json 
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/vendor_data.json
new file mode 100644
index 0000000000000000000000000000000000000000..904f612a7f74490d4508920400d18d493d056477
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/vendor_data.json
@@ -0,0 +1 @@
+{{ vendor_data_json }}
\ No newline at end of file
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
new file mode 100644
index 0000000000000000000000000000000000000000..23b62a385283e6b8f1a6af0fcebccac747031b26
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
@@ -0,0 +1,91 @@
+# Configuration file maintained by Juju. Local changes may be overwritten.
+
+{% if port -%}
+Listen {{ port }}
+{% endif -%}
+
+{% if admin_port -%}
+Listen {{ admin_port }}
+{% endif -%}
+
+{% if public_port -%}
+Listen {{ public_port }}
+{% endif -%}
+
+{% if port -%}
+<VirtualHost *:{{ port }}>
+    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}
+    WSGIScriptAlias / {{ script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if admin_port -%}
+<VirtualHost *:{{ admin_port }}>
+    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-admin
+    WSGIScriptAlias / {{ admin_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if public_port -%}
+<VirtualHost *:{{ public_port }}>
+    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-public
+    WSGIScriptAlias / {{ public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
new file mode 100644
index 0000000000000000000000000000000000000000..23b62a385283e6b8f1a6af0fcebccac747031b26
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
@@ -0,0 +1,91 @@
+# Configuration file maintained by Juju. Local changes may be overwritten.
+
+{% if port -%}
+Listen {{ port }}
+{% endif -%}
+
+{% if admin_port -%}
+Listen {{ admin_port }}
+{% endif -%}
+
+{% if public_port -%}
+Listen {{ public_port }}
+{% endif -%}
+
+{% if port -%}
+<VirtualHost *:{{ port }}>
+    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}
+    WSGIScriptAlias / {{ script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if admin_port -%}
+<VirtualHost *:{{ admin_port }}>
+    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-admin
+    WSGIScriptAlias / {{ admin_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if public_port -%}
+<VirtualHost *:{{ public_port }}>
+    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-public
+    WSGIScriptAlias / {{ public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+{% endif -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templating.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templating.py
new file mode 100644
index 0000000000000000000000000000000000000000..050f8af5c9135db4fafdbf5098edcd22c7157ff8
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/templating.py
@@ -0,0 +1,379 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import six
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+    INFO,
+    TRACE
+)
+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
+
+try:
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+except ImportError:
+    apt_update(fatal=True)
+    if six.PY2:
+        apt_install('python-jinja2', fatal=True)
+    else:
+        apt_install('python3-jinja2', fatal=True)
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+
+
+class OSConfigException(Exception):
+    pass
+
+
+def get_loader(templates_dir, os_release):
+    """
+    Create a jinja2.ChoiceLoader containing template dirs up to
+    and including os_release.  If a release's template directory
+    is missing under templates_dir, it is omitted from the loader.
+    templates_dir is added to the bottom of the search list as a base
+    loading dir.
+
+    A charm may also ship a templates dir with this module
+    and it will be appended to the bottom of the search list, eg::
+
+        hooks/charmhelpers/contrib/openstack/templates
+
+    :param templates_dir (str): Base template directory containing release
+        sub-directories.
+    :param os_release (str): OpenStack release codename to construct template
+        loader.
+    :returns: jinja2.ChoiceLoader constructed with a list of
+        jinja2.FilesystemLoaders, ordered in descending
+        order by OpenStack release.
+    """
+    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
+                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
+
+    if not os.path.isdir(templates_dir):
+        log('Templates directory not found @ %s.' % templates_dir,
+            level=ERROR)
+        raise OSConfigException
+
+    # the bottom contains templates_dir and possibly a common templates dir
+    # shipped with the helper.
+    loaders = [FileSystemLoader(templates_dir)]
+    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
+    if os.path.isdir(helper_templates):
+        loaders.append(FileSystemLoader(helper_templates))
+
+    for rel, tmpl_dir in tmpl_dirs:
+        if os.path.isdir(tmpl_dir):
+            loaders.insert(0, FileSystemLoader(tmpl_dir))
+        if rel == os_release:
+            break
+    # demote this log to the lowest level; we don't really need to see these
+    # logs in production even when debugging.
+    log('Creating choice loader with dirs: %s' %
+        [l.searchpath for l in loaders], level=TRACE)
+    return ChoiceLoader(loaders)
+
+
+class OSConfigTemplate(object):
+    """
+    Associates a config file template with a list of context generators.
+    Responsible for constructing a template context based on those generators.
+    """
+
+    def __init__(self, config_file, contexts, config_template=None):
+        self.config_file = config_file
+
+        if hasattr(contexts, '__call__'):
+            self.contexts = [contexts]
+        else:
+            self.contexts = contexts
+
+        self._complete_contexts = []
+
+        self.config_template = config_template
+
+    def context(self):
+        ctxt = {}
+        for context in self.contexts:
+            _ctxt = context()
+            if _ctxt:
+                ctxt.update(_ctxt)
+                # track interfaces for every complete context.
+                [self._complete_contexts.append(interface)
+                 for interface in context.interfaces
+                 if interface not in self._complete_contexts]
+        return ctxt
+
+    def complete_contexts(self):
+        '''
+        Return a list of interfaces that have satisfied contexts.
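+
+        e.g. ['shared-db', 'amqp'] once those relations have provided all
+        required data (interface names illustrative).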
+        '''
+        if self._complete_contexts:
+            return self._complete_contexts
+        self.context()
+        return self._complete_contexts
+
+    @property
+    def is_string_template(self):
+        """:returns: Boolean if this instance is a template initialised with a string"""
+        return self.config_template is not None
+
+
+class OSConfigRenderer(object):
+    """
+    This class provides a common templating system to be used by OpenStack
+    charms.  It is intended to help charms share common code and templates,
+    and ease the burden of managing config templates across multiple OpenStack
+    releases.
+
+    Basic usage::
+
+        # import some common context generators from charmhelpers
+        from charmhelpers.contrib.openstack import context
+
+        # Create a renderer object for a specific OS release.
+        configs = OSConfigRenderer(templates_dir='/tmp/templates',
+                                   openstack_release='grizzly')
+        # register some config files with context generators.
+        configs.register(config_file='/etc/nova/nova.conf',
+                         contexts=[context.SharedDBContext(),
+                                   context.AMQPContext()])
+        configs.register(config_file='/etc/nova/api-paste.ini',
+                         contexts=[context.IdentityServiceContext()])
+        configs.register(config_file='/etc/haproxy/haproxy.conf',
+                         contexts=[context.HAProxyContext()])
+        configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
+                         contexts=[context.ExtraPolicyContext(),
+                                   context.KeystoneContext()],
+                         config_template=hookenv.config('extra-policy'))
+        # write out a single config
+        configs.write('/etc/nova/nova.conf')
+        # write out all registered configs
+        configs.write_all()
+
+    **OpenStack Releases and template loading**
+
+    When the object is instantiated, it is associated with a specific OS
+    release.  This dictates how the template loader will be constructed.
+
+    The constructed loader attempts to load the template from several places
+    in the following order:
+    - from the most recent OS release-specific template dir (if one exists)
+    - the base templates_dir
+    - a template directory shipped in the charm with this helper file.
+
+    For the example above, '/tmp/templates' contains the following structure::
+
+        /tmp/templates/nova.conf
+        /tmp/templates/api-paste.ini
+        /tmp/templates/grizzly/api-paste.ini
+        /tmp/templates/havana/api-paste.ini
+
+    Since it was registered with the grizzly release, it first searches
+    the grizzly directory for nova.conf, then the templates dir.
+
+    When writing api-paste.ini, it will find the template in the grizzly
+    directory.
+
+    If the object were created with folsom, it would fall back to the
+    base templates dir for its api-paste.ini template.
+
+    This system should help manage changes in config files through
+    openstack releases, allowing charms to fall back to the most recently
+    updated config template for a given release.
+
+    The haproxy.conf, since it is not shipped in the templates dir, will
+    be loaded from the module directory's template directory, eg
+    $CHARM/hooks/charmhelpers/contrib/openstack/templates.  This allows
+    us to ship common templates (haproxy, apache) with the helpers.
+
+    **Context generators**
+
+    Context generators are used to generate template contexts during hook
+    execution.  Doing so may require inspecting service relations, charm
+    config, etc.  When registered, a config file is associated with a list
+    of generators.  When a template is rendered and written, all context
+    generators are called in a chain to generate the context dictionary
+    passed to the jinja2 template.  See context.py for more info.
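+
+    **Changing releases**
+
+    On an OpenStack upgrade, the renderer can be re-pointed at a newer
+    release's templates without re-registering any config files, e.g.::
+
+        configs.set_release(openstack_release='havana')
+        configs.write_all()
+
+    (Illustrative; see set_release() below.)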
+ """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + if six.PY2: + apt_install('python-jinja2') + else: + apt_install('python3-jinja2') + + def register(self, config_file, contexts, config_template=None): + """ + Register a config file with a list of context generators to be called + during rendering. + config_template can be used to load a template from a string instead of + using template loaders and template files. + :param config_file (str): a path where a config file will be rendered + :param contexts (list): a list of context dictionaries with kv pairs + :param config_template (str): an optional template string to use + """ + self.templates[config_file] = OSConfigTemplate( + config_file=config_file, + contexts=contexts, + config_template=config_template + ) + log('Registered config file: {}'.format(config_file), + level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from {}'.format(template.filename), + level=INFO) + return template + + def _get_template_from_string(self, ostmpl): + ''' + Get a jinja2 template object from a string. + :param ostmpl: OSConfigTemplate to use as a data source. + ''' + self._get_tmpl_env() + template = self._tmpl_env.from_string(ostmpl.config_template) + log('Loaded a template from a string for {}'.format( + ostmpl.config_file), + level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: {}'.format(config_file), level=ERROR) + raise OSConfigException + + ostmpl = self.templates[config_file] + ctxt = ostmpl.context() + + if ostmpl.is_string_template: + template = self._get_template_from_string(ostmpl) + log('Rendering from a string template: ' + '{}'.format(config_file), + level=INFO) + else: + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking + # for it using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from {} by {} or {}.' + ''.format( + self.templates_dir, + os.path.basename(config_file), + _tmpl + ), + level=ERROR) + raise e + + log('Rendering from template: {}'.format(config_file), + level=INFO) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. 
+ """ + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + + _out = self.render(config_file) + if six.PY3: + _out = _out.encode('UTF-8') + + with open(config_file, 'wb') as out: + out.write(_out) + + log('Wrote template %s.' % config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. + """ + [self.write(k) for k in six.iterkeys(self.templates)] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. + ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in six.itervalues(self.templates)] + return interfaces + + def get_incomplete_context_data(self, interfaces): + ''' + Return dictionary of relation status of interfaces and any missing + required context data. Example: + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}} + ''' + incomplete_context_data = {} + + for i in six.itervalues(self.templates): + for context in i.contexts: + for interface in interfaces: + related = False + if interface in context.interfaces: + related = context.get_related() + missing_data = context.missing_data + if missing_data: + incomplete_context_data[interface] = {'missing_data': missing_data} + if related: + if incomplete_context_data.get(interface): + incomplete_context_data[interface].update({'related': True}) + else: + incomplete_context_data[interface] = {'related': True} + else: + incomplete_context_data[interface] = {'related': False} + return incomplete_context_data diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fbf0156108537f9caae35e193c346b0e4ee237b9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,2352 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Common python helper functions used for OpenStack charms. 
+from collections import OrderedDict, namedtuple +from functools import wraps + +import subprocess +import json +import os +import sys +import re +import itertools +import functools + +import six +import traceback +import uuid +import yaml + +from charmhelpers import deprecate + +from charmhelpers.contrib.network import ip + +from charmhelpers.core import unitdata + +from charmhelpers.core.hookenv import ( + WORKLOAD_STATES, + action_fail, + action_set, + config, + expected_peer_units, + expected_related_units, + log as juju_log, + charm_dir, + INFO, + ERROR, + metadata, + related_units, + relation_get, + relation_id, + relation_ids, + relation_set, + status_set, + hook_name, + application_version_set, + cached, + leader_set, + leader_get, + local_unit, +) + +from charmhelpers.core.strutils import ( + BasicStringComparator, + bool_from_string, +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr, + is_ipv6, + port_has_listener, +) + +from charmhelpers.core.host import ( + lsb_release, + mounts, + umount, + service_running, + service_pause, + service_resume, + service_stop, + service_start, + restart_on_change_helper, +) +from charmhelpers.fetch import ( + apt_cache, + import_key as fetch_import_key, + add_source as fetch_add_source, + SourceConfigError, + GPGKeyError, + get_upstream_version, + filter_missing_packages, + ubuntu_apt_pkg as apt, +) + +from charmhelpers.fetch.snap import ( + snap_install, + snap_refresh, + valid_snap_channel, +) + +from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk +from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device +from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.policyd import ( + policyd_status_message_prefix, + POLICYD_CONFIG_NAME, +) + +from charmhelpers.contrib.openstack.ha.utils import ( + expect_ha, +) + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' + 'restricted main multiverse universe') + +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', + 'queens', + 'rocky', + 'stein', + 'train', + 'ussuri', +) + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), + ('2014.2', 'juno'), + ('2015.1', 'kilo'), + ('2015.2', 'liberty'), + ('2016.1', 'mitaka'), + ('2016.2', 'newton'), + ('2017.1', 'ocata'), + ('2017.2', 'pike'), + ('2018.1', 'queens'), + ('2018.2', 'rocky'), + ('2019.1', 'stein'), + ('2019.2', 'train'), + ('2020.1', 'ussuri'), +]) + +# The ugly duckling - must list releases oldest to newest +SWIFT_CODENAMES = OrderedDict([ + 
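+    # Each codename maps to the list of swift point releases shipped for
+    # it; some versions (e.g. 2.5.0 under both liberty and mitaka) overlap,
+    # and get_swift_codename() below disambiguates those cases.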
('diablo', + ['1.4.3']), + ('essex', + ['1.4.8']), + ('folsom', + ['1.7.4']), + ('grizzly', + ['1.7.6', '1.7.7', '1.8.0']), + ('havana', + ['1.9.0', '1.9.1', '1.10.0']), + ('icehouse', + ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), + ('juno', + ['2.0.0', '2.1.0', '2.2.0']), + ('kilo', + ['2.2.1', '2.2.2']), + ('liberty', + ['2.3.0', '2.4.0', '2.5.0']), + ('mitaka', + ['2.5.0', '2.6.0', '2.7.0']), + ('newton', + ['2.8.0', '2.9.0', '2.10.0']), + ('ocata', + ['2.11.0', '2.12.0', '2.13.0']), + ('pike', + ['2.13.0', '2.15.0']), + ('queens', + ['2.16.0', '2.17.0']), + ('rocky', + ['2.18.0', '2.19.0']), + ('stein', + ['2.20.0', '2.21.0']), + ('train', + ['2.22.0', '2.23.0']), + ('ussuri', + ['2.24.0', '2.25.0']), +]) + +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12', 'liberty'), + ('13', 'mitaka'), + ('14', 'newton'), + ('15', 'ocata'), + ('16', 'pike'), + ('17', 'queens'), + ('18', 'rocky'), + ('19', 'stein'), + ('20', 'train'), + ('21', 'ussuri'), + ]), + 'neutron-common': OrderedDict([ + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), + ('14', 'stein'), + ('15', 'train'), + ('16', 'ussuri'), + ]), + 'cinder-common': OrderedDict([ + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), + ('14', 'stein'), + ('15', 'train'), + ('16', 'ussuri'), + ]), + 'keystone': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), + ('17', 'ussuri'), + ]), + 'horizon-common': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), + ('18', 'ussuri'), + ]), + 'ceilometer-common': OrderedDict([ + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), + ('12', 'stein'), + ('13', 'train'), + ('14', 'ussuri'), + ]), + 'heat-common': OrderedDict([ + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), + ('12', 'stein'), + ('13', 'train'), + ('14', 'ussuri'), + ]), + 'glance-common': OrderedDict([ + ('11', 'liberty'), + ('12', 'mitaka'), + ('13', 'newton'), + ('14', 'ocata'), + ('15', 'pike'), + ('16', 'queens'), + ('17', 'rocky'), + ('18', 'stein'), + ('19', 'train'), + ('20', 'ussuri'), + ]), + 'openstack-dashboard': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), + ('18', 'ussuri'), + ]), +} + +DEFAULT_LOOPBACK_SIZE = '5G' + +DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading' + +DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY] + + +class CompareOpenStackReleases(BasicStringComparator): + """Provide comparisons of OpenStack releases. + + Use in the form of + + if CompareOpenStackReleases(release) > 'mitaka': + # do something with mitaka + """ + _list = OPENSTACK_RELEASES + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_installed_semantic_versioned_packages(): + '''Get a list of installed packages which have OpenStack semantic versioning + + :returns List of installed packages + :rtype: [pkg1, pkg2, ...] 
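+
+    e.g. ['keystone', 'nova-common'] on a unit with those packages
+    installed (illustrative)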
+ ''' + return filter_missing_packages(PACKAGE_CODENAMES.keys()) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src is None: + return rel + if src in ['distro', 'distro-proposed', 'proposed']: + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('-')[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if (src.startswith('deb') or + src.startswith('ppa') or + src.startswith('snap')): + for v in OPENSTACK_CODENAMES.values(): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): + '''Determine OpenStack version number from codename.''' + for k, v in six.iteritems(version_map): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_version_codename_swift(codename): + '''Determine OpenStack version number of swift from codename.''' + for k, v in six.iteritems(SWIFT_CODENAMES): + if k == codename: + return v[-1] + e = 'Could not derive swift version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_swift_codename(version): + '''Determine OpenStack codename that corresponds to swift version.''' + codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + + if len(codenames) > 1: + # If more than one release codename contains this version we determine + # the actual codename based on the highest available install source. + for codename in reversed(codenames): + releases = UBUNTU_OPENSTACK_RELEASE + release = [k for k, v in six.iteritems(releases) if codename in v] + ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if six.PY3: + ret = ret.decode('UTF-8') + if codename in ret or release[0] in ret: + return codename + elif len(codenames) == 1: + return codenames[0] + + # NOTE: fallback - attempt to match with just major.minor version + match = re.match(r'^(\d+)\.(\d+)', version) + if match: + major_minor_version = match.group(0) + for codename, versions in six.iteritems(SWIFT_CODENAMES): + for release_version in versions: + if release_version.startswith(major_minor_version): + return codename + + return None + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + + if snap_install_requested(): + cmd = ['snap', 'list', package] + try: + out = subprocess.check_output(cmd) + if six.PY3: + out = out.decode('UTF-8') + except subprocess.CalledProcessError: + return None + lines = out.split('\n') + for line in lines: + if package in line: + # Second item in list is Version + return line.split()[1] + + cache = apt_cache() + + try: + pkg = cache[package] + except Exception: + if not fatal: + return None + # the package is unknown to the current apt cache. 
+        e = 'Could not determine version of package with no installation '\
+            'candidate: %s' % package
+        error_out(e)
+
+    if not pkg.current_ver:
+        if not fatal:
+            return None
+        # package is known, but no version is currently installed.
+        e = 'Could not determine version of uninstalled package: %s' % package
+        error_out(e)
+
+    vers = apt.upstream_version(pkg.current_ver.ver_str)
+    if 'swift' in pkg.name:
+        # Fully x.y.z match for swift versions
+        match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers)
+    else:
+        # x.y match only for 20XX.X
+        # and ignore patch level for other packages
+        match = re.match(r'^(\d+)\.(\d+)', vers)
+
+    if match:
+        vers = match.group(0)
+
+    # Generate a major version number for newer semantic
+    # versions of openstack projects
+    major_vers = vers.split('.')[0]
+    # >= Liberty independent project versions
+    if (package in PACKAGE_CODENAMES and
+            major_vers in PACKAGE_CODENAMES[package]):
+        return PACKAGE_CODENAMES[package][major_vers]
+    else:
+        # < Liberty co-ordinated project versions
+        try:
+            if 'swift' in pkg.name:
+                return get_swift_codename(vers)
+            else:
+                return OPENSTACK_CODENAMES[vers]
+        except KeyError:
+            if not fatal:
+                return None
+            e = 'Could not determine OpenStack codename for version %s' % vers
+            error_out(e)
+
+
+def get_os_version_package(pkg, fatal=True):
+    '''Derive OpenStack version number from an installed package.'''
+    codename = get_os_codename_package(pkg, fatal=fatal)
+
+    if not codename:
+        return None
+
+    if 'swift' in pkg:
+        vers_map = SWIFT_CODENAMES
+        for cname, version in six.iteritems(vers_map):
+            if cname == codename:
+                return version[-1]
+    else:
+        vers_map = OPENSTACK_CODENAMES
+        for version, cname in six.iteritems(vers_map):
+            if cname == codename:
+                return version
+    # e = "Could not determine OpenStack version for package: %s" % pkg
+    # error_out(e)
+
+
+# Module local cache variable for the os_release.
+_os_rel = None
+
+
+def reset_os_release():
+    '''Unset the cached os_release version'''
+    global _os_rel
+    _os_rel = None
+
+
+def os_release(package, base=None, reset_cache=False, source_key=None):
+    """Returns OpenStack release codename from a cached global.
+
+    If reset_cache then unset the cached os_release version and return the
+    freshly determined version.
+
+    If the codename cannot be determined from either an installed package or
+    the installation source, the earliest release supported by the charm should
+    be returned.
+
+    :param package: Name of package to determine release from
+    :type package: str
+    :param base: Fallback codename if attempts to determine it from the
+                 package fail
+    :type base: Optional[str]
+    :param reset_cache: Reset any cached codename value
+    :type reset_cache: bool
+    :param source_key: Name of source configuration option
+                       (default: 'openstack-origin')
+    :type source_key: Optional[str]
+    :returns: OpenStack release codename
+    :rtype: str
+    """
+    source_key = source_key or 'openstack-origin'
+    if not base:
+        base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']]
+    global _os_rel
+    if reset_cache:
+        reset_os_release()
+    if _os_rel:
+        return _os_rel
+    _os_rel = (
+        get_os_codename_package(package, fatal=False) or
+        get_os_codename_install_source(config(source_key)) or
+        base)
+    return _os_rel
+
+
+@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log)
+def import_key(keyid):
+    """Import a key, either ASCII armored, or a GPG key id.
+
+    @param keyid: the key in ASCII armor format, or a GPG key id.
+    @raises SystemExit() via sys.exit() on failure.
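+
+    e.g. import_key('5EDB1B62EC4926EA') for a bare GPG key id (the cloud
+    archive key above), or pass a full ASCII-armored key block.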
+ """ + try: + return fetch_import_key(keyid) + except GPGKeyError as e: + error_out("Could not import key: {}".format(str(e))) + + +def get_source_and_pgp_key(source_and_key): + """Look for a pgp key ID or ascii-armor key in the given input. + + :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + optional. + :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id + if there was no '|' in the source_and_key string. + """ + try: + source, key = source_and_key.split('|', 2) + return source, key or None + except ValueError: + return source_and_key, None + + +@deprecate("use charmhelpers.fetch.add_source() instead.", + "2017-07", log=juju_log) +def configure_installation_source(source_plus_key): + """Configure an installation source. + + The functionality is provided by charmhelpers.fetch.add_source() + The difference between the two functions is that add_source() signature + requires the key to be passed directly, whereas this function passes an + optional key by appending '|' to the end of the source specificiation + 'source'. + + Another difference from add_source() is that the function calls sys.exit(1) + if the configuration fails, whereas add_source() raises + SourceConfigurationError(). Another difference, is that add_source() + silently fails (with a juju_log command) if there is no matching source to + configure, whereas this function fails with a sys.exit(1) + + :param source: String_plus_key -- see above for details. + + Note that the behaviour on error is to log the error to the juju log and + then call sys.exit(1). + """ + if source_plus_key.startswith('snap'): + # Do nothing for snap installs + return + # extract the key if there is one, denoted by a '|' in the rel + source, key = get_source_and_pgp_key(source_plus_key) + + # handle the ordinary sources via add_source + try: + fetch_add_source(source, key, fail_invalid=True) + except SourceConfigError as se: + error_out(str(se)) + + +def config_value_changed(option): + """ + Determine if config value changed since last call to this function. + """ + hook_data = unitdata.HookData() + with hook_data(): + db = unitdata.kv() + current = config(option) + saved = db.get(option) + db.set(option, current) + if saved is None: + return False + return current != saved + + +def get_endpoint_key(service_name, relation_id, unit_name): + """Return the key used to refer to an ep changed notification from a unit. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param relation_id: The id of the relation the unit is on. + :type relation_id: str + :param unit_name: The name of the unit publishing the notification. + :type unit_name: str + :returns: The key used to refer to an ep changed notification from a unit + :rtype: str + """ + return '{}-{}-{}'.format( + service_name, + relation_id.replace(':', '_'), + unit_name.replace('/', '_')) + + +def get_endpoint_notifications(service_names, rel_name='identity-service'): + """Return all notifications for the given services. + + :param service_names: List of service name. + :type service_name: List + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: A dict containing the source of the notification and its nonce. 
+    :rtype: Dict[str, str]
+    """
+    notifications = {}
+    for rid in relation_ids(rel_name):
+        for unit in related_units(relid=rid):
+            ep_changed_json = relation_get(
+                rid=rid,
+                unit=unit,
+                attribute='ep_changed')
+            if ep_changed_json:
+                ep_changed = json.loads(ep_changed_json)
+                for service in service_names:
+                    if ep_changed.get(service):
+                        key = get_endpoint_key(service, rid, unit)
+                        notifications[key] = ep_changed[service]
+    return notifications
+
+
+def endpoint_changed(service_name, rel_name='identity-service'):
+    """Whether a new notification has been received for an endpoint.
+
+    :param service_name: Service name eg nova, neutron, placement etc
+    :type service_name: str
+    :param rel_name: Name of the relation to query
+    :type rel_name: str
+    :returns: Whether endpoint has changed
+    :rtype: bool
+    """
+    changed = False
+    with unitdata.HookData()() as t:
+        db = t[0]
+        notifications = get_endpoint_notifications(
+            [service_name],
+            rel_name=rel_name)
+        for key, nonce in notifications.items():
+            if db.get(key) != nonce:
+                juju_log(('New endpoint change notification found: '
+                          '{}={}').format(key, nonce),
+                         'INFO')
+                changed = True
+                break
+    return changed
+
+
+def save_endpoint_changed_triggers(service_names, rel_name='identity-service'):
+    """Save the endpoint triggers in db so changes can be tracked.
+
+    :param service_names: List of service names.
+    :type service_names: List
+    :param rel_name: Name of the relation to query
+    :type rel_name: str
+    """
+    with unitdata.HookData()() as t:
+        db = t[0]
+        notifications = get_endpoint_notifications(
+            service_names,
+            rel_name=rel_name)
+        for key, nonce in notifications.items():
+            db.set(key, nonce)
+
+
+def save_script_rc(script_path="scripts/scriptrc", **env_vars):
+    """
+    Write an rc file in the charm-delivered directory containing
+    exported environment variables provided by env_vars. Any charm scripts run
+    outside the juju hook environment can source this scriptrc to obtain
+    updated config information necessary to perform health checks or
+    service changes.
+    """
+    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
+    if not os.path.exists(os.path.dirname(juju_rc_path)):
+        os.mkdir(os.path.dirname(juju_rc_path))
+    with open(juju_rc_path, 'wt') as rc_script:
+        rc_script.write(
+            "#!/bin/bash\n")
+        [rc_script.write('export %s=%s\n' % (u, p))
+         for u, p in six.iteritems(env_vars) if u != "script_path"]
+
+
+def openstack_upgrade_available(package):
+    """
+    Determines if an OpenStack upgrade is available from installation
+    source, based on version of installed package.
+
+    :param package: str: Name of installed package.
+
+    :returns: bool: True if the configured installation source offers a
+                    newer version of the package.
+    """
+
+    src = config('openstack-origin')
+    cur_vers = get_os_version_package(package)
+    if not cur_vers:
+        # The package has not been installed yet; do not attempt an upgrade
+        return False
+    if "swift" in package:
+        codename = get_os_codename_install_source(src)
+        avail_vers = get_os_version_codename_swift(codename)
+    else:
+        try:
+            avail_vers = get_os_version_install_source(src)
+        except Exception:
+            avail_vers = cur_vers
+    apt.init()
+    return apt.version_compare(avail_vers, cur_vers) >= 1
+
+
+def ensure_block_device(block_device):
+    '''
+    Confirm block_device, create as loopback if necessary.
+
+    :param block_device: str: Full path of block device to ensure.
+
+    :returns: str: Full path of ensured block device.
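+
+    e.g. 'vdb' -> '/dev/vdb', '/dev/vdb' -> '/dev/vdb', and
+    '/tmp/img|5G' -> a 5G loopback device backed by /tmp/img
+    (illustrative paths; see the loopback handling below).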
+ ''' + _none = ['None', 'none', None] + if (block_device in _none): + error_out('prepare_storage(): Missing required input: block_device=%s.' + % block_device) + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + error_out('Failed to locate valid block device at %s' % bdev) + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) + + +is_ip = ip.is_ip +ns_query = ip.ns_query +get_host_ip = ip.get_host_ip +get_hostname = ip.get_hostname + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + if config('vip'): + vips = config('vip').split() + for vip in vips: + if vip and is_ipv6(vip): + hosts.append(vip) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + for key in list(kwargs.keys()): + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Set the state of the workload status for the charm. + + This calls _determine_os_workload_status() to get the new state, message + and sets the status using status_set() + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. 
+ @returns state, message: the new workload status, user message + """ + state, message = _determine_os_workload_status( + configs, required_interfaces, charm_func, services, ports) + status_set(state, message) + + +def _determine_os_workload_status( + configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Determine the state of the workload status for the charm. + + This function returns the new workload status for the charm based + on the state of the interfaces, the paused state and whether the + services are actually running and any specified ports are open. + + This checks: + + 1. if the unit should be paused, that it is actually paused. If so the + state is 'maintenance' + message, else 'broken'. + 2. that the interfaces/relations are complete. If they are not then + it sets the state to either 'broken' or 'waiting' and an appropriate + message. + 3. If all the relation data is set, then it checks that the actual + services really are running. If not it sets the state to 'broken'. + + If everything is okay then the state returns 'active'. + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message + """ + state, message = _ows_check_if_paused(services, ports) + + if state is None: + state, message = _ows_check_generic_interfaces( + configs, required_interfaces) + + if state != 'maintenance' and charm_func: + # _ows_check_charm_func() may modify the state, message + state, message = _ows_check_charm_func( + state, message, lambda: charm_func(configs)) + + if state is None: + state, message = _ows_check_services_running(services, ports) + + if state is None: + state = 'active' + message = "Unit is ready" + juju_log(message, 'INFO') + + try: + if config(POLICYD_CONFIG_NAME): + message = "{} {}".format(policyd_status_message_prefix(), message) + except Exception: + pass + + return state, message + + +def _ows_check_if_paused(services=None, ports=None): + """Check if the unit is supposed to be paused, and if so check that the + services/ports (if passed) are actually stopped/not being listened to. + + If the unit isn't supposed to be paused, just return None, None + + If the unit is performing a series upgrade, return a message indicating + this. + + @param services: OPTIONAL services spec or list of service names. + @param ports: OPTIONAL list of port numbers. + @returns state, message or None, None + """ + if is_unit_upgrading_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "blocked" + message = ("Ready for do-release-upgrade and reboot. " + "Set complete when finished.") + return state, message + + if is_unit_paused_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "maintenance" + message = "Paused. Use 'resume' action to resume normal service." + return state, message + return None, None + + +def _ows_check_generic_interfaces(configs, required_interfaces): + """Check the complete contexts to determine the workload status. 
+
+    - Checks for missing or incomplete contexts
+    - juju log details of missing required data.
+    - determines the correct workload status
+    - creates an appropriate message for status_set(...)
+
+    if there are no problems then the function returns None, None
+
+    @param configs: a templating.OSConfigRenderer() object
+    @params required_interfaces: {generic_interface: [specific_interface], }
+    @returns state, message or None, None
+    """
+    incomplete_rel_data = incomplete_relation_data(configs,
+                                                   required_interfaces)
+    state = None
+    message = None
+    missing_relations = set()
+    incomplete_relations = set()
+
+    for generic_interface, relations_states in incomplete_rel_data.items():
+        related_interface = None
+        missing_data = {}
+        # Related or not?
+        for interface, relation_state in relations_states.items():
+            if relation_state.get('related'):
+                related_interface = interface
+                missing_data = relation_state.get('missing_data')
+                break
+        # No relation ID for the generic_interface?
+        if not related_interface:
+            juju_log("{} relation is missing and must be related for "
+                     "functionality. ".format(generic_interface), 'WARN')
+            state = 'blocked'
+            missing_relations.add(generic_interface)
+        else:
+            # Relation ID exists but no related unit
+            if not missing_data:
+                # Edge case - relation ID exists but unit is departing
+                _hook_name = hook_name()
+                if (('departed' in _hook_name or 'broken' in _hook_name) and
+                        related_interface in _hook_name):
+                    state = 'blocked'
+                    missing_relations.add(generic_interface)
+                    juju_log("{} relation's interface, {}, "
+                             "relationship is departed or broken "
+                             "and is required for functionality."
+                             "".format(generic_interface, related_interface),
+                             "WARN")
+                # Normal case relation ID exists but no related unit
+                # (joining)
+                else:
+                    juju_log("{} relation's interface, {}, is related but has"
+                             " no units in the relation."
+                             "".format(generic_interface, related_interface),
+                             "INFO")
+            # Related unit exists and data missing on the relation
+            else:
+                juju_log("{} relation's interface, {}, is related awaiting "
+                         "the following data from the relationship: {}. "
+                         "".format(generic_interface, related_interface,
+                                   ", ".join(missing_data)), "INFO")
+            if state != 'blocked':
+                state = 'waiting'
+            if generic_interface not in missing_relations:
+                incomplete_relations.add(generic_interface)
+
+    if missing_relations:
+        message = "Missing relations: {}".format(", ".join(missing_relations))
+        if incomplete_relations:
+            message += "; incomplete relations: {}" \
+                       "".format(", ".join(incomplete_relations))
+        state = 'blocked'
+    elif incomplete_relations:
+        message = "Incomplete relations: {}" \
+                  "".format(", ".join(incomplete_relations))
+        state = 'waiting'
+
+    return state, message
+
+
+def _ows_check_charm_func(state, message, charm_func_with_configs):
+    """Run a custom check function for the charm to see if it wants to
+    change the state.  This is only run if not in 'maintenance' and
+    tests to see if the new state is more important than the previous
+    one determined by the interfaces/relations check.
+
+    @param state: the previously determined state so far.
+    @param message: the user orientated message so far.
+    @param charm_func: a callable function that returns state, message
+    @returns state, message strings.
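+
+    e.g. a charm_func returning ('blocked', 'backend unreachable') overrides
+    a 'waiting' state from the relation checks, because 'blocked' ranks
+    higher in workload_state_compare() below.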
+    """
+    if charm_func_with_configs:
+        charm_state, charm_message = charm_func_with_configs()
+        if (charm_state != 'active' and
+                charm_state != 'unknown' and
+                charm_state is not None):
+            state = workload_state_compare(state, charm_state)
+            if message:
+                charm_message = charm_message.replace("Incomplete relations: ",
+                                                      "")
+                message = "{}, {}".format(message, charm_message)
+            else:
+                message = charm_message
+    return state, message
+
+
+def _ows_check_services_running(services, ports):
+    """Check that the services that should be running are actually running
+    and that any ports specified are being listened to.
+
+    @param services: list of strings OR dictionary specifying services/ports
+    @param ports: list of ports
+    @returns state, message: strings or None, None
+    """
+    messages = []
+    state = None
+    if services is not None:
+        services = _extract_services_list_helper(services)
+        services_running, running = _check_running_services(services)
+        if not all(running):
+            messages.append(
+                "Services not running that should be: {}"
+                .format(", ".join(_filter_tuples(services_running, False))))
+            state = 'blocked'
+        # also verify that the ports that should be open are open
+        # NB, that ServiceManager objects only OPTIONALLY have ports
+        map_not_open, ports_open = (
+            _check_listening_on_services_ports(services))
+        if not all(ports_open):
+            # find which service has missing ports. They are in service
+            # order which makes it a bit easier.
+            message_parts = {service: ", ".join([str(v) for v in open_ports])
+                             for service, open_ports in map_not_open.items()}
+            message = ", ".join(
+                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
+            messages.append(
+                "Services with ports not open that should be: {}"
+                .format(message))
+            state = 'blocked'
+
+    if ports is not None:
+        # and we can also check ports which we don't know the service for
+        ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
+        if not all(ports_open_bools):
+            messages.append(
+                "Ports which should be open, but are not: {}"
+                .format(", ".join([str(p) for p, v in ports_open
+                                   if not v])))
+            state = 'blocked'
+
+    if state is not None:
+        message = "; ".join(messages)
+        return state, message
+
+    return None, None
+
+
+def _extract_services_list_helper(services):
+    """Extract an OrderedDict of {service: [ports]} of the supplied services
+    for use by the other functions.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    @param services: see above
+    @returns OrderedDict(service: [ports], ...)
+    """
+    if services is None:
+        return {}
+    if isinstance(services, dict):
+        services = services.values()
+    # either extract the list of services from the dictionary, or if
+    # it is a simple string, use that. i.e. works with mixed lists.
+    _s = OrderedDict()
+    for s in services:
+        if isinstance(s, dict) and 'service' in s:
+            _s[s['service']] = s.get('ports', [])
+        if isinstance(s, str):
+            _s[s] = []
+    return _s
+
+
+def _check_running_services(services):
+    """Check that the services dict provided is actually running and provide
+    a list of (service, boolean) tuples for each service.
+
+    Returns both a zipped list of (service, boolean) and a list of booleans
+    in the same order as the services.
+
+    @param services: OrderedDict of strings: [ports], one for each service to
+        check.
+    @returns [(service, boolean), ...], : results for checks
+             [boolean]                  : just the result of the service checks
+    """
+    services_running = [service_running(s) for s in services]
+    return list(zip(services, services_running)), services_running
+
+
+def _check_listening_on_services_ports(services, test=False):
+    """Check that the unit is actually listening (has the port open) on the
+    ports that the service specifies are open. If test is True then the
+    function returns the services with ports that are open rather than
+    closed.
+
+    Returns an OrderedDict of service: ports and a list of booleans
+
+    @param services: OrderedDict(service: [port, ...], ...)
+    @param test: default=False, if False, test for closed, otherwise open.
+    @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
+    """
+    test = not(not(test))  # ensure test is True or False
+    all_ports = list(itertools.chain(*services.values()))
+    ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
+    map_ports = OrderedDict()
+    matched_ports = [p for p, opened in zip(all_ports, ports_states)
+                     if opened == test]  # essentially opened xor test
+    for service, ports in services.items():
+        set_ports = set(ports).intersection(matched_ports)
+        if set_ports:
+            map_ports[service] = set_ports
+    return map_ports, ports_states
+
+
+def _check_listening_on_ports_list(ports):
+    """Check that the ports in the given list are being listened to
+
+    Returns a list of ports being listened to and a list of the
+    booleans.
+
+    @param ports: LIST of port numbers.
+    @returns [(port_num, boolean), ...], [boolean]
+    """
+    ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+    return zip(ports, ports_open), ports_open
+
+
+def _filter_tuples(services_states, state):
+    """Return a simple list from a list of tuples according to the condition
+
+    @param services_states: LIST of (string, boolean): service and running
+        state.
+    @param state: Boolean to match the tuple against.
+    @returns [LIST of strings] that matched the tuple RHS.
+    """
+    return [s for s, b in services_states if b == state]
+
+
+def workload_state_compare(current_workload_state, workload_state):
+    """ Return highest priority of two states"""
+    hierarchy = {'unknown': -1,
+                 'active': 0,
+                 'maintenance': 1,
+                 'waiting': 2,
+                 'blocked': 3,
+                 }
+
+    if hierarchy.get(workload_state) is None:
+        workload_state = 'unknown'
+    if hierarchy.get(current_workload_state) is None:
+        current_workload_state = 'unknown'
+
+    # Set workload_state based on hierarchy of statuses
+    if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
+        return current_workload_state
+    else:
+        return workload_state
+
+
+def incomplete_relation_data(configs, required_interfaces):
+    """Check complete contexts against required_interfaces
+    Return dictionary of incomplete relation data.
+
+    configs is an OSConfigRenderer object with configs registered
+
+    required_interfaces is a dictionary of required general interfaces
+    with dictionary values of possible specific interfaces.
+    Example:
+       required_interfaces = {'database': ['shared-db', 'pgsql-db']}
+
+    The interface is said to be satisfied if any one of the interfaces in the
+    list has a complete context.
+
+    Return dictionary of incomplete or missing required contexts with relation
+    status of interfaces and any missing data points.
+    Example:
+       {'message':
+            {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+             'zeromq-configuration': {'related': False}},
+        'identity':
+            {'identity-service': {'related': False}},
+        'database':
+            {'pgsql-db': {'related': False},
+             'shared-db': {'related': True}}}
+    """
+    complete_ctxts = configs.complete_contexts()
+    incomplete_relations = [
+        svc_type
+        for svc_type, interfaces in required_interfaces.items()
+        if not set(interfaces).intersection(complete_ctxts)]
+    return {
+        i: configs.get_incomplete_context_data(required_interfaces[i])
+        for i in incomplete_relations}
+
+
+def do_action_openstack_upgrade(package, upgrade_callback, configs):
+    """Perform action-managed OpenStack upgrade.
+
+    Upgrades packages to the configured openstack-origin version and sets
+    the corresponding action status as a result.
+
+    If the charm was installed from source we cannot upgrade it.
+    For backwards compatibility a config flag (action-managed-upgrade) must
+    be set for this code to run, otherwise a full service level upgrade will
+    fire on config-changed.
+
+    @param package: package name for determining if upgrade available
+    @param upgrade_callback: function callback to charm's upgrade function
+    @param configs: templating object derived from OSConfigRenderer class
+
+    @return: True if upgrade successful; False if upgrade failed or skipped
+    """
+    ret = False
+
+    if openstack_upgrade_available(package):
+        if config('action-managed-upgrade'):
+            juju_log('Upgrading OpenStack release')
+
+            try:
+                upgrade_callback(configs=configs)
+                action_set({'outcome': 'success, upgrade completed.'})
+                ret = True
+            except Exception:
+                action_set({'outcome': 'upgrade failed, see traceback.'})
+                action_set({'traceback': traceback.format_exc()})
+                action_fail('do_openstack_upgrade resulted in an '
+                            'unexpected error')
+        else:
+            action_set({'outcome': 'action-managed-upgrade config is '
+                                   'False, skipped upgrade.'})
+    else:
+        action_set({'outcome': 'no upgrade available.'})
+
+    return ret
+
+
+def remote_restart(rel_name, remote_service=None):
+    trigger = {
+        'restart-trigger': str(uuid.uuid4()),
+    }
+    if remote_service:
+        trigger['remote-service'] = remote_service
+    for rid in relation_ids(rel_name):
+        # This subordinate can be related to two separate services using
+        # different subordinate relations so only issue the restart if
+        # the principal is connected down the relation we think it is
+        if related_units(relid=rid):
+            relation_set(relation_id=rid,
+                         relation_settings=trigger,
+                         )
+
+
+def check_actually_paused(services=None, ports=None):
+    """Check that services listed in the services object and ports
+    are actually closed (not listened to), to verify that the unit is
+    properly paused.
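+
+    For example (an illustrative sketch; the service name and port are
+    hypothetical):
+
+        state, message = check_actually_paused(services=['apache2'],
+                                               ports=[8080])
+        # state is 'blocked' plus a message if anything is still running
+        # or listening; otherwise None, None.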
+
+    @param services: See _extract_services_list_helper
+    @returns status, : string for status (None if okay)
+             message : string for problem for status_set
+    """
+    state = None
+    message = None
+    messages = []
+    if services is not None:
+        services = _extract_services_list_helper(services)
+        services_running, services_states = _check_running_services(services)
+        if any(services_states):
+            # there shouldn't be any running so this is a problem
+            messages.append("these services running: {}"
+                            .format(", ".join(
+                                _filter_tuples(services_running, True))))
+            state = "blocked"
+        ports_open, ports_open_bools = (
+            _check_listening_on_services_ports(services, True))
+        if any(ports_open_bools):
+            message_parts = {service: ", ".join([str(v) for v in open_ports])
+                             for service, open_ports in ports_open.items()}
+            message = ", ".join(
+                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
+            messages.append(
+                "these service:ports are open: {}".format(message))
+            state = 'blocked'
+    if ports is not None:
+        ports_open, bools = _check_listening_on_ports_list(ports)
+        if any(bools):
+            messages.append(
+                "these ports which should be closed, but are open: {}"
+                .format(", ".join([str(p) for p, v in ports_open if v])))
+            state = 'blocked'
+    if messages:
+        message = ("Services should be paused but {}"
+                   .format(", ".join(messages)))
+    return state, message
+
+
+def set_unit_paused():
+    """Set the unit to a paused state in the local kv() store.
+    This does NOT actually pause the unit
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-paused', True)
+
+
+def clear_unit_paused():
+    """Clear the unit from a paused state in the local kv() store
+    This does NOT actually restart any services - it only clears the
+    local state.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-paused', False)
+
+
+def is_unit_paused_set():
+    """Return the state of the kv().get('unit-paused').
+    This does NOT verify that the unit really is paused.
+
+    To help with units that don't have HookData() (testing)
+    if it raises an exception, return False
+    """
+    try:
+        with unitdata.HookData()() as t:
+            kv = t[0]
+            # transform something truth-y into a Boolean.
+            return not(not(kv.get('unit-paused')))
+    except Exception:
+        return False
+
+
+def manage_payload_services(action, services=None, charm_func=None):
+    """Run an action against all services.
+
+    An optional charm_func() can be called. It should raise an Exception to
+    indicate that the function failed. If it was successful it should return
+    None or an optional message.
+
+    The signature for charm_func is:
+        charm_func() -> message: str
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    :param action: Action to run: pause, resume, start or stop.
+    :type action: str
+    :param services: See above
+    :type services: See above
+    :param charm_func: function to run for custom charm pausing.
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
+def pause_unit(assess_status_func, services=None, ports=None,
+               charm_func=None):
+    """Pause a unit by stopping the services and setting 'unit-paused'
+    in the local kv() store.
+
+    Also checks that the services have stopped and ports are no longer
+    being listened to.
+
+    An optional charm_func() can be called that can either raise an
+    Exception or return a value other than None to indicate that the unit
+    didn't pause cleanly.
+
+    The signature for charm_func is:
+        charm_func() -> message: string
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    @param assess_status_func: (f() -> message: string | None) or None
+    @param services: OPTIONAL see above
+    @param ports: OPTIONAL list of ports
+    @param charm_func: function to run for custom charm pausing.
+    @returns None
+    @raises Exception(message) on an error for action_fail().
+    """
+    _, messages = manage_payload_services(
+        'pause',
+        services=services,
+        charm_func=charm_func)
+    set_unit_paused()
+    if assess_status_func:
+        message = assess_status_func()
+        if message:
+            messages.append(message)
+    if messages and not is_unit_upgrading_set():
+        raise Exception("Couldn't pause: {}".format("; ".join(messages)))
+
+
+def resume_unit(assess_status_func, services=None, ports=None,
+                charm_func=None):
+    """Resume a unit by starting the services and clearing 'unit-paused'
+    in the local kv() store.
+
+    Also checks that the services have started and ports are being listened to.
+
+    An optional charm_func() can be called that can either raise an
+    Exception or return a value other than None to indicate that the unit
+    didn't resume cleanly.
+
+    The signature for charm_func is:
+        charm_func() -> message: string
+
+    charm_func() is executed after any services are started, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    @param assess_status_func: (f() -> message: string | None) or None
+    @param services: OPTIONAL see above
+    @param ports: OPTIONAL list of ports
+    @param charm_func: function to run for custom charm resuming.
+    @returns None
+    @raises Exception(message) on an error for action_fail().
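+
+    Example (an illustrative sketch; `my_assess_status` and the service
+    name are hypothetical):
+
+        resume_unit(my_assess_status, services=['apache2'])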
+    """
+    _, messages = manage_payload_services(
+        'resume',
+        services=services,
+        charm_func=charm_func)
+    clear_unit_paused()
+    if assess_status_func:
+        message = assess_status_func()
+        if message:
+            messages.append(message)
+    if messages:
+        raise Exception("Couldn't resume: {}".format("; ".join(messages)))
+
+
+def make_assess_status_func(*args, **kwargs):
+    """Creates an assess_status_func() suitable for handing to pause_unit()
+    and resume_unit().
+
+    This uses the _determine_os_workload_status(...) function to determine
+    what the workload_status should be for the unit.  If the unit is
+    not in maintenance or active states, then the message is returned to
+    the caller.  This is so an action that doesn't result in either a
+    complete pause or complete resume can signal failure with an action_fail()
+    """
+    def _assess_status_func():
+        state, message = _determine_os_workload_status(*args, **kwargs)
+        status_set(state, message)
+        if state not in ['maintenance', 'active']:
+            return message
+        return None
+
+    return _assess_status_func
+
+
+def pausable_restart_on_change(restart_map, stopstart=False,
+                               restart_functions=None):
+    """A restart_on_change decorator that checks to see if the unit is
+    paused. If it is paused then the restarts don't fire (the decorated
+    function itself still runs).
+
+    This is provided as a helper, as the @restart_on_change(...) decorator
+    is in core.host, yet the openstack specific helpers are in this file
+    (contrib.openstack.utils).  Thus, this needs to be an optional feature
+    for openstack charms (or charms that wish to use the openstack
+    pause/resume type features).
+
+    It is used as follows:
+
+        from contrib.openstack.utils import (
+            pausable_restart_on_change as restart_on_change)
+
+        @restart_on_change(restart_map, stopstart=<boolean>)
+        def some_hook(...):
+            pass
+
+    see core.utils.restart_on_change() for more details.
+
+    Note restart_map can be a callable, in which case, restart_map is only
+    evaluated at runtime.  This means that it is lazy and the underlying
+    function won't be called if the decorated function is never called.  Note,
+    retains backwards compatibility for passing a non-callable dictionary.
+
+    @param restart_map: (optionally callable, which then returns the
+        restart_map) the restart map {conf_file: [services]}
+    @param stopstart: DEFAULT false; whether to stop, start or just restart
+    @returns decorator to use a restart_on_change with pausability
+    """
+    def wrap(f):
+        # py27 compatible nonlocal variable.  When py3 only, replace with
+        # nonlocal keyword
+        __restart_map_cache = {'cache': None}
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            if is_unit_paused_set():
+                return f(*args, **kwargs)
+            if __restart_map_cache['cache'] is None:
+                __restart_map_cache['cache'] = restart_map() \
+                    if callable(restart_map) else restart_map
+            # otherwise, normal restart_on_change functionality
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), __restart_map_cache['cache'],
+                stopstart, restart_functions)
+        return wrapped_f
+    return wrap
+
+
+def ordered(orderme):
+    """Converts the provided dictionary into a collections.OrderedDict.
+
+    The items in the returned OrderedDict will be inserted based on the
+    natural sort order of the keys. Nested dictionaries will also be sorted
+    in order to ensure fully predictable ordering.
+
+    :param orderme: the dict to order
+    :return: collections.OrderedDict
+    :raises: ValueError: if `orderme` isn't a dict instance.
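+
+    Example (illustrative):
+
+        >>> ordered({'b': 1, 'a': {'d': 2, 'c': 3}})
+        OrderedDict([('a', OrderedDict([('c', 3), ('d', 2)])), ('b', 1)])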
+    """
+    if not isinstance(orderme, dict):
+        raise ValueError('argument must be a dict type')
+
+    result = OrderedDict()
+    for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]):
+        if isinstance(v, dict):
+            result[k] = ordered(v)
+        else:
+            result[k] = v
+
+    return result
+
+
+def config_flags_parser(config_flags):
+    """Parses config flags string into dict.
+
+    This parsing method supports a few different formats for the config
+    flag values to be parsed:
+
+      1. A string in the simple format of key=value pairs, with the possibility
+         of specifying multiple key value pairs within the same string. For
+         example, a string in the format of 'key1=value1, key2=value2' will
+         return a dict of:
+
+             {'key1': 'value1', 'key2': 'value2'}.
+
+      2. A string in the above format, but supporting a comma-delimited list
+         of values for the same key. For example, a string in the format of
+         'key1=value1, key2=value3,value4,value5' will return a dict of:
+
+             {'key1': 'value1', 'key2': 'value3,value4,value5'}
+
+      3. A string containing a colon character (:) prior to an equal
+         character (=) will be treated as yaml and parsed as such. This can be
+         used to specify more complex key value pairs. For example,
+         a string in the format of 'key1: subkey1=value1, subkey2=value2' will
+         return a dict of:
+
+             {'key1': 'subkey1=value1, subkey2=value2'}
+
+    The provided config_flags string may be a list of comma-separated values
+    which themselves may be comma-separated lists of values.
+    """
+    # If we find a colon before an equals sign then treat it as yaml.
+    # Note: limit it to finding the colon first since this indicates assignment
+    # for inline yaml.
+    colon = config_flags.find(':')
+    equals = config_flags.find('=')
+    if colon > 0:
+        if colon < equals or equals < 0:
+            return ordered(yaml.safe_load(config_flags))
+
+    if config_flags.find('==') >= 0:
+        juju_log("config_flags is not in expected format (key=value)",
+                 level=ERROR)
+        raise OSContextError
+
+    # strip the following from each value.
+    post_strippers = ' ,'
+    # we strip any leading/trailing '=' or ' ' from the string then
+    # split on '='.
+    split = config_flags.strip(' =').split('=')
+    limit = len(split)
+    flags = OrderedDict()
+    for i in range(0, limit - 1):
+        current = split[i]
+        next = split[i + 1]
+        vindex = next.rfind(',')
+        if (i == limit - 2) or (vindex < 0):
+            value = next
+        else:
+            value = next[:vindex]
+
+        if i == 0:
+            key = current
+        else:
+            # if this is not the first entry, expect an embedded key.
+            index = current.rfind(',')
+            if index < 0:
+                juju_log("Invalid config value(s) at index %s" % (i),
+                         level=ERROR)
+                raise OSContextError
+            key = current[index + 1:]
+
+        # Add to collection.
+        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
+
+    return flags
+
+
+def os_application_version_set(package):
+    '''Set version of application for Juju 2.0 and later'''
+    application_version = get_upstream_version(package)
+    # NOTE(jamespage) if not able to figure out package version, fallback to
+    # openstack codename version detection.
+    if not application_version:
+        application_version_set(os_release(package))
+    else:
+        application_version_set(application_version)
+
+
+def os_application_status_set(check_function):
+    """Run the supplied function and set the application status accordingly.
+
+    :param check_function: Function to run to get app states and messages.
+ :type check_function: function + """ + state, message = check_function() + status_set(state, message, application=True) + + +def enable_memcache(source=None, release=None, package=None): + """Determine if memcache should be enabled on the local unit + + @param release: release of OpenStack currently deployed + @param package: package to derive OpenStack version deployed + @returns boolean Whether memcache should be enabled + """ + _release = None + if release: + _release = release + else: + _release = os_release(package) + if not _release: + _release = get_os_codename_install_source(source) + + return CompareOpenStackReleases(_release) >= 'mitaka' + + +def token_cache_pkgs(source=None, release=None): + """Determine additional packages needed for token caching + + @param source: source string for charm + @param release: release of OpenStack currently deployed + @returns List of package to enable token caching + """ + packages = [] + if enable_memcache(source=source, release=release): + packages.extend(['memcached', 'python-memcache']) + return packages + + +def update_json_file(filename, items): + """Updates the json `filename` with a given dict. + :param filename: path to json file (e.g. /etc/glance/policy.json) + :param items: dict of items to update + """ + if not items: + return + + with open(filename) as fd: + policy = json.load(fd) + + # Compare before and after and if nothing has changed don't write the file + # since that could cause unnecessary service restarts. + before = json.dumps(policy, indent=4, sort_keys=True) + policy.update(items) + after = json.dumps(policy, indent=4, sort_keys=True) + if before == after: + return + + with open(filename, "w") as fd: + fd.write(after) + + +@cached +def snap_install_requested(): + """ Determine if installing from snaps + + If openstack-origin is of the form snap:track/channel[/branch] + and channel is in SNAPS_CHANNELS return True. + """ + origin = config('openstack-origin') or "" + if not origin.startswith('snap:'): + return False + + _src = origin[5:] + if '/' in _src: + channel = _src.split('/')[1] + else: + # Handle snap:track with no channel + channel = 'stable' + return valid_snap_channel(channel) + + +def get_snaps_install_info_from_origin(snaps, src, mode='classic'): + """Generate a dictionary of snap install information from origin + + @param snaps: List of snaps + @param src: String of openstack-origin or source of the form + snap:track/channel + @param mode: String classic, devmode or jailmode + @returns: Dictionary of snaps with channels and modes + """ + + if not src.startswith('snap:'): + juju_log("Snap source is not a snap origin", 'WARN') + return {} + + _src = src[5:] + channel = '--channel={}'.format(_src) + + return {snap: {'channel': channel, 'mode': mode} + for snap in snaps} + + +def install_os_snaps(snaps, refresh=False): + """Install OpenStack snaps from channel and with mode + + @param snaps: Dictionary of snaps with channels and modes of the form: + {'snap_name': {'channel': 'snap_channel', + 'mode': 'snap_mode'}} + Where channel is a snapstore channel and mode is --classic, --devmode + or --jailmode. 
+    @param refresh: If True, use snap refresh rather than snap install for
+        snaps that are already installed
+    """
+
+    def _ensure_flag(flag):
+        if flag.startswith('--'):
+            return flag
+        return '--{}'.format(flag)
+
+    if refresh:
+        for snap in snaps.keys():
+            snap_refresh(snap,
+                         _ensure_flag(snaps[snap]['channel']),
+                         _ensure_flag(snaps[snap]['mode']))
+    else:
+        for snap in snaps.keys():
+            snap_install(snap,
+                         _ensure_flag(snaps[snap]['channel']),
+                         _ensure_flag(snaps[snap]['mode']))
+
+
+def set_unit_upgrading():
+    """Set the unit to an upgrading state in the local kv() store.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', True)
+
+
+def clear_unit_upgrading():
+    """Clear the unit from an upgrading state in the local kv() store
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', False)
+
+
+def is_unit_upgrading_set():
+    """Return the state of the kv().get('unit-upgrading').
+
+    To help with units that don't have HookData() (testing)
+    if it raises an exception, return False
+    """
+    try:
+        with unitdata.HookData()() as t:
+            kv = t[0]
+            # transform something truth-y into a Boolean.
+            return not(not(kv.get('unit-upgrading')))
+    except Exception:
+        return False
+
+
+def series_upgrade_prepare(pause_unit_helper=None, configs=None):
+    """ Run common series upgrade prepare tasks.
+
+    :param pause_unit_helper: function: Function to pause unit
+    :param configs: OSConfigRenderer object: Configurations
+    :returns None:
+    """
+    set_unit_upgrading()
+    if pause_unit_helper and configs:
+        if not is_unit_paused_set():
+            pause_unit_helper(configs)
+
+
+def series_upgrade_complete(resume_unit_helper=None, configs=None):
+    """ Run common series upgrade complete tasks.
+
+    :param resume_unit_helper: function: Function to resume unit
+    :param configs: OSConfigRenderer object: Configurations
+    :returns None:
+    """
+    clear_unit_paused()
+    clear_unit_upgrading()
+    if configs:
+        configs.write_all()
+        if resume_unit_helper:
+            resume_unit_helper(configs)
+
+
+def is_db_initialised():
+    """Check leader storage to see if database has been initialised.
+
+    :returns: Whether DB has been initialised
+    :rtype: bool
+    """
+    db_initialised = None
+    if leader_get('db-initialised') is None:
+        juju_log(
+            'db-initialised key missing, assuming db is not initialised',
+            'DEBUG')
+        db_initialised = False
+    else:
+        db_initialised = bool_from_string(leader_get('db-initialised'))
+    juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG')
+    return db_initialised
+
+
+def set_db_initialised():
+    """Add flag to leader storage to indicate database has been initialised.
+    """
+    juju_log('Setting db-initialised to True', 'DEBUG')
+    leader_set({'db-initialised': True})
+
+
+def is_db_maintenance_mode(relid=None):
+    """Check relation data from notifications of db in maintenance mode.
+
+    :returns: Whether db has notified it is in maintenance mode.
+    :rtype: bool
+    """
+    juju_log('Checking for maintenance notifications', 'DEBUG')
+    if relid:
+        r_ids = [relid]
+    else:
+        r_ids = relation_ids('shared-db')
+    rids_units = [(r, u) for r in r_ids for u in related_units(r)]
+    notifications = []
+    for r_id, unit in rids_units:
+        settings = relation_get(unit=unit, rid=r_id)
+        for key, value in settings.items():
+            if value and key in DB_MAINTENANCE_KEYS:
+                juju_log(
+                    'Unit: {}, Key: {}, Value: {}'.format(unit, key, value),
+                    'DEBUG')
+                try:
+                    notifications.append(bool_from_string(value))
+                except ValueError:
+                    juju_log(
+                        'Could not discern bool from {}'.format(value),
+                        'WARN')
+    return True in notifications
+
+
+@cached
+def container_scoped_relations():
+    """Get all the container scoped relations
+
+    :returns: List of relation names
+    :rtype: List
+    """
+    md = metadata()
+    relations = []
+    for relation_type in ('provides', 'requires', 'peers'):
+        for relation in md.get(relation_type, []):
+            if md[relation_type][relation].get('scope') == 'container':
+                relations.append(relation)
+    return relations
+
+
+def is_db_ready(use_current_context=False, rel_name=None):
+    """Check remote database is ready to be used.
+
+    Database relations are expected to provide a list of 'allowed' units to
+    confirm that the database is ready for use by those units.
+
+    If db relation has provided this information and local unit is a member,
+    returns True otherwise False.
+
+    :param use_current_context: Whether to limit checks to current hook
+        context.
+    :type use_current_context: bool
+    :param rel_name: Name of relation to check
+    :type rel_name: string
+    :returns: Whether remote db is ready.
+    :rtype: bool
+    :raises: Exception
+    """
+    key = 'allowed_units'
+
+    rel_name = rel_name or 'shared-db'
+    this_unit = local_unit()
+
+    if use_current_context:
+        if relation_id() in relation_ids(rel_name):
+            rids_units = [(None, None)]
+        else:
+            raise Exception("use_current_context=True but not in {} "
+                            "rel hook contexts (currently in {})."
+                            .format(rel_name, relation_id()))
+    else:
+        rids_units = [(r_id, u)
+                      for r_id in relation_ids(rel_name)
+                      for u in related_units(r_id)]
+
+    for rid, unit in rids_units:
+        allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
+        if allowed_units and this_unit in allowed_units.split():
+            juju_log("This unit ({}) is in allowed unit list from {}".format(
+                this_unit,
+                unit), 'DEBUG')
+            return True
+
+    juju_log("This unit was not found in any allowed unit list")
+    return False
+
+
+def is_expected_scale(peer_relation_name='cluster'):
+    """Query juju goal-state to determine whether our peer- and dependency-
+    relations are at the expected scale.
+
+    Useful for deferring per unit per relation housekeeping work until we are
+    ready to complete it successfully and without unnecessary repetition.
+
+    Always returns True if version of juju used does not support goal-state.
+
+    :param peer_relation_name: Name of peer relation
+    :type peer_relation_name: string
+    :returns: True or False
+    :rtype: bool
+    """
+    def _get_relation_id(rel_type):
+        return next((rid for rid in relation_ids(reltype=rel_type)), None)
+
+    Relation = namedtuple('Relation', 'rel_type rel_id')
+    peer_rid = _get_relation_id(peer_relation_name)
+    # Units with no peers should still have a peer relation.
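+    # Without the peer relation, goal-state cannot be compared against the
+    # units actually related, so the unit cannot be at expected scale yet.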
+ if not peer_rid: + juju_log('Not at expected scale, no peer relation found', 'DEBUG') + return False + expected_relations = [ + Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))] + if expect_ha(): + expected_relations.append( + Relation( + rel_type='ha', + rel_id=_get_relation_id('ha'))) + juju_log( + 'Checking scale of {} relations'.format( + ','.join([r.rel_type for r in expected_relations])), + 'DEBUG') + try: + if (len(related_units(relid=peer_rid)) < + len(list(expected_peer_units()))): + return False + for rel in expected_relations: + if not rel.rel_id: + juju_log( + 'Expected to find {} relation, but it is missing'.format( + rel.rel_type), + 'DEBUG') + return False + # Goal state returns every unit even for container scoped + # relations but the charm only ever has a relation with + # the local unit. + if rel.rel_type in container_scoped_relations(): + expected_count = 1 + else: + expected_count = len( + list(expected_related_units(reltype=rel.rel_type))) + if len(related_units(relid=rel.rel_id)) < expected_count: + juju_log( + ('Not at expected scale, not enough units on {} ' + 'relation'.format(rel.rel_type)), + 'DEBUG') + return False + except NotImplementedError: + return True + juju_log('All checks have passed, unit is at expected scale', 'DEBUG') + return True + + +def get_peer_key(unit_name): + """Get the peer key for this unit. + + The peer key is the key a unit uses to publish its status down the peer + relation + + :param unit_name: Name of unit + :type unit_name: string + :returns: Peer key for given unit + :rtype: string + """ + return 'unit-state-{}'.format(unit_name.replace('/', '-')) + + +UNIT_READY = 'READY' +UNIT_NOTREADY = 'NOTREADY' +UNIT_UNKNOWN = 'UNKNOWN' +UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN] + + +def inform_peers_unit_state(state, relation_name='cluster'): + """Inform peers of the state of this unit. + + :param state: State of unit to publish + :type state: string + :param relation_name: Name of relation to publish state on + :type relation_name: string + """ + if state not in UNIT_STATES: + raise ValueError( + "Setting invalid state {} for unit".format(state)) + for r_id in relation_ids(relation_name): + relation_set(relation_id=r_id, + relation_settings={ + get_peer_key(local_unit()): state}) + + +def get_peers_unit_state(relation_name='cluster'): + """Get the state of all peers. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Unit states keyed on unit name. + :rtype: dict + :raises: ValueError + """ + r_ids = relation_ids(relation_name) + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + unit_states = {} + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN) + if unit_states[unit] not in UNIT_STATES: + raise ValueError( + "Unit in unknown state {}".format(unit_states[unit])) + return unit_states + + +def are_peers_ready(relation_name='cluster'): + """Check if all peers are ready. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Whether all units are ready. + :rtype: bool + """ + unit_states = get_peers_unit_state(relation_name) + return all(v == UNIT_READY for v in unit_states.values()) + + +def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): + """Inform peers if this unit is ready. + + The check function should return a tuple (state, message). 
A truthy
+    state marks the unit READY; any other value is reported as NOTREADY.
+
+    :param check_unit_ready_func: Function to run to check readiness
+    :type check_unit_ready_func: function
+    :param relation_name: Name of relation to check peers on.
+    :type relation_name: string
+    """
+    unit_ready, msg = check_unit_ready_func()
+    if unit_ready:
+        state = UNIT_READY
+    else:
+        state = UNIT_NOTREADY
+    juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG')
+    inform_peers_unit_state(state, relation_name)
+
+
+def check_api_unit_ready(check_db_ready=True):
+    """Check if this unit is ready.
+
+    :param check_db_ready: Include checks of database readiness.
+    :type check_db_ready: bool
+    :returns: Whether unit state is ready and status message
+    :rtype: (bool, str)
+    """
+    unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready)
+    return unit_state == WORKLOAD_STATES.ACTIVE, msg
+
+
+def get_api_unit_status(check_db_ready=True):
+    """Return a workload status and message for this unit.
+
+    :param check_db_ready: Include checks of database readiness.
+    :type check_db_ready: bool
+    :returns: Workload state and message
+    :rtype: (charmhelpers.core.hookenv.WORKLOAD_STATES, str)
+    """
+    unit_state = WORKLOAD_STATES.ACTIVE
+    msg = 'Unit is ready'
+    if is_db_maintenance_mode():
+        unit_state = WORKLOAD_STATES.MAINTENANCE
+        msg = 'Database in maintenance mode.'
+    elif is_unit_paused_set():
+        unit_state = WORKLOAD_STATES.BLOCKED
+        msg = 'Unit paused.'
+    elif check_db_ready and not is_db_ready():
+        unit_state = WORKLOAD_STATES.WAITING
+        msg = 'Allowed_units list provided but this unit not present'
+    elif not is_db_initialised():
+        unit_state = WORKLOAD_STATES.WAITING
+        msg = 'Database not initialised'
+    elif not is_expected_scale():
+        unit_state = WORKLOAD_STATES.WAITING
+        msg = 'Charm and its dependencies not yet at expected scale'
+    juju_log(msg, 'DEBUG')
+    return unit_state, msg
+
+
+def check_api_application_ready():
+    """Check if this application is ready.
+
+    :returns: Whether application state is ready and status message
+    :rtype: (bool, str)
+    """
+    app_state, msg = get_api_application_status()
+    return app_state == WORKLOAD_STATES.ACTIVE, msg
+
+
+def get_api_application_status():
+    """Return a workload status and message for this application.
+
+    :returns: Workload state and message
+    :rtype: (charmhelpers.core.hookenv.WORKLOAD_STATES, str)
+    """
+    app_state, msg = get_api_unit_status()
+    if app_state == WORKLOAD_STATES.ACTIVE:
+        if are_peers_ready():
+            return WORKLOAD_STATES.ACTIVE, 'Application Ready'
+        else:
+            return WORKLOAD_STATES.WAITING, 'Some units are not ready'
+    return app_state, msg
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/vaultlocker.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/vaultlocker.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ee6c1dba910b824467c2c34e0a3d3d0f3fd906d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/openstack/vaultlocker.py
@@ -0,0 +1,179 @@
+# Copyright 2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import charmhelpers.contrib.openstack.alternatives as alternatives
+import charmhelpers.contrib.openstack.context as context
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as host
+import charmhelpers.core.templating as templating
+import charmhelpers.core.unitdata as unitdata
+
+VAULTLOCKER_BACKEND = 'charm-vaultlocker'
+
+
+class VaultKVContext(context.OSContextGenerator):
+    """Vault KV context for interaction with vault-kv interfaces"""
+    interfaces = ['secrets-storage']
+
+    def __init__(self, secret_backend=None):
+        super(context.OSContextGenerator, self).__init__()
+        self.secret_backend = (
+            secret_backend or 'charm-{}'.format(hookenv.service_name())
+        )
+
+    def __call__(self):
+        try:
+            import hvac
+        except ImportError:
+            # BUG: #1862085 - if the relation is made to vault, but the
+            # 'encrypt' option is not set, then the charm errors with an
+            # import warning.  This catches that, logs a warning, and returns
+            # with an empty context.
+            hookenv.log("VaultKVContext: trying to use the hvac python module "
+                        "but it's not available.  Is the secrets-storage "
+                        "relation made, but the encrypt option not set?",
+                        level=hookenv.WARNING)
+            # return an empty context on hvac import error
+            return {}
+        ctxt = {}
+        # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323
+        db = unitdata.kv()
+        # currently known-good secret-id
+        secret_id = db.get('secret-id')
+
+        for relation_id in hookenv.relation_ids(self.interfaces[0]):
+            for unit in hookenv.related_units(relation_id):
+                data = hookenv.relation_get(unit=unit,
+                                            rid=relation_id)
+                vault_url = data.get('vault_url')
+                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
+                token = data.get('{}_token'.format(hookenv.local_unit()))
+
+                if all([vault_url, role_id, token]):
+                    token = json.loads(token)
+                    vault_url = json.loads(vault_url)
+
+                    # Tokens may change when secret_id's are being
+                    # reissued - if so use token to get new secret_id
+                    token_success = False
+                    try:
+                        secret_id = retrieve_secret_id(
+                            url=vault_url,
+                            token=token
+                        )
+                        token_success = True
+                    except hvac.exceptions.InvalidRequest:
+                        # Try next
+                        pass
+
+                    if token_success:
+                        db.set('secret-id', secret_id)
+                        db.flush()
+
+                        ctxt['vault_url'] = vault_url
+                        ctxt['role_id'] = json.loads(role_id)
+                        ctxt['secret_id'] = secret_id
+                        ctxt['secret_backend'] = self.secret_backend
+                        vault_ca = data.get('vault_ca')
+                        if vault_ca:
+                            ctxt['vault_ca'] = json.loads(vault_ca)
+
+                        self.complete = True
+                        break
+                    else:
+                        if secret_id:
+                            ctxt['vault_url'] = vault_url
+                            ctxt['role_id'] = json.loads(role_id)
+                            ctxt['secret_id'] = secret_id
+                            ctxt['secret_backend'] = self.secret_backend
+                            vault_ca = data.get('vault_ca')
+                            if vault_ca:
+                                ctxt['vault_ca'] = json.loads(vault_ca)
+
+            if self.complete:
+                break
+
+        if ctxt:
+            self.complete = True
+
+        return ctxt
+
+
+def write_vaultlocker_conf(context, priority=100):
+    """Write vaultlocker configuration to disk and install alternative
+
+    :param context: Dict of data from vault-kv relation
+    :ptype: context: dict
+    :param priority: Priority of alternative configuration
+    :ptype: priority: int"""
+    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
+        hookenv.service_name()
+    )
+    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
+    templating.render(source='vaultlocker.conf.j2',
+                      target=charm_vl_path,
+                      context=context, perms=0o600)
+    
alternatives.install_alternative('vaultlocker.conf', + '/etc/vaultlocker/vaultlocker.conf', + charm_vl_path, priority) + + +def vault_relation_complete(backend=None): + """Determine whether vault relation is complete + + :param backend: Name of secrets backend requested + :ptype backend: string + :returns: whether the relation to vault is complete + :rtype: bool""" + try: + import hvac + except ImportError: + return False + try: + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + except hvac.exceptions.InvalidRequest: + return False + + +# TODO: contrib a high level unwrap method to hvac that works +def retrieve_secret_id(url, token): + """Retrieve a response-wrapped secret_id from Vault + + :param url: URL to Vault Server + :ptype url: str + :param token: One shot Token to use + :ptype token: str + :returns: secret_id to use for Vault Access + :rtype: str""" + import hvac + try: + # hvac 0.10.1 changed default adapter to JSONAdapter + client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request) + except AttributeError: + # hvac < 0.6.2 doesn't have adapter but uses the same response interface + client = hvac.Client(url=url, token=token) + else: + # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate + if not isinstance(client.adapter, hvac.adapters.Request): + client.adapter = hvac.adapters.Request(base_uri=url, token=token) + response = client._post('/v1/sys/wrapping/unwrap') + if response.status_code == 200: + data = response.json() + return data['data']['secret_id'] diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/peerstorage/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/peerstorage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a8fa60c2a3080d088b8e0abae370aa355c80ec56 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/peerstorage/__init__.py @@ -0,0 +1,267 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import six + +from charmhelpers.core.hookenv import relation_id as current_relation_id +from charmhelpers.core.hookenv import ( + is_relation_made, + relation_ids, + relation_get as _relation_get, + local_unit, + relation_set as _relation_set, + leader_get as _leader_get, + leader_set, + is_leader, +) + + +""" +This helper provides functions to support use of a peer relation +for basic key/value storage, with the added benefit that all storage +can be replicated across peer units. 
+
+Requirement to use:
+
+To use this, the "peer_echo()" method has to be called from the peer
+relation's relation-changed hook:
+
+@hooks.hook("cluster-relation-changed")  # Adapt this to your peer relation name
+def cluster_relation_changed():
+    peer_echo()
+
+Once this is done, you can use peer storage from anywhere:
+
+@hooks.hook("some-hook")
+def some_hook():
+    # You can store and retrieve key/values this way:
+    if is_relation_made("cluster"):  # from charmhelpers.core.hookenv
+        # There are peers available so we can work with peer storage
+        peer_store("mykey", "myvalue")
+        value = peer_retrieve("mykey")
+        print value
+    else:
+        print "No peers joined the relation, cannot share key/values :("
+"""
+
+
+def leader_get(attribute=None, rid=None):
+    """Wrapper to ensure that settings are migrated from the peer relation.
+
+    This is to support upgrading an environment that does not support
+    Juju leadership election to one that does.
+
+    If a setting is not extant in the leader-get but is on the relation-get
+    peer rel, it is migrated and marked as such so that it is not re-migrated.
+    """
+    migration_key = '__leader_get_migrated_settings__'
+    if not is_leader():
+        return _leader_get(attribute=attribute)
+
+    settings_migrated = False
+    leader_settings = _leader_get(attribute=attribute)
+    previously_migrated = _leader_get(attribute=migration_key)
+
+    if previously_migrated:
+        migrated = set(json.loads(previously_migrated))
+    else:
+        migrated = set([])
+
+    try:
+        if migration_key in leader_settings:
+            del leader_settings[migration_key]
+    except TypeError:
+        pass
+
+    if attribute:
+        if attribute in migrated:
+            return leader_settings
+
+        # If attribute not present in leader db, check if this unit has set
+        # the attribute in the peer relation
+        if not leader_settings:
+            peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
+                                         rid=rid)
+            if peer_setting:
+                leader_set(settings={attribute: peer_setting})
+                leader_settings = peer_setting
+
+        if leader_settings:
+            settings_migrated = True
+            migrated.add(attribute)
+    else:
+        r_settings = _relation_get(unit=local_unit(), rid=rid)
+        if r_settings:
+            for key in set(r_settings.keys()).difference(migrated):
+                # Leader setting wins
+                if not leader_settings.get(key):
+                    leader_settings[key] = r_settings[key]
+
+                settings_migrated = True
+                migrated.add(key)
+
+            if settings_migrated:
+                leader_set(**leader_settings)
+
+    if migrated and settings_migrated:
+        migrated = json.dumps(list(migrated))
+        leader_set(settings={migration_key: migrated})
+
+    return leader_settings
+
+
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
+    """Attempt to use leader-set if supported in the current version of Juju,
+    otherwise falls back on relation-set.
+
+    Note that we only attempt to use leader-set if the provided relation_id is
+    a peer relation id or no relation id is provided (in which case we assume
+    we are within the peer relation context).
+    """
+    try:
+        if relation_id in relation_ids('cluster'):
+            return leader_set(settings=relation_settings, **kwargs)
+        else:
+            raise NotImplementedError
+    except NotImplementedError:
+        return _relation_set(relation_id=relation_id,
+                             relation_settings=relation_settings, **kwargs)
+
+
+def relation_get(attribute=None, unit=None, rid=None):
+    """Attempt to use leader-get if supported in the current version of Juju,
+    otherwise falls back on relation-get.
+
+    Note that we only attempt to use leader-get if the provided rid is a peer
+    relation id or no relation id is provided (in which case we assume we are
+    within the peer relation context).
+    """
+    try:
+        if rid in relation_ids('cluster'):
+            return leader_get(attribute, rid)
+        else:
+            raise NotImplementedError
+    except NotImplementedError:
+        return _relation_get(attribute=attribute, rid=rid, unit=unit)
+
+
+def peer_retrieve(key, relation_name='cluster'):
+    """Retrieve a named key from peer relation `relation_name`."""
+    cluster_rels = relation_ids(relation_name)
+    if len(cluster_rels) > 0:
+        cluster_rid = cluster_rels[0]
+        return relation_get(attribute=key, rid=cluster_rid,
+                            unit=local_unit())
+    else:
+        raise ValueError('Unable to detect '
+                         'peer relation {}'.format(relation_name))
+
+
+def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
+                            inc_list=None, exc_list=None):
+    """ Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
+    inc_list = inc_list if inc_list else []
+    exc_list = exc_list if exc_list else []
+    peerdb_settings = peer_retrieve('-', relation_name=relation_name)
+    matched = {}
+    if peerdb_settings is None:
+        return matched
+    for k, v in peerdb_settings.items():
+        full_prefix = prefix + delimiter
+        if k.startswith(full_prefix):
+            new_key = k.replace(full_prefix, '')
+            if new_key in exc_list:
+                continue
+            if new_key in inc_list or len(inc_list) == 0:
+                matched[new_key] = v
+    return matched
+
+
+def peer_store(key, value, relation_name='cluster'):
+    """Store the key/value pair on the named peer relation `relation_name`."""
+    cluster_rels = relation_ids(relation_name)
+    if len(cluster_rels) > 0:
+        cluster_rid = cluster_rels[0]
+        relation_set(relation_id=cluster_rid,
+                     relation_settings={key: value})
+    else:
+        raise ValueError('Unable to detect '
+                         'peer relation {}'.format(relation_name))
+
+
+def peer_echo(includes=None, force=False):
+    """Echo filtered attributes back onto the same relation for storage.
+
+    This is a requirement to use the peerstorage module - it needs to be called
+    from the peer relation's changed hook.
+
+    If Juju leader support exists this will be a noop unless force is True.
+    """
+    try:
+        is_leader()
+    except NotImplementedError:
+        pass
+    else:
+        if not force:
+            return  # NOOP if leader-election is supported
+
+    # Use original non-leader calls
+    relation_get = _relation_get
+    relation_set = _relation_set
+
+    rdata = relation_get()
+    echo_data = {}
+    if includes is None:
+        echo_data = rdata.copy()
+        for ex in ['private-address', 'public-address']:
+            if ex in echo_data:
+                echo_data.pop(ex)
+    else:
+        for attribute, value in six.iteritems(rdata):
+            for include in includes:
+                if include in attribute:
+                    echo_data[attribute] = value
+    if len(echo_data) > 0:
+        relation_set(relation_settings=echo_data)
+
+
+def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
+                       peer_store_fatal=False, relation_settings=None,
+                       delimiter='_', **kwargs):
+    """Store passed-in arguments both in argument relation and in peer storage.
+
+    It functions like doing relation_set() and peer_store() at the same time,
+    with the same data.
+
+    @param relation_id: the id of the relation to store the data on. Defaults
+                        to the current relation.
+    @param peer_store_fatal: If set to True, the function will raise an
+                             exception should peer storage not be
+                             available."""
+
+    relation_settings = relation_settings if relation_settings else {}
+    relation_set(relation_id=relation_id,
+                 relation_settings=relation_settings,
+                 **kwargs)
+    if is_relation_made(peer_relation_name):
+        for key, value in six.iteritems(dict(list(kwargs.items()) +
+                                             list(relation_settings.items()))):
+            key_prefix = relation_id or current_relation_id()
+            peer_store(key_prefix + delimiter + key,
+                       value,
+                       relation_name=peer_relation_name)
+    else:
+        if peer_store_fatal:
+            raise ValueError('Unable to detect '
+                             'peer relation {}'.format(peer_relation_name))
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/python.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/python.py
new file mode 100644
index 0000000000000000000000000000000000000000..84cba8c4eba34fdd705f4ee39628ebd33b5175a2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/python.py
@@ -0,0 +1,21 @@
+# Copyright 2014-2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+# deprecated aliases for backwards compatibility
+from charmhelpers.fetch.python import debug  # noqa
+from charmhelpers.fetch.python import packages  # noqa
+from charmhelpers.fetch.python import rpdb  # noqa
+from charmhelpers.fetch.python import version  # noqa
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/saltstack/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/saltstack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d74f4039379b608b5c076fd96c906ff5237f1f83
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/saltstack/__init__.py
@@ -0,0 +1,116 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Charm Helpers saltstack - declare the state of your machines.
+
+This helper enables you to declare your machine state, rather than
+program it procedurally (and have to test each change to your procedures).
+Your install hook can be as simple as::
+
+    {{{
+    from charmhelpers.contrib.saltstack import (
+        install_salt_support,
+        update_machine_state,
+    )
+
+
+    def install():
+        install_salt_support()
+        update_machine_state('machine_states/dependencies.yaml')
+        update_machine_state('machine_states/installed.yaml')
+    }}}
+
+and won't need to change (nor will its tests) when you change the machine
+state.
+
+It's using a python package called salt-minion which allows various formats for
+specifying resources, such as::
+
+    {{{
+    /srv/{{ basedir }}:
+        file.directory:
+            - group: ubunet
+            - user: ubunet
+            - require:
+                - user: ubunet
+            - recurse:
+                - user
+                - group
+
+    ubunet:
+        group.present:
+            - gid: 1500
+        user.present:
+            - uid: 1500
+            - gid: 1500
+            - createhome: False
+            - require:
+                - group: ubunet
+    }}}
+
+The docs for all the different state definitions are at:
+    http://docs.saltstack.com/ref/states/all/
+
+
+TODO:
+  * Add test helpers which will ensure that machine state definitions
+    are functionally (but not necessarily logically) correct (i.e. getting
+    salt to parse all state defs).
+  * Add a link to a public bootstrap charm example / blogpost.
+  * Find a way to obviate the need to use the grains['charm_dir'] syntax
+    in templates.
+"""
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+#  Charm Helpers Developers
+import subprocess
+
+import charmhelpers.contrib.templating.contexts
+import charmhelpers.core.host
+import charmhelpers.core.hookenv
+# needed below for charmhelpers.fetch.apt_install()
+import charmhelpers.fetch
+
+
+salt_grains_path = '/etc/salt/grains'
+
+
+def install_salt_support(from_ppa=True):
+    """Installs the salt-minion helper for machine state.
+
+    By default the salt-minion package is installed from
+    the saltstack PPA. If from_ppa is False you must ensure
+    that the salt-minion package is available in the apt cache.
+    """
+    if from_ppa:
+        subprocess.check_call([
+            '/usr/bin/add-apt-repository',
+            '--yes',
+            'ppa:saltstack/salt',
+        ])
+        subprocess.check_call(['/usr/bin/apt-get', 'update'])
+    # We install salt-common as salt-minion would run the salt-minion
+    # daemon.
+    charmhelpers.fetch.apt_install('salt-common')
+
+
+def update_machine_state(state_path):
+    """Update the machine state using the provided state declaration."""
+    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
+        salt_grains_path)
+    subprocess.check_call([
+        'salt-call',
+        '--local',
+        'state.template',
+        state_path,
+    ])
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/ssl/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/ssl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d238b529e44ad2761d86e96b4845589f8e951c4
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/ssl/__init__.py
@@ -0,0 +1,92 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+from charmhelpers.core import hookenv
+
+
+def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
+    """Generate a self-signed SSL keypair
+
+    You must provide one of the 3 optional arguments:
+    config, subject or cn
+    If more than one is provided, the leftmost will be used
+
+    Arguments:
+    keyfile -- (required) full path to the keyfile to be created
+    certfile -- (required) full path to the certfile to be created
+    keysize -- (optional) SSL key length
+    config -- (optional) openssl configuration file
+    subject -- (optional) dictionary with SSL subject variables
+    cn -- (optional) certificate common name
+
+    Required keys in subject dict:
+    cn -- Common name (e.g. FQDN)
+
+    Optional keys in subject dict:
+    country -- Country Name (2 letter code)
+    state -- State or Province Name (full name)
+    locality -- Locality Name (eg, city)
+    organization -- Organization Name (eg, company)
+    organizational_unit -- Organizational Unit Name (eg, section)
+    email -- Email Address
+    """
+
+    cmd = []
+    if config:
+        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
+               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
+               "-keyout", keyfile,
+               "-out", certfile, "-config", config]
+    elif subject:
+        ssl_subject = ""
+        if "country" in subject:
+            ssl_subject = ssl_subject + "/C={}".format(subject["country"])
+        if "state" in subject:
+            ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
+        if "locality" in subject:
+            ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
+        if "organization" in subject:
+            ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
+        if "organizational_unit" in subject:
+            ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
+        if "cn" in subject:
+            ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
+        else:
+            hookenv.log("When using the \"subject\" argument you must "
+                        "provide the \"cn\" field at the very least")
+            return False
+        if "email" in subject:
+            ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
+
+        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
+               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
+               "-keyout", keyfile,
+               "-out", certfile, "-subj", ssl_subject]
+    elif cn:
+        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
+               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
+               "-keyout", keyfile,
+               "-out", certfile, "-subj", "/CN={}".format(cn)]
+
+    if not cmd:
+        hookenv.log("No config, subject or cn provided, "
+                    "unable to generate self-signed SSL certificates")
+        return False
+    try:
+        subprocess.check_call(cmd)
+        return True
+    except Exception as e:
+        print("Execution of openssl command failed:\n{}".format(e))
+        return False
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/ssl/service.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/ssl/service.py
new file mode 100644
index 0000000000000000000000000000000000000000..06b534ffa3b0de97a56bac4060c39a9a4485b0c2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/ssl/service.py
@@ -0,0 +1,277 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from os.path import join as path_join +from os.path import exists +import subprocess + +from charmhelpers.core.hookenv import log, DEBUG + +STD_CERT = "standard" + +# Mysql server is fairly picky about cert creation +# and types, spec its creation separately for now. +MYSQL_CERT = "mysql" + + +class ServiceCA(object): + + default_expiry = str(365 * 2) + default_ca_expiry = str(365 * 6) + + def __init__(self, name, ca_dir, cert_type=STD_CERT): + self.name = name + self.ca_dir = ca_dir + self.cert_type = cert_type + + ############### + # Hook Helper API + @staticmethod + def get_ca(type=STD_CERT): + service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0] + ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca') + ca = ServiceCA(service_name, ca_path, type) + ca.init() + return ca + + @classmethod + def get_service_cert(cls, type=STD_CERT): + service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0] + ca = cls.get_ca() + crt, key = ca.get_or_create_cert(service_name) + return crt, key, ca.get_ca_bundle() + + ############### + + def init(self): + log("initializing service ca", level=DEBUG) + if not exists(self.ca_dir): + self._init_ca_dir(self.ca_dir) + self._init_ca() + + @property + def ca_key(self): + return path_join(self.ca_dir, 'private', 'cacert.key') + + @property + def ca_cert(self): + return path_join(self.ca_dir, 'cacert.pem') + + @property + def ca_conf(self): + return path_join(self.ca_dir, 'ca.cnf') + + @property + def signing_conf(self): + return path_join(self.ca_dir, 'signing.cnf') + + def _init_ca_dir(self, ca_dir): + os.mkdir(ca_dir) + for i in ['certs', 'crl', 'newcerts', 'private']: + sd = path_join(ca_dir, i) + if not exists(sd): + os.mkdir(sd) + + if not exists(path_join(ca_dir, 'serial')): + with open(path_join(ca_dir, 'serial'), 'w') as fh: + fh.write('02\n') + + if not exists(path_join(ca_dir, 'index.txt')): + with open(path_join(ca_dir, 'index.txt'), 'w') as fh: + fh.write('') + + def _init_ca(self): + """Generate the root ca's cert and key. 
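+
+        Roughly: writes ca.cnf and signing.cnf from the templates at the
+        bottom of this module if they do not exist yet, then shells out to
+        ``openssl req`` to create a self-signed root certificate and key
+        valid for ``default_ca_expiry`` days. Raises RuntimeError if the
+        CA certificate or key is already present.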
+ """ + if not exists(path_join(self.ca_dir, 'ca.cnf')): + with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh: + fh.write( + CA_CONF_TEMPLATE % (self.get_conf_variables())) + + if not exists(path_join(self.ca_dir, 'signing.cnf')): + with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh: + fh.write( + SIGNING_CONF_TEMPLATE % (self.get_conf_variables())) + + if exists(self.ca_cert) or exists(self.ca_key): + raise RuntimeError("Initialized called when CA already exists") + cmd = ['openssl', 'req', '-config', self.ca_conf, + '-x509', '-nodes', '-newkey', 'rsa', + '-days', self.default_ca_expiry, + '-keyout', self.ca_key, '-out', self.ca_cert, + '-outform', 'PEM'] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + log("CA Init:\n %s" % output, level=DEBUG) + + def get_conf_variables(self): + return dict( + org_name="juju", + org_unit_name="%s service" % self.name, + common_name=self.name, + ca_dir=self.ca_dir) + + def get_or_create_cert(self, common_name): + if common_name in self: + return self.get_certificate(common_name) + return self.create_certificate(common_name) + + def create_certificate(self, common_name): + if common_name in self: + return self.get_certificate(common_name) + key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name) + self._create_certificate(common_name, key_p, csr_p, crt_p) + return self.get_certificate(common_name) + + def get_certificate(self, common_name): + if common_name not in self: + raise ValueError("No certificate for %s" % common_name) + key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + with open(crt_p) as fh: + crt = fh.read() + with open(key_p) as fh: + key = fh.read() + return crt, key + + def __contains__(self, common_name): + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + return exists(crt_p) + + def _create_certificate(self, common_name, key_p, csr_p, crt_p): + template_vars = self.get_conf_variables() + template_vars['common_name'] = common_name + subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % ( + template_vars) + + log("CA Create Cert %s" % common_name, level=DEBUG) + cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048', + '-nodes', '-days', self.default_expiry, + '-keyout', key_p, '-out', csr_p, '-subj', subj] + subprocess.check_call(cmd, stderr=subprocess.PIPE) + cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p] + subprocess.check_call(cmd, stderr=subprocess.PIPE) + + log("CA Sign Cert %s" % common_name, level=DEBUG) + if self.cert_type == MYSQL_CERT: + cmd = ['openssl', 'x509', '-req', + '-in', csr_p, '-days', self.default_expiry, + '-CA', self.ca_cert, '-CAkey', self.ca_key, + '-set_serial', '01', '-out', crt_p] + else: + cmd = ['openssl', 'ca', '-config', self.signing_conf, + '-extensions', 'req_extensions', + '-days', self.default_expiry, '-notext', + '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch'] + log("running %s" % " ".join(cmd), level=DEBUG) + subprocess.check_call(cmd, stderr=subprocess.PIPE) + + def get_ca_bundle(self): + with open(self.ca_cert) as fh: + return fh.read() + + +CA_CONF_TEMPLATE = """ +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = %(ca_dir)s +policy = policy_match +database = $dir/index.txt +serial = $dir/serial +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +certificate = $dir/cacert.pem 
+private_key = $dir/private/cacert.key +RANDFILE = $dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = ca_distinguished_name + +x509_extensions = ca_extensions + +[ ca_distinguished_name ] +organizationName = %(org_name)s +organizationalUnitName = %(org_unit_name)s Certificate Authority + + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ ca_extensions ] +basicConstraints = critical,CA:true +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = cRLSign, keyCertSign +""" + + +SIGNING_CONF_TEMPLATE = """ +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = %(ca_dir)s +policy = policy_match +database = $dir/index.txt +serial = $dir/serial +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +certificate = $dir/cacert.pem +private_key = $dir/private/cacert.key +RANDFILE = $dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = req_distinguished_name + +x509_extensions = req_extensions + +[ req_distinguished_name ] +organizationName = %(org_name)s +organizationalUnitName = %(org_unit_name)s machine resources +commonName = %(common_name)s + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ req_extensions ] +basicConstraints = CA:false +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = digitalSignature, keyEncipherment, keyAgreement +extendedKeyUsage = serverAuth, clientAuth +""" diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/bcache.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/bcache.py new file mode 100644 index 0000000000000000000000000000000000000000..605991e16a4238f4ed46fbf0791048187f375c5c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/bcache.py @@ -0,0 +1,74 @@ +# Copyright 2017 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import json + +from charmhelpers.core.hookenv import log + +stats_intervals = ['stats_day', 'stats_five_minute', + 'stats_hour', 'stats_total'] + +SYSFS = '/sys' + + +class Bcache(object): + """Bcache behaviour + """ + + def __init__(self, cachepath): + self.cachepath = cachepath + + @classmethod + def fromdevice(cls, devname): + return cls('{}/block/{}/bcache'.format(SYSFS, devname)) + + def __str__(self): + return self.cachepath + + def get_stats(self, interval): + """Get cache stats + """ + intervaldir = 'stats_{}'.format(interval) + path = "{}/{}".format(self.cachepath, intervaldir) + out = dict() + for elem in os.listdir(path): + out[elem] = open('{}/{}'.format(path, elem)).read().strip() + return out + + +def get_bcache_fs(): + """Return all cache sets + """ + cachesetroot = "{}/fs/bcache".format(SYSFS) + try: + dirs = os.listdir(cachesetroot) + except OSError: + log("No bcache fs found") + return [] + cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')]) + return cacheset + + +def get_stats_action(cachespec, interval): + """Action for getting bcache statistics for a given cachespec. + Cachespec can either be a device name, eg. 
'sdb', which will retrieve + cache stats for the given device, or 'global', which will retrieve stats + for all cachesets + """ + if cachespec == 'global': + caches = get_bcache_fs() + else: + caches = [Bcache.fromdevice(cachespec)] + res = dict((c.cachepath, c.get_stats(interval)) for c in caches) + return json.dumps(res, indent=4, separators=(',', ': ')) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/ceph.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 0000000000000000000000000000000000000000..814d5c72bc246c9536d3bb5975ade1c7fbe989e4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,1810 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import collections +import errno +import hashlib +import math +import six + +import os +import shutil +import json +import time +import uuid + +from subprocess import ( + check_call, + check_output, + CalledProcessError, +) +from charmhelpers.core.hookenv import ( + config, + service_name, + local_unit, + relation_get, + relation_ids, + relation_set, + related_units, + log, + DEBUG, + INFO, + WARNING, + ERROR, +) +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, + cmp_pkgrevno, +) +from charmhelpers.fetch import ( + apt_install, +) +from charmhelpers.core.unitdata import kv + +from charmhelpers.core.kernel import modprobe +from charmhelpers.contrib.openstack.utils import config_flags_parser + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} +""" + +# The number of placement groups per OSD to target for placement group +# calculations. This number is chosen as 100 due to the ceph PG Calc +# documentation recommending to choose 100 for clusters which are not +# expected to increase in the foreseeable future. Since the majority of the +# calculations are done on deployment, target the case of non-expanding +# clusters as the default. 
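+#
+# As a rough worked example (illustrative numbers only): with 10 eligible
+# OSDs, a pool expected to hold 10% of the data, and a replica count of 3,
+# Pool.get_pgs() below computes (100 * 10 * 0.10) // 3 = 33 and then rounds
+# down to the nearest power of two, giving 32 placement groups; the next
+# power of two up is only chosen when the nearest one sits more than 25%
+# below the raw value.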
+DEFAULT_PGS_PER_OSD_TARGET = 100 +DEFAULT_POOL_WEIGHT = 10.0 +LEGACY_PG_COUNT = 200 +DEFAULT_MINIMUM_PGS = 2 +AUTOSCALER_DEFAULT_PGS = 32 + + +class OsdPostUpgradeError(Exception): + """Error class for OSD post-upgrade operations.""" + pass + + +class OSDSettingConflict(Exception): + """Error class for conflicting osd setting requests.""" + pass + + +class OSDSettingNotAllowed(Exception): + """Error class for a disallowed setting.""" + pass + + +OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed) + +OSD_SETTING_WHITELIST = [ + 'osd heartbeat grace', + 'osd heartbeat interval', +] + + +def _order_dict_by_key(rdict): + """Convert a dictionary into an OrderedDict sorted by key. + + :param rdict: Dictionary to be ordered. + :type rdict: dict + :returns: Ordered Dictionary. + :rtype: collections.OrderedDict + """ + return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0])) + + +def get_osd_settings(relation_name): + """Consolidate requested osd settings from all clients. + + Consolidate requested osd settings from all clients. Check that the + requested setting is on the whitelist and it does not conflict with + any other requested settings. + + :returns: Dictionary of settings + :rtype: dict + + :raises: OSDSettingNotAllowed + :raises: OSDSettingConflict + """ + rel_ids = relation_ids(relation_name) + osd_settings = {} + for relid in rel_ids: + for unit in related_units(relid): + unit_settings = relation_get('osd-settings', unit, relid) or '{}' + unit_settings = json.loads(unit_settings) + for key, value in unit_settings.items(): + if key not in OSD_SETTING_WHITELIST: + msg = 'Illegal settings "{}"'.format(key) + raise OSDSettingNotAllowed(msg) + if key in osd_settings: + if osd_settings[key] != unit_settings[key]: + msg = 'Conflicting settings for "{}"'.format(key) + raise OSDSettingConflict(msg) + else: + osd_settings[key] = value + return _order_dict_by_key(osd_settings) + + +def send_osd_settings(): + """Pass on requested OSD settings to osd units.""" + try: + settings = get_osd_settings('client') + except OSD_SETTING_EXCEPTIONS as e: + # There is a problem with the settings, not passing them on. Update + # status will notify the user. + log(e, level=ERROR) + return + data = { + 'osd-settings': json.dumps(settings, sort_keys=True)} + for relid in relation_ids('osd'): + relation_set(relation_id=relid, + relation_settings=data) + + +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. + :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format( + value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if isinstance(value, six.string_types): + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError( + "Invalid valid_range list of {} for {}. 
" + "List must be [min,max]".format(valid_range, value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides an error message + """ + + def __init__(self, message): + super(PoolCreationError, self).__init__(message) + + +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). + """ + + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass + + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to remove. + :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(self.service, cache_pool) + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + elif mode == 'writeback': + pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'forward'] + if cmp_pkgrevno('ceph-common', '10.1') >= 0: + # Jewel added a mandatory flag + pool_forward_cmd.append('--yes-i-really-mean-it') + + check_call(pool_forward_cmd) + # Flush the cache and wait for it to return + check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, + device_class=None): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). 
+ + The number of placement groups are calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any of CRUSH set rules, + the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule, rather it is left to the user to tune + in the form of 'expected-osd-count' config option. + + :param pool_size: int. pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools has not been upgraded to + include an update to indicate their relative usage of the pools. + :param device_class: str. class of storage to use for basis of pgs + calculation; ceph supports nvme, ssd and hdd by default based + on presence of devices of each type in the deployment. + :return: int. The number of pgs to use. + """ + + # Note: This calculation follows the approach that is provided + # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. + validator(value=pool_size, valid_type=int) + + # Ensure that percent data is set to something - even with a default + # it can be set to None, which would wreak havoc below. + if percent_data is None: + percent_data = DEFAULT_POOL_WEIGHT + + # If the expected-osd-count is specified, then use the max between + # the expected-osd-count and the actual osd_count + osd_list = get_osds(self.service, device_class) + expected = config('expected-osd-count') or 0 + + if osd_list: + if device_class: + osd_count = len(osd_list) + else: + osd_count = max(expected, len(osd_list)) + + # Log a message to provide some insight if the calculations claim + # to be off because someone is setting the expected count and + # there are more OSDs in reality. Try to make a proper guess + # based upon the cluster itself. + if not device_class and expected and osd_count != expected: + log("Found more OSDs than provided expected count. " + "Using the actual count instead", INFO) + elif expected: + # Use the expected-osd-count in older ceph versions to allow for + # a more accurate pg calculations + osd_count = expected + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return LEGACY_PG_COUNT + + percent_data /= 100.0 + target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + + # NOTE: ensure a sane minimum number of PGS otherwise we don't get any + # reasonable data distribution in minimal OSD configurations + if num_pg < DEFAULT_MINIMUM_PGS: + num_pg = DEFAULT_MINIMUM_PGS + + # The CRUSH algorithm has a slight optimization for placement groups + # with powers of 2 so find the nearest power of 2. If the nearest + # power of 2 is more than 25% below the original value, the next + # highest value is used. 
To do this, find the nearest power of 2 such + # that 2^n <= num_pg, check to see if its within the 25% tolerance. + exponent = math.floor(math.log(num_pg, 2)) + nearest = 2 ** exponent + if (num_pg - nearest) > (num_pg * 0.25): + # Choose the next highest power of 2 since the nearest is more + # than 25% below the original value. + return int(nearest * 2) + else: + return int(nearest) + + +class ReplicatedPool(Pool): + def __init__(self, service, name, pg_num=None, replicas=2, + percent_data=10.0, app_name=None): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + self.percent_data = percent_data + if pg_num: + # Since the number of placement groups were specified, ensure + # that there aren't too many created. + max_pgs = self.get_pgs(self.replicas, 100.0) + self.pg_num = min(pg_num, max_pgs) + else: + self.pg_num = self.get_pgs(self.replicas, percent_data) + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' + + def create(self): + if not pool_exists(self.service, self.name): + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + # Create it + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + + try: + check_call(cmd) + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name), level=WARNING) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e), level=WARNING) + except CalledProcessError: + raise + + +# Default jerasure erasure coded pool +class ErasurePool(Pool): + def __init__(self, service, name, erasure_code_profile="default", + percent_data=10.0, app_name=None): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + self.percent_data = percent_data + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m. 
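+            # For example (matching create_erasure_profile's defaults
+            # further down: k=2 data chunks, m=1 coding chunks), the PG
+            # calculation below would use a pool size of 3.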
+            erasure_profile = get_erasure_profile(self.service,
+                                                  self.erasure_code_profile)
+
+            # Check for errors
+            if erasure_profile is None:
+                msg = ("Failed to discover erasure profile named "
+                       "{}".format(self.erasure_code_profile))
+                log(msg, level=ERROR)
+                raise PoolCreationError(msg)
+            if 'k' not in erasure_profile or 'm' not in erasure_profile:
+                # Error
+                msg = ("Unable to find k (data chunks) or m (coding chunks) "
+                       "in erasure profile {}".format(erasure_profile))
+                log(msg, level=ERROR)
+                raise PoolCreationError(msg)
+
+            k = int(erasure_profile['k'])
+            m = int(erasure_profile['m'])
+            pgs = self.get_pgs(k + m, self.percent_data)
+            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+            # Create it
+            if nautilus_or_later:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    '--pg-num-min={}'.format(
+                        min(AUTOSCALER_DEFAULT_PGS, pgs)
+                    ),
+                    self.name, str(pgs), str(pgs),
+                    'erasure', self.erasure_code_profile
+                ]
+            else:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    self.name, str(pgs), str(pgs),
+                    'erasure', self.erasure_code_profile
+                ]
+
+            try:
+                check_call(cmd)
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
+                if nautilus_or_later:
+                    # Ensure we set the expected pool ratio
+                    update_pool(client=self.service,
+                                pool=self.name,
+                                settings={'target_size_ratio': str(self.percent_data / 100.0)})
+                if 'pg_autoscaler' in enabled_manager_modules():
+                    try:
+                        enable_pg_autoscale(self.service, self.name)
+                    except CalledProcessError as e:
+                        log('Could not configure auto scaling for pool {}: {}'.format(
+                            self.name, e), level=WARNING)
+            except CalledProcessError:
+                raise
+
+
+def enabled_manager_modules():
+    """Return a list of enabled manager modules.
+
+    :rtype: List[str]
+    """
+    cmd = ['ceph', 'mgr', 'module', 'ls']
+    try:
+        modules = check_output(cmd)
+        if six.PY3:
+            modules = modules.decode('UTF-8')
+    except CalledProcessError as e:
+        log("Failed to list ceph modules: {}".format(e), WARNING)
+        return []
+    modules = json.loads(modules)
+    return modules['enabled_modules']
+
+
+def enable_pg_autoscale(service, pool_name):
+    """
+    Enable Ceph's PG autoscaler for the specified pool.
+
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types. The name of the pool to enable autoscaling on
+    :raise: CalledProcessError if the command fails
+    """
+    check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on'])
+
+
+def get_mon_map(service):
+    """
+    Returns the current monitor map.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :return: json string.
+    :raise: ValueError if the monmap fails to parse;
+        also raises CalledProcessError if the ceph command fails
+    """
+    try:
+        mon_status = check_output(['ceph', '--id', service,
+                                   'mon_status', '--format=json'])
+        if six.PY3:
+            mon_status = mon_status.decode('UTF-8')
+        try:
+            return json.loads(mon_status)
+        except ValueError as v:
+            log("Unable to parse mon_status json: {}. Error: {}"
+                .format(mon_status, str(v)))
+            raise
+    except CalledProcessError as e:
+        log("mon_status command failed with message: {}"
+            .format(str(e)))
+        raise
+
+
+def hash_monitor_names(service):
+    """
+    Uses the get_mon_map() function to get information about the monitor
+    cluster.
+    Hash the name of each monitor. Return a sorted list of monitor hashes
+    in an ascending order.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :rtype: list. Sorted list of hashed monitor names; each monmap entry
+        that gets hashed looks like: {
+        'name': 'ip-172-31-13-165',
+        'rank': 0,
+        'addr': '172.31.13.165:6789/0'}
+    """
+    try:
+        hash_list = []
+        monitor_list = get_mon_map(service=service)
+        if monitor_list['monmap']['mons']:
+            for mon in monitor_list['monmap']['mons']:
+                hash_list.append(
+                    hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
+            return sorted(hash_list)
+        else:
+            return None
+    except (ValueError, CalledProcessError):
+        raise
+
+
+def monitor_key_delete(service, key):
+    """
+    Deletes a key and value pair from the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to delete.
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'del', str(key)])
+    except CalledProcessError as e:
+        log("Monitor config-key del failed with message: {}".format(
+            e.output))
+        raise
+
+
+def monitor_key_set(service, key, value):
+    """
+    Sets a key value pair on the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to set.
+    :param value: The value to set. This will be converted to a string
+        before setting
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'put', str(key), str(value)])
+    except CalledProcessError as e:
+        log("Monitor config-key put failed with message: {}".format(
+            e.output))
+        raise
+
+
+def monitor_key_get(service, key):
+    """
+    Gets the value of an existing key in the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to search for.
+    :return: Returns the value of that key or None if not found.
+    """
+    try:
+        output = check_output(
+            ['ceph', '--id', service,
+             'config-key', 'get', str(key)]).decode('UTF-8')
+        return output
+    except CalledProcessError as e:
+        log("Monitor config-key get failed with message: {}".format(
+            e.output))
+        return None
+
+
+def monitor_key_exists(service, key):
+    """
+    Searches for the existence of a key in the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to search for
+    :return: Returns True if the key exists, False if not and raises an
+        exception if an unknown error occurs. :raise: CalledProcessError if
+        an unknown error occurs
+    """
+    try:
+        check_call(
+            ['ceph', '--id', service,
+             'config-key', 'exists', str(key)])
+        # I can return true here regardless because Ceph returns
+        # ENOENT if the key wasn't found
+        return True
+    except CalledProcessError as e:
+        if e.returncode == errno.ENOENT:
+            return False
+        else:
+            log("Unknown error from ceph config-key exists: {} {}".format(
+                e.returncode, e.output))
+            raise
+
+
+def get_erasure_profile(service, name):
+    """
+    Get an existing erasure code profile if it exists.
+    Returns json formatted output.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name: six.string_types. The profile name
+    :return: json formatted output, or None on error
+    """
+    try:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'erasure-code-profile', 'get',
+                            name, '--format=json'])
+        if six.PY3:
+            out = out.decode('UTF-8')
+        return json.loads(out)
+    except (CalledProcessError, OSError, ValueError):
+        return None
+
+
+def pool_set(service, pool_name, key, value):
+    """
+    Sets a value for a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param key: six.string_types
+    :param value:
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
+           str(value).lower()]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def snapshot_pool(service, pool_name, snapshot_name):
+    """
+    Snapshots a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_snapshot(service, pool_name, snapshot_name):
+    """
+    Remove a snapshot from a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
+    """
+    Set byte and/or object quotas on a RADOS pool in ceph.
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param pool_name: Name of pool
+    :type pool_name: str
+    :param max_bytes: Maximum bytes quota to apply
+    :type max_bytes: int
+    :param max_objects: Maximum objects quota to apply
+    :type max_objects: int
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
+    if max_bytes:
+        cmd = cmd + ['max_bytes', str(max_bytes)]
+    if max_objects:
+        cmd = cmd + ['max_objects', str(max_objects)]
+    check_call(cmd)
+
+
+def remove_pool_quota(service, pool_name):
+    """
+    Remove the byte quota from a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_erasure_profile(service, profile_name):
+    """
+    Remove an existing erasure code profile. Please see
+    http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+    for more details
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param profile_name: six.string_types
+    :return: None.
Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', + failure_domain='host', + data_chunks=2, coding_chunks=1, + locality=None, durability_estimator=None, + device_class=None): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', + 'room', 'root', 'row']) + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :param device_class: six.string_types + :return: None. Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks) + ] + if locality is not None and durability_estimator is not None: + raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 + # failure_domain changed in luminous + if luminous_or_later: + cmd.append('crush-failure-domain=' + failure_domain) + else: + cmd.append('ruleset-failure-domain=' + failure_domain) + + # device class new in luminous + if luminous_or_later and device_class: + cmd.append('crush-device-class={}'.format(device_class)) + else: + log('Skipping device class configuration (ceph < 12.0.0)', + level=DEBUG) + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name + :param service: six.string_types. The Ceph user name to run the command under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] + check_call(cmd) + + +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. 
The Ceph user name to run the command under + :param name: six.string_types + :return: int or None + """ + validator(value=name, valid_type=six.string_types) + try: + check_call(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name]) + return True + except CalledProcessError: + return False + + +def get_cache_mode(service, pool_name): + """ + Find the current caching mode of the pool_name given. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: int or None + """ + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) + out = check_output(['ceph', '--id', service, + 'osd', 'dump', '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + try: + osd_json = json.loads(out) + for pool in osd_json['pools']: + if pool['pool_name'] == pool_name: + return pool['cache_mode'] + return None + except ValueError: + raise + + +def pool_exists(service, name): + """Check to see if a RADOS pool already exists.""" + try: + out = check_output(['rados', '--id', service, 'lspools']) + if six.PY3: + out = out.decode('UTF-8') + except CalledProcessError: + return False + + return name in out.split() + + +def get_osds(service, device_class=None): + """Return a list of all Ceph Object Storage Daemons currently in the + cluster (optionally filtered by storage device class). + + :param device_class: Class of storage device for OSD's + :type device_class: str + """ + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 + if luminous_or_later and device_class: + out = check_output(['ceph', '--id', service, + 'osd', 'crush', 'class', + 'ls-osd', device_class, + '--format=json']) + else: + out = check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) + + +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]) + if six.PY3: + out = out.decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + +def update_pool(client, pool, settings): + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + +def set_app_name_for_pool(client, pool, name): + """ + Calls `osd pool application enable` for the specified pool name + + :param client: Name of the ceph client to use + :type client: str + :param pool: Pool to set app name for + :type pool: str + :param name: app name for the specified pool + :type name: str + + :raises: CalledProcessError if ceph call fails + """ + if cmp_pkgrevno('ceph-common', '12.0.0') >= 0: + cmd = ['ceph', '--id', client, 'osd', 'pool', + 'application', 'enable', pool, name] + check_call(cmd) + + +def create_pool(service, name, replicas=3, pg_num=None): + """Create a new RADOS pool.""" + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + + if not pg_num: + # 
Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] + check_call(cmd) + + update_pool(service, name, settings={'size': str(replicas)}) + + +def delete_pool(service, name): + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def add_key(service, key): + """ + Add a key to a keyring. + + Creates the keyring if it doesn't already exist. + + Logs and returns if the key is already in the keyring. + """ + keyring = _keyring_path(service) + if os.path.exists(keyring): + with open(keyring, 'r') as ring: + if key in ring.read(): + log('Ceph keyring exists at %s and has not changed.' % keyring, + level=DEBUG) + return + log('Updating existing keyring %s.' % keyring, level=DEBUG) + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] + check_call(cmd) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) + + +def create_keyring(service, key): + """Deprecated. Please use the more accurately named 'add_key'""" + return add_key(service, key) + + +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + """Create a file containing key.""" + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('Keyfile exists at %s.' % keyfile, level=WARNING) + return + + with open(keyfile, 'w') as fd: + fd.write(key) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" + hosts = [] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + + return hosts + + +def configure(service, key, auth, use_syslog): + """Perform basic configuration of Ceph.""" + add_key(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + """Determine whether a RADOS block device is mapped locally.""" + try: + out = check_output(['rbd', 'showmapped']) + if six.PY3: + out = out.decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def map_block_storage(service, pool, image): + """Map a RADOS block device for local use.""" + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + """Determine whether a filesytems is already mounted.""" + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + """Make a new filesystem on the specified block device.""" + count = 0 + e_noent = errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('Gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) + count += 1 + time.sleep(1) + else: + log('Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + """Migrate data in data_src_dst to blk_device and then remount.""" + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +def copy_files(src, dst, symlinks=False, ignore=None): + """Copy files from src to dst.""" + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. 
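+
+    A hypothetical invocation (all values are illustrative only) could
+    look like::
+
+        ensure_ceph_storage(service='mysql', pool='mysql', rbd_img='data',
+                            sizemb=1024, mount_point='/var/lib/mysql',
+                            blk_device='/dev/rbd1', fstype='ext4',
+                            system_services=['mysql'])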
+ """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None, + relation='ceph', key=None): + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. + + @returns boolean: Flag to indicate whether a key was successfully written + to disk based on either relation data or a supplied key + """ + if not key: + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + + if not key: + return False + + add_key(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + + return True + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, api_version=1, request_id=None): + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] + + def add_op(self, op): + """Add an op if it is not already in the list. + + :param op: Operation to add. + :type op: dict + """ + if op not in self.ops: + self.ops.append(op) + + def add_op_request_access_to_group(self, name, namespace=None, + permission=None, key_name=None, + object_prefix_permissions=None): + """ + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools or + object prefixes. object_prefix_permissions should be a dictionary + keyed on the permission with the corresponding value being a list + of prefixes to apply that permission to. 
+ { + 'rwx': ['prefix1', 'prefix2'], + 'class-read': ['prefix3']} + """ + self.add_op({ + 'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, + 'name': key_name or service_name(), + 'group-permission': permission, + 'object-prefix-permissions': object_prefix_permissions}) + + def add_op_create_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, max_objects=None): + """DEPRECATED: Use ``add_op_create_replicated_pool()`` or + ``add_op_create_erasure_pool()`` instead. + """ + return self.add_op_create_replicated_pool( + name, replica_count=replica_count, pg_num=pg_num, weight=weight, + group=group, namespace=namespace, app_name=app_name, + max_bytes=max_bytes, max_objects=max_objects) + + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, + max_objects=None): + """Adds an operation to create a replicated pool. + + :param name: Name of pool to create + :type name: str + :param replica_count: Number of copies Ceph should keep of your data. + :type replica_count: int + :param pg_num: Request specific number of Placement Groups to create + for pool. + :type pg_num: int + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: float + :param group: Group to add pool to + :type group: str + :param namespace: Group namespace + :type namespace: str + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + """ + if pg_num and weight: + raise ValueError('pg_num and weight are mutually exclusive') + + self.add_op({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight, 'group': group, + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) + + def add_op_create_erasure_pool(self, name, erasure_profile=None, + weight=None, group=None, app_name=None, + max_bytes=None, max_objects=None): + """Adds an operation to create a erasure coded pool. + + :param name: Name of pool to create + :type name: str + :param erasure_profile: Name of erasure code profile to use. If not + set the ceph-mon unit handling the broker + request will set its default value. + :type erasure_profile: str + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + :type weight: float + :param group: Group to add pool to + :type group: str + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. 
+        :type app_name: str
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: int
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: int
+        """
+        self.add_op({'op': 'create-pool', 'name': name,
+                     'pool-type': 'erasure',
+                     'erasure-profile': erasure_profile,
+                     'weight': weight,
+                     'group': group, 'app-name': app_name,
+                     'max-bytes': max_bytes, 'max-objects': max_objects})
+
+    def set_ops(self, ops):
+        """Set request ops to provided value.
+
+        Useful for injecting ops that come from a previous request
+        to allow comparisons to ensure validity.
+        """
+        self.ops = ops
+
+    @property
+    def request(self):
+        return json.dumps({'api-version': self.api_version, 'ops': self.ops,
+                           'request-id': self.request_id})
+
+    def _ops_equal(self, other):
+        if len(self.ops) == len(other.ops):
+            for req_no in range(0, len(self.ops)):
+                for key in [
+                        'replicas', 'name', 'op', 'pg_num', 'weight',
+                        'group', 'group-namespace', 'group-permission',
+                        'object-prefix-permissions']:
+                    if self.ops[req_no].get(key) != other.ops[req_no].get(key):
+                        return False
+        else:
+            return False
+        return True
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            return False
+        if self.api_version == other.api_version and \
+                self._ops_equal(other):
+            return True
+        else:
+            return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class CephBrokerRsp(object):
+    """Ceph broker response.
+
+    Response is json-decoded and contents provided as methods/properties.
+
+    The API is versioned and defaults to version 1.
+    """
+
+    def __init__(self, encoded_rsp):
+        self.api_version = None
+        self.rsp = json.loads(encoded_rsp)
+
+    @property
+    def request_id(self):
+        return self.rsp.get('request-id')
+
+    @property
+    def exit_code(self):
+        return self.rsp.get('exit-code')
+
+    @property
+    def exit_msg(self):
+        return self.rsp.get('stderr')
+
+
+# Ceph Broker Conversation:
+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
+# unique id so that the client can identify which CephBrokerRsp is associated
+# with the request. Ceph will also respond to each client unit individually
+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
+# via key broker-rsp-glance-0
+#
+# To use this the charm can just do something like:
+#
+# from charmhelpers.contrib.storage.linux.ceph import (
+#     send_request_if_needed,
+#     is_request_complete,
+#     CephBrokerRq,
+# )
+#
+# @hooks.hook('ceph-relation-changed')
+# def ceph_changed():
+#     rq = CephBrokerRq()
+#     rq.add_op_create_pool(name='poolname', replica_count=3)
+#
+#     if is_request_complete(rq):
+#         <charm dependent code (rq complete)>
+#     else:
+#         send_request_if_needed(get_ceph_request())
+#
+# CephBrokerRq and CephBrokerRsp are serialized into JSON.
Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + request_id=request_data['request-id']) + request.set_ops(request_data['ops']) + + return request + + +def get_request_states(request, relation='ceph'): + """Return a dict of requests per relation id with their corresponding + completion state. + + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ + complete = [] + requests = {} + for rid in relation_ids(relation): + complete = False + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_request_complete_for_rid(previous_request, rid) + else: + sent = False + complete = False + + requests[rid] = { + 'sent': sent, + 'complete': complete, + } + + return requests + + +def is_request_sent(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['sent']: + return False + + return True + + +def is_request_complete(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['complete']: + return False + + return True + + +def is_request_complete_for_rid(request, rid): + """Check if a given request has been completed on the given relation + + @param request: A CephBrokerRq object + @param rid: Relation ID + """ + broker_key = get_broker_rsp_key() + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == request.request_id: + if not rsp.exit_code: + return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. 
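+            # (Assumed data shapes, mirroring the example relation dump
+            # above: a unit-targeted reply lives under a per-unit key such
+            # as 'broker-rsp-glance-0', while the legacy reply is the
+            # shared 'broker_rsp' key checked below.)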
+            if rdata.get('broker_rsp'):
+                request_data = json.loads(rdata['broker_rsp'])
+                if request_data.get('request-id'):
+                    log('Ignoring legacy broker_rsp without unit key as remote '
+                        'service supports unit specific replies', level=DEBUG)
+                else:
+                    log('Using legacy broker_rsp as remote service does not '
+                        'support unit specific replies', level=DEBUG)
+                    rsp = CephBrokerRsp(rdata['broker_rsp'])
+                    if not rsp.exit_code:
+                        return True
+
+    return False
+
+
+def get_broker_rsp_key():
+    """Return broker response key for this unit
+
+    This is the key that ceph is going to use to pass request status
+    information back to this unit
+    """
+    return 'broker-rsp-' + local_unit().replace('/', '-')
+
+
+def send_request_if_needed(request, relation='ceph'):
+    """Send broker request if an equivalent request has not already been sent
+
+    @param request: A CephBrokerRq object
+    """
+    if is_request_sent(request, relation=relation):
+        log('Request already sent but not complete, not sending new request',
+            level=DEBUG)
+    else:
+        for rid in relation_ids(relation):
+            log('Sending request {}'.format(request.request_id), level=DEBUG)
+            relation_set(relation_id=rid, broker_req=request.request)
+
+
+def has_broker_rsp(rid=None, unit=None):
+    """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.
+
+    :param rid: The relation to check (default of None means current relation)
+    :type rid: Union[str, None]
+    :param unit: The remote unit to check (default of None means current unit)
+    :type unit: Union[str, None]
+    :returns: True if broker key exists and is set to something 'truthy'
+    :rtype: bool
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    return True if broker_rsp else False
+
+
+def is_broker_action_done(action, rid=None, unit=None):
+    """Check whether broker action has completed yet.
+
+    @param action: name of action to be performed
+    @returns True if action complete otherwise False
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    if not broker_rsp:
+        return False
+
+    rsp = CephBrokerRsp(broker_rsp)
+    unit_name = local_unit().partition('/')[2]
+    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
+    kvstore = kv()
+    val = kvstore.get(key=key)
+    if val and val == rsp.request_id:
+        return True
+
+    return False
+
+
+def mark_broker_action_done(action, rid=None, unit=None):
+    """Mark action as having been completed.
+
+    @param action: name of action to be performed
+    @returns None
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    if not broker_rsp:
+        return
+
+    rsp = CephBrokerRsp(broker_rsp)
+    unit_name = local_unit().partition('/')[2]
+    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
+    kvstore = kv()
+    kvstore.set(key=key, value=rsp.request_id)
+    kvstore.flush()
+
+
+class CephConfContext(object):
+    """Ceph config (ceph.conf) context.
+
+    Supports user-provided Ceph configuration settings. Users can provide a
+    dictionary as the value for the config-flags charm option containing
+    Ceph configuration settings keyed by their section in ceph.conf.
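+
+    For example (section and option names below are illustrative)::
+
+        config-flags: "{'global': {'mon osd down out interval': 300},
+                        'osd': {'osd journal size': 1024}}"
+
+    __call__() returns only the sections listed in permitted_sections,
+    or every section when none were supplied.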
+ """ + def __init__(self, permitted_sections=None): + self.permitted_sections = permitted_sections or [] + + def __call__(self): + conf = config('config-flags') + if not conf: + return {} + + conf = config_flags_parser(conf) + if not isinstance(conf, dict): + log("Provided config-flags is not a dictionary - ignoring", + level=WARNING) + return {} + + permitted = self.permitted_sections + if permitted: + diff = set(conf.keys()).difference(set(permitted)) + if diff: + log("Config-flags contains invalid keys '%s' - they will be " + "ignored" % (', '.join(diff)), level=WARNING) + + ceph_conf = {} + for key in conf: + if permitted and key not in permitted: + log("Ignoring key '%s'" % key, level=WARNING) + continue + + ceph_conf[key] = conf[key] + return ceph_conf + + +class CephOSDConfContext(CephConfContext): + """Ceph config (ceph.conf) context. + + Consolidates settings from config-flags via CephConfContext with + settings provided by the mons. The config-flag values are preserved in + conf['osd'], settings from the mons which do not clash with config-flag + settings are in conf['osd_from_client'] and finally settings which do + clash are in conf['osd_from_client_conflict']. Rather than silently drop + the conflicting settings they are provided in the context so they can be + rendered commented out to give some visability to the admin. + """ + + def __init__(self, permitted_sections=None): + super(CephOSDConfContext, self).__init__( + permitted_sections=permitted_sections) + try: + self.settings_from_mons = get_osd_settings('mon') + except OSDSettingConflict: + log( + "OSD settings from mons are inconsistent, ignoring them", + level=WARNING) + self.settings_from_mons = {} + + def filter_osd_from_mon_settings(self): + """Filter settings from client relation against config-flags. + + :returns: A tuple ( + ,config-flag values, + ,client settings which do not conflict with config-flag values, + ,client settings which confilct with config-flag values) + :rtype: (OrderedDict, OrderedDict, OrderedDict) + """ + ceph_conf = super(CephOSDConfContext, self).__call__() + conflicting_entries = {} + clear_entries = {} + for key, value in self.settings_from_mons.items(): + if key in ceph_conf.get('osd', {}): + if ceph_conf['osd'][key] != value: + conflicting_entries[key] = value + else: + clear_entries[key] = value + clear_entries = _order_dict_by_key(clear_entries) + conflicting_entries = _order_dict_by_key(conflicting_entries) + return ceph_conf, clear_entries, conflicting_entries + + def __call__(self): + """Construct OSD config context. + + Standard context with two additional special keys. + osd_from_client_conflict: client settings which confilct with + config-flag values + osd_from_client: settings which do not conflict with config-flag + values + + :returns: OSD config context dict. + :rtype: dict + """ + conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings() + conf['osd_from_client_conflict'] = osd_conflict + conf['osd_from_client'] = osd_clear + return conf diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/loopback.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/loopback.py new file mode 100644 index 0000000000000000000000000000000000000000..74bab40e43a978e3d9e1e2f9c8975368092145c0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/loopback.py @@ -0,0 +1,92 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from subprocess import ( + check_call, + check_output, +) + +import six + + +################################################## +# loopback device helpers. +################################################## +def loopback_devices(): + ''' + Parse through 'losetup -a' output to determine currently mapped + loopback devices. Output is expected to look like: + + /dev/loop0: [0807]:961814 (/tmp/my.img) + + or: + + /dev/loop0: [0807]:961814 (/tmp/my.img (deleted)) + + :returns: dict: a dict mapping {loopback_dev: backing_file} + ''' + loopbacks = {} + cmd = ['losetup', '-a'] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] + for dev, _, f in devs: + loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] + return loopbacks + + +def create_loopback(file_path): + ''' + Create a loopback device for a given backing file. + + :returns: str: Full path to new loopback device (eg, /dev/loop0) + ''' + file_path = os.path.abspath(file_path) + check_call(['losetup', '--find', file_path]) + for d, f in six.iteritems(loopback_devices()): + if f == file_path: + return d + + +def ensure_loopback_device(path, size): + ''' + Ensure a loopback device exists for a given backing file path and size. + If it a loopback device is not mapped to file, a new one will be created. + + TODO: Confirm size of found loopback device. + + :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) + ''' + for d, f in six.iteritems(loopback_devices()): + if f == path: + return d + + if not os.path.exists(path): + cmd = ['truncate', '--size', size, path] + check_call(cmd) + + return create_loopback(path) + + +def is_mapped_loopback_device(device): + """ + Checks if a given device name is an existing/mapped loopback device. + :param device: str: Full path to the device (eg, /dev/loop1). + :returns: str: Path to the backing file if is a loopback device + empty string otherwise + """ + return loopback_devices().get(device, "") diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/lvm.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/lvm.py new file mode 100644 index 0000000000000000000000000000000000000000..c8bde69263f0e917d32d0e5d70abba1409b26012 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/lvm.py @@ -0,0 +1,182 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +from subprocess import ( + CalledProcessError, + check_call, + check_output, + Popen, + PIPE, +) + + +################################################## +# LVM helpers. +################################################## +def deactivate_lvm_volume_group(block_device): + ''' + Deactivate any volume gruop associated with an LVM physical volume. + + :param block_device: str: Full path to LVM physical volume + ''' + vg = list_lvm_volume_group(block_device) + if vg: + cmd = ['vgchange', '-an', vg] + check_call(cmd) + + +def is_lvm_physical_volume(block_device): + ''' + Determine whether a block device is initialized as an LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: boolean: True if block device is a PV, False if not. + ''' + try: + check_output(['pvdisplay', block_device]) + return True + except CalledProcessError: + return False + + +def remove_lvm_physical_volume(block_device): + ''' + Remove LVM PV signatures from a given block device. + + :param block_device: str: Full path of block device to scrub. + ''' + p = Popen(['pvremove', '-ff', block_device], + stdin=PIPE) + p.communicate(input='y\n') + + +def list_lvm_volume_group(block_device): + ''' + List LVM volume group associated with a given block device. + + Assumes block device is a valid LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: str: Name of volume group associated with block device or None + ''' + vg = None + pvd = check_output(['pvdisplay', block_device]).splitlines() + for lvm in pvd: + lvm = lvm.decode('UTF-8') + if lvm.strip().startswith('VG Name'): + vg = ' '.join(lvm.strip().split()[2:]) + return vg + + +def create_lvm_physical_volume(block_device): + ''' + Initialize a block device as an LVM physical volume. + + :param block_device: str: Full path of block device to initialize. + + ''' + check_call(['pvcreate', block_device]) + + +def create_lvm_volume_group(volume_group, block_device): + ''' + Create an LVM volume group backed by a given block device. + + Assumes block device has already been initialized as an LVM PV. + + :param volume_group: str: Name of volume group to create. + :block_device: str: Full path of PV-initialized block device. 
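+
+    A typical sequence (device and VG names are illustrative)::
+
+        create_lvm_physical_volume('/dev/vdb')
+        create_lvm_volume_group('my-vg', '/dev/vdb')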
+ ''' + check_call(['vgcreate', volume_group, block_device]) + + +def list_logical_volumes(select_criteria=None, path_mode=False): + ''' + List logical volumes + + :param select_criteria: str: Limit list to those volumes matching this + criteria (see 'lvs -S help' for more details) + :param path_mode: bool: return logical volume name in 'vg/lv' format, this + format is required for some commands like lvextend + :returns: [str]: List of logical volumes + ''' + lv_diplay_attr = 'lv_name' + if path_mode: + # Parsing output logic relies on the column order + lv_diplay_attr = 'vg_name,' + lv_diplay_attr + cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings'] + if select_criteria: + cmd.extend(['--select', select_criteria]) + lvs = [] + for lv in check_output(cmd).decode('UTF-8').splitlines(): + if not lv: + continue + if path_mode: + lvs.append('/'.join(lv.strip().split())) + else: + lvs.append(lv.strip()) + return lvs + + +list_thin_logical_volume_pools = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^t') + +list_thin_logical_volumes = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^V') + + +def extend_logical_volume_by_device(lv_name, block_device): + ''' + Extends the size of logical volume lv_name by the amount of free space on + physical volume block_device. + + :param lv_name: str: name of logical volume to be extended (vg/lv format) + :param block_device: str: name of block_device to be allocated to lv_name + ''' + cmd = ['lvextend', lv_name, block_device] + check_call(cmd) + + +def create_logical_volume(lv_name, volume_group, size=None): + ''' + Create a new logical volume in an existing volume group + + :param lv_name: str: name of logical volume to be created. + :param volume_group: str: Name of volume group to use for the new volume. + :param size: str: Size of logical volume to create (100% if not supplied) + :raises subprocess.CalledProcessError: in the event that the lvcreate fails. + ''' + if size: + check_call([ + 'lvcreate', + '--yes', + '-L', + '{}'.format(size), + '-n', lv_name, volume_group + ]) + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + check_call([ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', lv_name, volume_group + ]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a35617606cf52d7cffc04ac245811b770fd95e8e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,128 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import re +from stat import S_ISBLK + +from subprocess import ( + CalledProcessError, + check_call, + check_output, + call +) + + +def _luks_uuid(dev): + """ + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return check_output(cmd).decode('UTF-8').strip() + except CalledProcessError: + return None + + +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header. + """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + if not os.path.exists(path): + return False + return S_ISBLK(os.stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as pat of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. + ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b + # sometimes sgdisk exits non-zero; this is OK, dd will clean up + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') + gpt_end = int(dev_end.split()[0]) - 100 + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=1M', 'count=1']) + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + + +def is_device_mounted(device): + '''Given a device path, return True if that device is mounted, and False + if it isn't. + + :param device: str: Full path of the device to check. + :returns: boolean: True if the path represents a mounted device, False if + it doesn't. + ''' + try: + out = check_output(['lsblk', '-P', device]).decode('UTF-8') + except Exception: + return False + return bool(re.search(r'MOUNTPOINT=".+"', out)) + + +def mkfs_xfs(device, force=False, inode_size=1024): + """Format device with XFS filesystem. + + By default this should fail if the device already has a filesystem on it. 
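+    For example (device path illustrative), mkfs_xfs('/dev/vdb1') runs
+    'mkfs.xfs -i size=1024 /dev/vdb1'; passing force=True adds '-f' to
+    overwrite an existing filesystem.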
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype force: boolean
+    :param inode_size: XFS inode size in bytes
+    :ptype inode_size: int"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', "size={}".format(inode_size), device]
+    check_call(cmd)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/contexts.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/contexts.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1adf94b133f41a168727a3cdd3a536633ff62b3
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/contexts.py
@@ -0,0 +1,137 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+#  Charm Helpers Developers
+"""A helper to create a yaml cache of config with namespaced relation data."""
+import os
+import yaml
+
+import six
+
+import charmhelpers.core.hookenv
+
+
+charm_dir = os.environ.get('CHARM_DIR', '')
+
+
+def dict_keys_without_hyphens(a_dict):
+    """Return a new dict with underscores instead of hyphens in keys."""
+    return dict(
+        (key.replace('-', '_'), val) for key, val in a_dict.items())
+
+
+def update_relations(context, namespace_separator=':'):
+    """Update the context with the relation data."""
+    # Add any relation data prefixed with the relation type.
+    relation_type = charmhelpers.core.hookenv.relation_type()
+    relations = []
+    context['current_relation'] = {}
+    if relation_type is not None:
+        relation_data = charmhelpers.core.hookenv.relation_get()
+        context['current_relation'] = relation_data
+        # Deprecated: the following use of relation data as keys
+        # directly in the context will be removed.
+ relation_data = dict( + ("{relation_type}{namespace_separator}{key}".format( + relation_type=relation_type, + key=key, + namespace_separator=namespace_separator), val) + for key, val in relation_data.items()) + relation_data = dict_keys_without_hyphens(relation_data) + context.update(relation_data) + relations = charmhelpers.core.hookenv.relations_of_type(relation_type) + relations = [dict_keys_without_hyphens(rel) for rel in relations] + + context['relations_full'] = charmhelpers.core.hookenv.relations() + + # the hookenv.relations() data structure is effectively unusable in + # templates and other contexts when trying to access relation data other + # than the current relation. So provide a more useful structure that works + # with any hook. + local_unit = charmhelpers.core.hookenv.local_unit() + relations = {} + for rname, rids in context['relations_full'].items(): + relations[rname] = [] + for rid, rdata in rids.items(): + data = rdata.copy() + if local_unit in rdata: + data.pop(local_unit) + for unit_name, rel_data in data.items(): + new_data = {'__relid__': rid, '__unit__': unit_name} + new_data.update(rel_data) + relations[rname].append(new_data) + context['relations'] = relations + + +def juju_state_to_yaml(yaml_path, namespace_separator=':', + allow_hyphens_in_keys=True, mode=None): + """Update the juju config and state in a yaml file. + + This includes any current relation-get data, and the charm + directory. + + This function was created for the ansible and saltstack + support, as those libraries can use a yaml file to supply + context to templates, but it may be useful generally to + create and update an on-disk cache of all the config, including + previous relation data. + + By default, hyphens are allowed in keys as this is supported + by yaml, but for tools like ansible, hyphens are not valid [1]. + + [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name + """ + config = charmhelpers.core.hookenv.config() + + # Add the charm_dir which we will need to refer to charm + # file resources etc. + config['charm_dir'] = charm_dir + config['local_unit'] = charmhelpers.core.hookenv.local_unit() + config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip() + config['unit_public_address'] = charmhelpers.core.hookenv.unit_get( + 'public-address' + ) + + # Don't use non-standard tags for unicode which will not + # work when salt uses yaml.load_safe. 
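+    # (e.g. under Python 2 this makes u'foo' dump as plain 'foo' rather
+    # than the '!!python/unicode' tagged form.)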
+ yaml.add_representer(six.text_type, + lambda dumper, value: dumper.represent_scalar( + six.u('tag:yaml.org,2002:str'), value)) + + yaml_dir = os.path.dirname(yaml_path) + if not os.path.exists(yaml_dir): + os.makedirs(yaml_dir) + + if os.path.exists(yaml_path): + with open(yaml_path, "r") as existing_vars_file: + existing_vars = yaml.load(existing_vars_file.read()) + else: + with open(yaml_path, "w+"): + pass + existing_vars = {} + + if mode is not None: + os.chmod(yaml_path, mode) + + if not allow_hyphens_in_keys: + config = dict_keys_without_hyphens(config) + existing_vars.update(config) + + update_relations(existing_vars, namespace_separator) + + with open(yaml_path, "w+") as fp: + fp.write(yaml.dump(existing_vars, default_flow_style=False)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/jinja.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/jinja.py new file mode 100644 index 0000000000000000000000000000000000000000..38d4fba0e651399064a14112398a0ef43717f5cd --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/jinja.py @@ -0,0 +1,38 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Templating using the python-jinja2 package. +""" +import six +from charmhelpers.fetch import apt_install, apt_update +try: + import jinja2 +except ImportError: + apt_update(fatal=True) + if six.PY3: + apt_install(["python3-jinja2"], fatal=True) + else: + apt_install(["python-jinja2"], fatal=True) + import jinja2 + + +DEFAULT_TEMPLATES_DIR = 'templates' + + +def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR): + templates = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_dir)) + template = templates.get_template(template_name) + return template.render(context) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/pyformat.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/pyformat.py new file mode 100644 index 0000000000000000000000000000000000000000..51a24dc42379fbee88d8952c578fc2498c852b0b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/templating/pyformat.py @@ -0,0 +1,27 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' +Templating using standard Python str.format() method. 
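+
+For example ('greeting' and 'name' are made-up keys supplied via kwargs,
+not part of the hook execution environment)::
+
+    render('{greeting}, {name}!', greeting='Hello', name='world')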
+'''
+
+from charmhelpers.core import hookenv
+
+
+def render(template, extra={}, **kwargs):
+    """Return the template rendered using Python's str.format()."""
+    context = hookenv.execution_environment()
+    context.update(extra)
+    context.update(kwargs)
+    return template.format(**context)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/unison/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/unison/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..61409b14c843766aa10cb957bb1c3d83ba87c6a7
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/contrib/unison/__init__.py
@@ -0,0 +1,314 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Easy file synchronization among peer units using ssh + unison.
+#
+# For the -joined, -changed, and -departed peer relations, add a call to
+# ssh_authorized_peers() describing the peer relation and the desired
+# user + group. After all peer relations have settled, all hosts should
+# be able to connect to one another via key auth'd ssh as the specified user.
+#
+# Other hooks are then free to synchronize files and directories using
+# sync_to_peers().
+#
+# For a peer relation named 'cluster', for example:
+#
+# cluster-relation-joined:
+#     ...
+#     ssh_authorized_peers(peer_interface='cluster',
+#                          user='juju_ssh', group='juju_ssh',
+#                          ensure_local_user=True)
+#     ...
+#
+# cluster-relation-changed:
+#     ...
+#     ssh_authorized_peers(peer_interface='cluster',
+#                          user='juju_ssh', group='juju_ssh',
+#                          ensure_local_user=True)
+#     ...
+#
+# cluster-relation-departed:
+#     ...
+#     ssh_authorized_peers(peer_interface='cluster',
+#                          user='juju_ssh', group='juju_ssh',
+#                          ensure_local_user=True)
+#     ...
+#
+# Hooks are now free to sync files as easily as:
+#
+# files = ['/etc/fstab', '/etc/apt.conf.d/']
+# sync_to_peers(peer_interface='cluster',
+#               user='juju_ssh', paths=files)
+#
+# It is assumed the charm itself has set up permissions on each unit
+# such that 'juju_ssh' has read + write permissions. Also assumed
+# that the calling charm takes care of leader delegation.
+# +# Additionally files can be synchronized only to an specific unit: +# sync_to_peer(slave_address, user='juju_ssh', +# paths=[files], verbose=False) + +import os +import pwd + +from copy import copy +from subprocess import check_call, check_output + +from charmhelpers.core.host import ( + adduser, + add_user_to_group, + pwgen, + remove_password_expiry, +) + +from charmhelpers.core.hookenv import ( + log, + hook_name, + relation_ids, + related_units, + relation_set, + relation_get, + unit_private_ip, + INFO, + ERROR, +) + +BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false', + '-fastcheck=true', '-group=false', '-owner=false', + '-prefer=newer', '-times=true'] + + +def get_homedir(user): + try: + user = pwd.getpwnam(user) + return user.pw_dir + except KeyError: + log('Could not get homedir for user %s: user exists?' % (user), ERROR) + raise Exception + + +def create_private_key(user, priv_key_path, key_type='rsa'): + types_bits = { + 'rsa': '2048', + 'ecdsa': '521', + } + if key_type not in types_bits: + log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR) + key_type = 'rsa' + if not os.path.isfile(priv_key_path): + log('Generating new SSH key for user %s.' % user) + cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type, + '-b', types_bits[key_type], '-f', priv_key_path] + check_call(cmd) + else: + log('SSH key already exists at %s.' % priv_key_path) + check_call(['chown', user, priv_key_path]) + check_call(['chmod', '0600', priv_key_path]) + + +def create_public_key(user, priv_key_path, pub_key_path): + if not os.path.isfile(pub_key_path): + log('Generating missing ssh public key @ %s.' % pub_key_path) + cmd = ['ssh-keygen', '-y', '-f', priv_key_path] + p = check_output(cmd).strip() + with open(pub_key_path, 'wb') as out: + out.write(p) + check_call(['chown', user, pub_key_path]) + + +def get_keypair(user): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + priv_key = os.path.join(ssh_dir, 'id_rsa') + pub_key = '%s.pub' % priv_key + + if not os.path.isdir(ssh_dir): + os.mkdir(ssh_dir) + check_call(['chown', '-R', user, ssh_dir]) + + create_private_key(user, priv_key) + create_public_key(user, priv_key, pub_key) + + with open(priv_key, 'r') as p: + _priv = p.read().strip() + + with open(pub_key, 'r') as p: + _pub = p.read().strip() + + return (_priv, _pub) + + +def write_authorized_keys(user, keys): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + auth_keys = os.path.join(ssh_dir, 'authorized_keys') + log('Syncing authorized_keys @ %s.' % auth_keys) + with open(auth_keys, 'w') as out: + for k in keys: + out.write('%s\n' % k) + + +def write_known_hosts(user, hosts): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + known_hosts = os.path.join(ssh_dir, 'known_hosts') + khosts = [] + for host in hosts: + cmd = ['ssh-keyscan', host] + remote_key = check_output(cmd, universal_newlines=True).strip() + khosts.append(remote_key) + log('Syncing known_hosts @ %s.' % known_hosts) + with open(known_hosts, 'w') as out: + for host in khosts: + out.write('%s\n' % host) + + +def ensure_user(user, group=None): + adduser(user, pwgen()) + if group: + add_user_to_group(user, group) + # Remove password expiry (Bug #1686085) + remove_password_expiry(user) + + +def ssh_authorized_peers(peer_interface, user, group=None, + ensure_local_user=False): + """ + Main setup function, should be called from both peer -changed and -joined + hooks with the same parameters. 
+ """ + if ensure_local_user: + ensure_user(user, group) + priv_key, pub_key = get_keypair(user) + hook = hook_name() + if hook == '%s-relation-joined' % peer_interface: + relation_set(ssh_pub_key=pub_key) + elif hook == '%s-relation-changed' % peer_interface or \ + hook == '%s-relation-departed' % peer_interface: + hosts = [] + keys = [] + + for r_id in relation_ids(peer_interface): + for unit in related_units(r_id): + ssh_pub_key = relation_get('ssh_pub_key', + rid=r_id, + unit=unit) + priv_addr = relation_get('private-address', + rid=r_id, + unit=unit) + if ssh_pub_key: + keys.append(ssh_pub_key) + hosts.append(priv_addr) + else: + log('ssh_authorized_peers(): ssh_pub_key ' + 'missing for unit %s, skipping.' % unit) + write_authorized_keys(user, keys) + write_known_hosts(user, hosts) + authed_hosts = ':'.join(hosts) + relation_set(ssh_authorized_hosts=authed_hosts) + + +def _run_as_user(user, gid=None): + try: + user = pwd.getpwnam(user) + except KeyError: + log('Invalid user: %s' % user) + raise Exception + uid = user.pw_uid + gid = gid or user.pw_gid + os.environ['HOME'] = user.pw_dir + + def _inner(): + os.setgid(gid) + os.setuid(uid) + return _inner + + +def run_as_user(user, cmd, gid=None): + return check_output(cmd, preexec_fn=_run_as_user(user, gid), cwd='/') + + +def collect_authed_hosts(peer_interface): + '''Iterate through the units on peer interface to find all that + have the calling host in its authorized hosts list''' + hosts = [] + for r_id in (relation_ids(peer_interface) or []): + for unit in related_units(r_id): + private_addr = relation_get('private-address', + rid=r_id, unit=unit) + authed_hosts = relation_get('ssh_authorized_hosts', + rid=r_id, unit=unit) + + if not authed_hosts: + log('Peer %s has not authorized *any* hosts yet, skipping.' % + (unit), level=INFO) + continue + + if unit_private_ip() in authed_hosts.split(':'): + hosts.append(private_addr) + else: + log('Peer %s has not authorized *this* host yet, skipping.' % + (unit), level=INFO) + return hosts + + +def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None, + fatal=False): + """Sync path to an specific peer host + + Propagates exception if operation fails and fatal=True. + """ + cmd = cmd or copy(BASE_CMD) + if not verbose: + cmd.append('-silent') + + # removing trailing slash from directory paths, unison + # doesn't like these. + if path.endswith('/'): + path = path[:(len(path) - 1)] + + cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)] + + try: + log('Syncing local path %s to %s@%s:%s' % (path, user, host, path)) + run_as_user(user, cmd, gid) + except Exception: + log('Error syncing remote files') + if fatal: + raise + + +def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None, + fatal=False): + """Sync paths to an specific peer host + + Propagates exception if any operation fails and fatal=True. + """ + if paths: + for p in paths: + sync_path_to_host(p, host, user, verbose, cmd, gid, fatal) + + +def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None, + gid=None, fatal=False): + """Sync all hosts to an specific path + + The type of group is integer, it allows user has permissions to + operate a directory have a different group id with the user id. + + Propagates exception if any operation fails and fatal=True. 
+ """ + if paths: + for host in collect_authed_hosts(peer_interface): + sync_to_peer(host, user, paths, verbose, cmd, gid, fatal) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/coordinator.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/coordinator.py new file mode 100644 index 0000000000000000000000000000000000000000..59bee3e5b320f6bffa0d89eef7a20888cddb0521 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/coordinator.py @@ -0,0 +1,606 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' +The coordinator module allows you to use Juju's leadership feature to +coordinate operations between units of a service. + +Behavior is defined in subclasses of coordinator.BaseCoordinator. +One implementation is provided (coordinator.Serial), which allows an +operation to be run on a single unit at a time, on a first come, first +served basis. You can trivially define more complex behavior by +subclassing BaseCoordinator or Serial. + +:author: Stuart Bishop + + +Services Framework Usage +======================== + +Ensure a peers relation is defined in metadata.yaml. Instantiate a +BaseCoordinator subclass before invoking ServiceManager.manage(). +Ensure that ServiceManager.manage() is wired up to the leader-elected, +leader-settings-changed, peers relation-changed and peers +relation-departed hooks in addition to any other hooks you need, or your +service will deadlock. + +Ensure calls to acquire() are guarded, so that locks are only requested +when they are really needed (and thus hooks only triggered when necessary). +Failing to do this and calling acquire() unconditionally will put your unit +into a hook loop. Calls to granted() do not need to be guarded. + +For example:: + + from charmhelpers.core import hookenv, services + from charmhelpers import coordinator + + def maybe_restart(servicename): + serial = coordinator.Serial() + if needs_restart(): + serial.acquire('restart') + if serial.granted('restart'): + hookenv.service_restart(servicename) + + services = [dict(service='servicename', + data_ready=[maybe_restart])] + + if __name__ == '__main__': + _ = coordinator.Serial() # Must instantiate before manager.manage() + manager = services.ServiceManager(services) + manager.manage() + + +You can implement a similar pattern using a decorator. If the lock has +not been granted, an attempt to acquire() it will be made if the guard +function returns True. If the lock has been granted, the decorated function +is run as normal:: + + from charmhelpers.core import hookenv, services + from charmhelpers import coordinator + + serial = coordinator.Serial() # Global, instatiated on module import. + + def needs_restart(): + [ ... Introspect state. Return True if restart is needed ... 
] + + @serial.require('restart', needs_restart) + def maybe_restart(servicename): + hookenv.service_restart(servicename) + + services = [dict(service='servicename', + data_ready=[maybe_restart])] + + if __name__ == '__main__': + manager = services.ServiceManager(services) + manager.manage() + + +Traditional Usage +================= + +Ensure a peers relation is defined in metadata.yaml. + +If you are using charmhelpers.core.hookenv.Hooks, ensure that a +BaseCoordinator subclass is instantiated before calling Hooks.execute. + +If you are not using charmhelpers.core.hookenv.Hooks, ensure +that a BaseCoordinator subclass is instantiated and its handle() +method called at the start of all your hooks. + +For example:: + + import sys + from charmhelpers.core import hookenv + from charmhelpers import coordinator + + hooks = hookenv.Hooks() + + def maybe_restart(): + serial = coordinator.Serial() + if serial.granted('restart'): + hookenv.service_restart('myservice') + + @hooks.hook + def config_changed(): + update_config() + serial = coordinator.Serial() + if needs_restart(): + serial.acquire('restart'): + maybe_restart() + + # Cluster hooks must be wired up. + @hooks.hook('cluster-relation-changed', 'cluster-relation-departed') + def cluster_relation_changed(): + maybe_restart() + + # Leader hooks must be wired up. + @hooks.hook('leader-elected', 'leader-settings-changed') + def leader_settings_changed(): + maybe_restart() + + [ ... repeat for *all* other hooks you are using ... ] + + if __name__ == '__main__': + _ = coordinator.Serial() # Must instantiate before execute() + hooks.execute(sys.argv) + + +You can also use the require decorator. If the lock has not been granted, +an attempt to acquire() it will be made if the guard function returns True. +If the lock has been granted, the decorated function is run as normal:: + + from charmhelpers.core import hookenv + + hooks = hookenv.Hooks() + serial = coordinator.Serial() # Must instantiate before execute() + + @require('restart', needs_restart) + def maybe_restart(): + hookenv.service_restart('myservice') + + @hooks.hook('install', 'config-changed', 'upgrade-charm', + # Peers and leader hooks must be wired up. + 'cluster-relation-changed', 'cluster-relation-departed', + 'leader-elected', 'leader-settings-changed') + def default_hook(): + [...] + maybe_restart() + + if __name__ == '__main__': + hooks.execute() + + +Details +======= + +A simple API is provided similar to traditional locking APIs. A lock +may be requested using the acquire() method, and the granted() method +may be used do to check if a lock previously requested by acquire() has +been granted. It doesn't matter how many times acquire() is called in a +hook. + +Locks are released at the end of the hook they are acquired in. This may +be the current hook if the unit is leader and the lock is free. It is +more likely a future hook (probably leader-settings-changed, possibly +the peers relation-changed or departed hook, potentially any hook). + +Whenever a charm needs to perform a coordinated action it will acquire() +the lock and perform the action immediately if acquisition is +successful. It will also need to perform the same action in every other +hook if the lock has been granted. + + +Grubby Details +-------------- + +Why do you need to be able to perform the same action in every hook? +If the unit is the leader, then it may be able to grant its own lock +and perform the action immediately in the source hook. 
If the unit is +the leader and cannot immediately grant the lock, then its only +guaranteed chance of acquiring the lock is in the peers relation-joined, +relation-changed or peers relation-departed hooks when another unit has +released it (the only channel to communicate to the leader is the peers +relation). If the unit is not the leader, then it is unlikely the lock +is granted in the source hook (a previous hook must have also made the +request for this to happen). A non-leader is notified about the lock via +leader settings. These changes may be visible in any hook, even before +the leader-settings-changed hook has been invoked. Or the requesting +unit may be promoted to leader after making a request, in which case the +lock may be granted in leader-elected or in a future peers +relation-changed or relation-departed hook. + +This could be simpler if leader-settings-changed was invoked on the +leader. We could then never grant locks except in +leader-settings-changed hooks giving one place for the operation to be +performed. Unfortunately this is not the case with Juju 1.23 leadership. + +But of course, this doesn't really matter to most people as most people +seem to prefer the Services Framework or similar reset-the-world +approaches, rather than the twisty maze of attempting to deduce what +should be done based on what hook happens to be running (which always +seems to evolve into reset-the-world anyway when the charm grows beyond +the trivial). + +I chose not to implement a callback model, where a callback was passed +to acquire to be executed when the lock is granted, because the callback +may become invalid between making the request and the lock being granted +due to an upgrade-charm being run in the interim. And it would create +restrictions, such no lambdas, callback defined at the top level of a +module, etc. Still, we could implement it on top of what is here, eg. +by adding a defer decorator that stores a pickle of itself to disk and +have BaseCoordinator unpickle and execute them when the locks are granted. +''' +from datetime import datetime +from functools import wraps +import json +import os.path + +from six import with_metaclass + +from charmhelpers.core import hookenv + + +# We make BaseCoordinator and subclasses singletons, so that if we +# need to spill to local storage then only a single instance does so, +# rather than having multiple instances stomp over each other. +class Singleton(type): + _instances = {} + + def __call__(cls, *args, **kwargs): + if cls not in cls._instances: + cls._instances[cls] = super(Singleton, cls).__call__(*args, + **kwargs) + return cls._instances[cls] + + +class BaseCoordinator(with_metaclass(Singleton, object)): + relid = None # Peer relation-id, set by __init__ + relname = None + + grants = None # self.grants[unit][lock] == timestamp + requests = None # self.requests[unit][lock] == timestamp + + def __init__(self, relation_key='coordinator', peer_relation_name=None): + '''Instatiate a Coordinator. + + Data is stored on the peers relation and in leadership storage + under the provided relation_key. + + The peers relation is identified by peer_relation_name, and defaults + to the first one found in metadata.yaml. + ''' + # Most initialization is deferred, since invoking hook tools from + # the constructor makes testing hard. + self.key = relation_key + self.relname = peer_relation_name + hookenv.atstart(self.initialize) + + # Ensure that handle() is called, without placing that burden on + # the charm author. 
They still need to do this manually if they + # are not using a hook framework. + hookenv.atstart(self.handle) + + def initialize(self): + if self.requests is not None: + return # Already initialized. + + assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+' + + if self.relname is None: + self.relname = _implicit_peer_relation_name() + + relids = hookenv.relation_ids(self.relname) + if relids: + self.relid = sorted(relids)[0] + + # Load our state, from leadership, the peer relationship, and maybe + # local state as a fallback. Populates self.requests and self.grants. + self._load_state() + self._emit_state() + + # Save our state if the hook completes successfully. + hookenv.atexit(self._save_state) + + # Schedule release of granted locks for the end of the hook. + # This needs to be the last of our atexit callbacks to ensure + # it will be run first when the hook is complete, because there + # is no point mutating our state after it has been saved. + hookenv.atexit(self._release_granted) + + def acquire(self, lock): + '''Acquire the named lock, non-blocking. + + The lock may be granted immediately, or in a future hook. + + Returns True if the lock has been granted. The lock will be + automatically released at the end of the hook in which it is + granted. + + Do not mindlessly call this method, as it triggers a cascade of + hooks. For example, if you call acquire() every time in your + peers relation-changed hook you will end up with an infinite loop + of hooks. It should almost always be guarded by some condition. + ''' + unit = hookenv.local_unit() + ts = self.requests[unit].get(lock) + if not ts: + # If there is no outstanding request on the peers relation, + # create one. + self.requests.setdefault(lock, {}) + self.requests[unit][lock] = _timestamp() + self.msg('Requested {}'.format(lock)) + + # If the leader has granted the lock, yay. + if self.granted(lock): + self.msg('Acquired {}'.format(lock)) + return True + + # If the unit making the request also happens to be the + # leader, it must handle the request now. Even though the + # request has been stored on the peers relation, the peers + # relation-changed hook will not be triggered. + if hookenv.is_leader(): + return self.grant(lock, unit) + + return False # Can't acquire lock, yet. Maybe next hook. + + def granted(self, lock): + '''Return True if a previously requested lock has been granted''' + unit = hookenv.local_unit() + ts = self.requests[unit].get(lock) + if ts and self.grants.get(unit, {}).get(lock) == ts: + return True + return False + + def requested(self, lock): + '''Return True if we are in the queue for the lock''' + return lock in self.requests[hookenv.local_unit()] + + def request_timestamp(self, lock): + '''Return the timestamp of our outstanding request for lock, or None. + + Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute. + ''' + ts = self.requests[hookenv.local_unit()].get(lock, None) + if ts is not None: + return datetime.strptime(ts, _timestamp_format) + + def handle(self): + if not hookenv.is_leader(): + return # Only the leader can grant requests. + + self.msg('Leader handling coordinator requests') + + # Clear our grants that have been released. + for unit in self.grants.keys(): + for lock, grant_ts in list(self.grants[unit].items()): + req_ts = self.requests.get(unit, {}).get(lock) + if req_ts != grant_ts: + # The request timestamp does not match the granted + # timestamp. 
Several hooks on 'unit' may have run + # before the leader got a chance to make a decision, + # and 'unit' may have released its lock and attempted + # to reacquire it. This will change the timestamp, + # and we correctly revoke the old grant putting it + # to the end of the queue. + ts = datetime.strptime(self.grants[unit][lock], + _timestamp_format) + del self.grants[unit][lock] + self.released(unit, lock, ts) + + # Grant locks + for unit in self.requests.keys(): + for lock in self.requests[unit]: + self.grant(lock, unit) + + def grant(self, lock, unit): + '''Maybe grant the lock to a unit. + + The decision to grant the lock or not is made for $lock + by a corresponding method grant_$lock, which you may define + in a subclass. If no such method is defined, the default_grant + method is used. See Serial.default_grant() for details. + ''' + if not hookenv.is_leader(): + return False # Not the leader, so we cannot grant. + + # Set of units already granted the lock. + granted = set() + for u in self.grants: + if lock in self.grants[u]: + granted.add(u) + if unit in granted: + return True # Already granted. + + # Ordered list of units waiting for the lock. + reqs = set() + for u in self.requests: + if u in granted: + continue # In the granted set. Not wanted in the req list. + for _lock, ts in self.requests[u].items(): + if _lock == lock: + reqs.add((ts, u)) + queue = [t[1] for t in sorted(reqs)] + if unit not in queue: + return False # Unit has not requested the lock. + + # Locate custom logic, or fallback to the default. + grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant) + + if grant_func(lock, unit, granted, queue): + # Grant the lock. + self.msg('Leader grants {} to {}'.format(lock, unit)) + self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock] + return True + + return False + + def released(self, unit, lock, timestamp): + '''Called on the leader when it has released a lock. + + By default, does nothing but log messages. Override if you + need to perform additional housekeeping when a lock is released, + for example recording timestamps. + ''' + interval = _utcnow() - timestamp + self.msg('Leader released {} from {}, held {}'.format(lock, unit, + interval)) + + def require(self, lock, guard_func, *guard_args, **guard_kw): + """Decorate a function to be run only when a lock is acquired. + + The lock is requested if the guard function returns True. + + The decorated function is called if the lock has been granted. + """ + def decorator(f): + @wraps(f) + def wrapper(*args, **kw): + if self.granted(lock): + self.msg('Granted {}'.format(lock)) + return f(*args, **kw) + if guard_func(*guard_args, **guard_kw) and self.acquire(lock): + return f(*args, **kw) + return None + return wrapper + return decorator + + def msg(self, msg): + '''Emit a message. Override to customize log spam.''' + hookenv.log('coordinator.{} {}'.format(self._name(), msg), + level=hookenv.INFO) + + def _name(self): + return self.__class__.__name__ + + def _load_state(self): + self.msg('Loading state') + + # All responses must be stored in the leadership settings. + # The leader cannot use local state, as a different unit may + # be leader next time. Which is fine, as the leadership + # settings are always available. + self.grants = json.loads(hookenv.leader_get(self.key) or '{}') + + local_unit = hookenv.local_unit() + + # All requests must be stored on the peers relation. This is + # the only channel units have to communicate with the leader. 
+ # Even the leader needs to store its requests here, as a + # different unit may be leader by the time the request can be + # granted. + if self.relid is None: + # The peers relation is not available. Maybe we are early in + # the units's lifecycle. Maybe this unit is standalone. + # Fallback to using local state. + self.msg('No peer relation. Loading local state') + self.requests = {local_unit: self._load_local_state()} + else: + self.requests = self._load_peer_state() + if local_unit not in self.requests: + # The peers relation has just been joined. Update any state + # loaded from our peers with our local state. + self.msg('New peer relation. Merging local state') + self.requests[local_unit] = self._load_local_state() + + def _emit_state(self): + # Emit this units lock status. + for lock in sorted(self.requests[hookenv.local_unit()].keys()): + if self.granted(lock): + self.msg('Granted {}'.format(lock)) + else: + self.msg('Waiting on {}'.format(lock)) + + def _save_state(self): + self.msg('Publishing state') + if hookenv.is_leader(): + # sort_keys to ensure stability. + raw = json.dumps(self.grants, sort_keys=True) + hookenv.leader_set({self.key: raw}) + + local_unit = hookenv.local_unit() + + if self.relid is None: + # No peers relation yet. Fallback to local state. + self.msg('No peer relation. Saving local state') + self._save_local_state(self.requests[local_unit]) + else: + # sort_keys to ensure stability. + raw = json.dumps(self.requests[local_unit], sort_keys=True) + hookenv.relation_set(self.relid, relation_settings={self.key: raw}) + + def _load_peer_state(self): + requests = {} + units = set(hookenv.related_units(self.relid)) + units.add(hookenv.local_unit()) + for unit in units: + raw = hookenv.relation_get(self.key, unit, self.relid) + if raw: + requests[unit] = json.loads(raw) + return requests + + def _local_state_filename(self): + # Include the class name. We allow multiple BaseCoordinator + # subclasses to be instantiated, and they are singletons, so + # this avoids conflicts (unless someone creates and uses two + # BaseCoordinator subclasses with the same class name, so don't + # do that). + return '.charmhelpers.coordinator.{}'.format(self._name()) + + def _load_local_state(self): + fn = self._local_state_filename() + if os.path.exists(fn): + with open(fn, 'r') as f: + return json.load(f) + return {} + + def _save_local_state(self, state): + fn = self._local_state_filename() + with open(fn, 'w') as f: + json.dump(state, f) + + def _release_granted(self): + # At the end of every hook, release all locks granted to + # this unit. If a hook neglects to make use of what it + # requested, it will just have to make the request again. + # Implicit release is the only way this will work, as + # if the unit is standalone there may be no future triggers + # called to do a manual release. + unit = hookenv.local_unit() + for lock in list(self.requests[unit].keys()): + if self.granted(lock): + self.msg('Released local {} lock'.format(lock)) + del self.requests[unit][lock] + + +class Serial(BaseCoordinator): + def default_grant(self, lock, unit, granted, queue): + '''Default logic to grant a lock to a unit. Unless overridden, + only one unit may hold the lock and it will be granted to the + earliest queued request. + + To define custom logic for $lock, create a subclass and + define a grant_$lock method. + + `unit` is the unit name making the request. + + `granted` is the set of units already granted the lock. It will + never include `unit`. It may be empty. 
+ + `queue` is the list of units waiting for the lock, ordered by time + of request. It will always include `unit`, but `unit` is not + necessarily first. + + Returns True if the lock should be granted to `unit`. + ''' + return unit == queue[0] and not granted + + +def _implicit_peer_relation_name(): + md = hookenv.metadata() + assert 'peers' in md, 'No peer relations in metadata.yaml' + return sorted(md['peers'].keys())[0] + + +# A human readable, sortable UTC timestamp format. +_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ' + + +def _utcnow(): # pragma: no cover + # This wrapper exists as mocking datetime methods is problematic. + return datetime.utcnow() + + +def _timestamp(): + return _utcnow().strftime(_timestamp_format) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/decorators.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..6ad41ee4121f4c0816935f8b16cd84f972aff22b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/decorators.py @@ -0,0 +1,55 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. 
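+
+    For example (an illustrative sketch, names are hypothetical), allow
+    three retries with a linearly growing delay::
+
+        @retry_on_exception(3, base_delay=5, exc_type=IOError)
+        def fetch_resource():
+            ...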
+ """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/files.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/files.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd82b75709c13da0d534bf4962822984a3c1867 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/files.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/fstab.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/fstab.py new file mode 100644 index 0000000000000000000000000000000000000000..d9fa9152c765c538adad3fd9bc45a46018c89b72 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/fstab.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import io +import os + +__author__ = 'Jorge Niedbalski R. ' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/hookenv.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/hookenv.py new file mode 100644 index 0000000000000000000000000000000000000000..db7ce7282b4c96c8a33abf309a340377216922ec --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/hookenv.py @@ -0,0 +1,1613 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from enum import Enum +from functools import wraps +from collections import namedtuple +import glob +import os +import json +import yaml +import re +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +from charmhelpers import deprecate + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +TRACE = "TRACE" +MARKER = object() +SH_MAX_ARG = 131071 + + +RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' + 'This may not be compatible with software you are ' + 'running in your shell.') + + +class WORKLOAD_STATES(Enum): + ACTIVE = 'active' + BLOCKED = 'blocked' + MAINTENANCE = 'maintenance' + WAITING = 'waiting' + + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +def function_log(message): + """Write a function progress message""" + command = ['function-log'] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing function-log should not cause failures in unit tests + # Send function_log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + message = "function-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. 
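+        # MARKER is a unique sentinel object, so attributes whose value
+        # is legitimately None are still distinguishable from missing ones.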
+ got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. + self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ + return local_unit().split('/')[0] + + +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. + """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. 
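+    # A subordinate charm declares 'subordinate: true' in its metadata;
+    # the first co-located unit found without that flag is returned as
+    # the principal.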
+ for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + if not md: + continue + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? + >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path) and os.stat(self.path).st_size: + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Found but was unable to parse previous config data, ' + 'ignoring which will report all values as changed - {}' + .format(str(e)), level=ERROR) + return + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. 
+ + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. + + """ + with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) + json.dump(self, f) + + def _implicit_save(self): + if self.implicit_save: + self.save() + + +_cache_config = None + + +def config(scope=None): + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. + :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] + try: + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) + if scope is not None: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. + if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
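+        # Write the settings as YAML to a temporary file, hand that file
+        # to relation-set, then remove it again.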
+ with tempfile.NamedTemporaryFile(delete=False) as settings_file: + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) + subprocess.check_call( + relation_cmd_line + ["--file", settings_file.name]) + os.remove(settings_file.name) + else: + for key, value in settings.items(): + if value is None: + relation_cmd_line.append('{}='.format(key)) + else: + relation_cmd_line.append('{}={}'.format(key, value)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + +@cached +def relation_ids(reltype=None): + """A list of relation_ids""" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] + return [] + + +@cached +def related_units(relid=None): + """A list of related units""" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] + + +def expected_peer_units(): + """Get a generator for units we expect to join peer relation based on + goal-state. + + The local unit is excluded from the result to make it easy to gauge + completion of all peers joining the relation with existing hook tools. + + Example usage: + log('peer {} of {} joined peer relation' + .format(len(related_units()), + len(list(expected_peer_units())))) + + This function will raise NotImplementedError if used with juju versions + without goal-state support. + + :returns: iterator + :rtype: types.GeneratorType + :raises: NotImplementedError + """ + if not has_juju_version("2.4.0"): + # goal-state first appeared in 2.4.0. + raise NotImplementedError("goal-state") + _goal_state = goal_state() + return (key for key in _goal_state['units'] + if '/' in key and key != local_unit()) + + +def expected_related_units(reltype=None): + """Get a generator for units we expect to join relation based on + goal-state. + + Note that you can not use this function for the peer relation, take a look + at expected_peer_units() for that. + + This function will raise KeyError if you request information for a + relation type for which juju goal-state does not have information. It will + raise NotImplementedError if used with juju versions without goal-state + support. + + Example usage: + log('participant {} of {} joined relation {}' + .format(len(related_units()), + len(list(expected_related_units())), + relation_type())) + + :param reltype: Relation type to list data for, default is to list data for + the realtion type we are currently executing a hook for. + :type reltype: str + :returns: iterator + :rtype: types.GeneratorType + :raises: KeyError, NotImplementedError + """ + if not has_juju_version("2.4.4"): + # goal-state existed in 2.4.0, but did not list individual units to + # join a relation in 2.4.1 through 2.4.3. 
(LP: #1794739) + raise NotImplementedError("goal-state relation unit count") + reltype = reltype or relation_type() + _goal_state = goal_state() + return (key for key in _goal_state['relations'][reltype] if '/' in key) + + +@cached +def relation_for_unit(unit=None, rid=None): + """Get the json represenation of a unit's relation""" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + """Get relations of a specific relation ID""" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + """Get relations of a specific type""" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + +def _metadata_unit(unit): + """Given the name of a unit (e.g. apache2/0), get the unit charm's + metadata.yaml. Very similar to metadata() but allows us to inspect + other units. Unit needs to be co-located, such as a subordinate or + principal/primary. + + :returns: metadata.yaml as a python object. + + """ + basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) + unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: + return yaml.safe_load(md) + + +@cached +def relation_types(): + """Get a list of relation types supported by this charm""" + rel_types = [] + md = metadata() + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + return rel_types + + +@cached +def peer_relation_id(): + '''Get the peers relation id if a peers relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + +@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. 
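+
+    For example, if metadata.yaml declared a ``cluster`` peer relation
+    with ``interface: coordinator`` (names purely illustrative), this
+    would return ``('peers', 'coordinator')``.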
+ """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. 
+ if not icmp: + raise + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _port_op('open-port', port, protocol) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _port_op('close-port', port, protocol) + + +def open_ports(start, end, protocol="TCP"): + """Opens a range of service network ports""" + _args = ['open-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def close_ports(start, end, protocol="TCP"): + """Close a range of service network ports""" + _args = ['close-port'] + _args.append('{}-{}/{}'.format(start, end, protocol)) + subprocess.check_call(_args) + + +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + +@cached +def unit_get(attribute): + """Get the unit ID for the remote unit""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +@cached +def storage_get(attribute=None, storage_id=None): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=None): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. + + Example:: + + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + pass # your code here + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + pass # your code here + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, config_save=None): + super(Hooks, self).__init__() + self._hooks = {} + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. 
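+        # (implicit_save controls whether Config.save() is run
+        # automatically at the end of a successful hook.)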
+ if config_save is not None: + config().implicit_save = config_save + + def register(self, name, function): + """Register a hook""" + self._hooks[name] = function + + def execute(self, args): + """Execute a registered hook based on args[0]""" + _run_atstart() + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + """Decorator, registering them as hooks""" + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +class NoNetworkBinding(Exception): + pass + + +def charm_dir(): + """Return the root directory of the current charm""" + d = os.environ.get('JUJU_CHARM_DIR') + if d is not None: + return d + return os.environ.get('CHARM_DIR') + + +def cmd_exists(cmd): + """Return True if the specified cmd exists in the path""" + return any( + os.access(os.path.join(path, cmd), os.X_OK) + for path in os.environ["PATH"].split(os.pathsep) + ) + + +@cached +@deprecate("moved to function_get()", log=log) +def action_get(key=None): + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_get`. + + Gets the value of an action parameter, or all key/value param pairs. + """ + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +@cached +def function_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['function-get'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-get'] + + if key is not None: + cmd.append(key) + cmd.append('--format=json') + function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return function_data + + +@deprecate("moved to function_set()", log=log) +def action_set(values): + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_set`. + + Sets the values to be returned after the action finishes. + """ + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def function_set(values): + """Sets the values to be returned after the function finishes""" + cmd = ['function-set'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-set'] + + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@deprecate("moved to function_fail()", log=log) +def action_fail(message): + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_fail`. + + Sets the action status to failed and sets the error message. + + The results set by action_set are preserved. + """ + subprocess.check_call(['action-fail', message]) + + +def function_fail(message): + """Sets the function status to failed and sets the error message. + + The results set by function_set are preserved.""" + cmd = ['function-fail'] + # Fallback for older charms. 
+ if not cmd_exists('function-fail'): + cmd = ['action-fail'] + cmd.append(message) + + subprocess.check_call(cmd) + + +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def function_name(): + """Get the name of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_NAME') or action_name() + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def function_id(): + """Get the ID of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_ID') or action_uuid() + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + +def function_tag(): + """Get the tag for the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() + + +def status_set(workload_state, message, application=False): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message instead. + + workload_state -- valid juju workload state. str or WORKLOAD_STATES + message -- status update message + application -- Whether this is an application state set + """ + bad_state_msg = '{!r} is not a valid workload state' + + if isinstance(workload_state, str): + try: + # Convert string to enum. + workload_state = WORKLOAD_STATES[workload_state.upper()] + except KeyError: + raise ValueError(bad_state_msg.format(workload_state)) + + if workload_state not in WORKLOAD_STATES: + raise ValueError(bad_state_msg.format(workload_state)) + + cmd = ['status-set'] + if application: + cmd.append('--application') + cmd.extend([workload_state.value, message]) + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state.value, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" + + """ + cmd = ['status-get', "--format=json", "--include-data"] + try: + raw_status = subprocess.check_output(cmd) + except OSError as e: + if e.errno == errno.ENOENT: + return ('unknown', "") + else: + raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + @wraps(f) + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. 
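+
+    Example::
+
+        application_version_set('9.5')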
""" + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +@cached +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.items(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. The and provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The and provided must match a payload that has been previously + registered with juju using payload-register. The must be one of the + follow: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def resource_get(name): + """used to fetch the resource path of the given name. + + must match a name of defined resource in metadata.yaml + + returns either a path or False if resource not available + """ + if not name: + return False + + cmd = ['resource-get', name] + try: + return subprocess.check_output(cmd).decode('UTF-8') + except subprocess.CalledProcessError: + return False + + +@cached +def juju_version(): + """Full version string (eg. 
'1.23.3.1-trusty-amd64')""" + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] + return subprocess.check_output([jujud, 'version'], + universal_newlines=True).strip() + + +def has_juju_version(minimum_version): + """Return True if the Juju version is at least the provided version""" + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) + + +_atexit = [] +_atstart = [] + + +def atstart(callback, *args, **kwargs): + '''Schedule a callback to run before the main hook. + + Callbacks are run in the order they were added. + + This is useful for modules and classes to perform initialization + and inject behavior. In particular: + + - Run common code before all of your hooks, such as logging + the hook name or interesting relation data. + - Defer object or module initialization that requires a hook + context until we know there actually is a hook context, + making testing easier. + - Rather than requiring charm authors to include boilerplate to + invoke your helper's behavior, have it run automatically if + your object is instantiated or module imported. + + This is not at all useful after your hook framework as been launched. + ''' + global _atstart + _atstart.append((callback, args, kwargs)) + + +def atexit(callback, *args, **kwargs): + '''Schedule a callback to run on successful hook completion. + + Callbacks are run in the reverse order that they were added.''' + _atexit.append((callback, args, kwargs)) + + +def _run_atstart(): + '''Hook frameworks must invoke this before running the main hook body.''' + global _atstart + for callback, args, kwargs in _atstart: + callback(*args, **kwargs) + del _atstart[:] + + +def _run_atexit(): + '''Hook frameworks must invoke this after the main hook body has + successfully completed. Do not invoke it if the hook fails.''' + global _atexit + for callback, args, kwargs in reversed(_atexit): + callback(*args, **kwargs) + del _atexit[:] + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get_primary_address(binding): + ''' + Deprecated since Juju 2.3; use network_get() + + Retrieve the primary network address for a named binding + + :param binding: string. The name of a relation of extra-binding + :return: string. The primary IP address for the named binding + :raise: NotImplementedError if run on Juju < 2.0 + ''' + cmd = ['network-get', '--primary-address', binding] + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + if 'no network config found for binding' in e.output.decode('UTF-8'): + raise NoNetworkBinding("No network binding for {}" + .format(binding)) + else: + raise + return response + + +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if request not supported by the Juju version. 
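+
+    The returned dict typically includes keys such as ``bind-addresses``,
+    ``ingress-addresses`` and ``egress-subnets``, though the exact
+    contents vary with the Juju version.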
+ """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + return yaml.safe_load(response) + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. + + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. + + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. 
+ + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen + + +def unit_doomed(unit=None): + """Determines if the unit is being removed from the model + + Requires Juju 2.4.1. + + :param unit: string unit name, defaults to local_unit + :side effect: calls goal_state + :side effect: calls local_unit + :side effect: calls has_juju_version + :return: True if the unit is being removed, already gone, or never existed + """ + if not has_juju_version("2.4.1"): + # We cannot risk blindly returning False for 'we don't know', + # because that could cause data loss; if call sites don't + # need an accurate answer, they likely don't need this helper + # at all. + # goal-state existed in 2.4.0, but did not handle removals + # correctly until 2.4.1. + raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. + return units[unit]['status'] in ('dying', 'dead') + + +def env_proxy_settings(selected_settings=None): + """Get proxy settings from process environment variables. + + Get charm proxy settings from environment variables that correspond to + juju-http-proxy, juju-https-proxy juju-no-proxy (available as of 2.4.2, see + lp:1782236) and juju-ftp-proxy in a format suitable for passing to an + application that reacts to proxy settings passed as environment variables. + Some applications support lowercase or uppercase notation (e.g. curl), some + support only lowercase (e.g. wget), there are also subjectively rare cases + of only uppercase notation support. no_proxy CIDR and wildcard support also + varies between runtimes and applications as there is no enforced standard. + + Some applications may connect to multiple destinations and expose config + options that would affect only proxy settings for a specific destination + these should be handled in charms in an application-specific manner. 
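+
+    A usage sketch (illustrative; it assumes the charm simply forwards the
+    returned variables to a subprocess, and 'url' stands in for a real
+    download URL)::
+
+        proxy = env_proxy_settings(['http', 'https'])
+        if proxy:
+            env = dict(os.environ, **proxy)
+            subprocess.check_call(['curl', '-sLO', url], env=env)
+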
+    :param selected_settings: format only a subset of possible settings
+    :type selected_settings: list
+    :rtype: Option(None, dict[str, str])
+    """
+    SUPPORTED_SETTINGS = {
+        'http': 'HTTP_PROXY',
+        'https': 'HTTPS_PROXY',
+        'no_proxy': 'NO_PROXY',
+        'ftp': 'FTP_PROXY'
+    }
+    if selected_settings is None:
+        selected_settings = SUPPORTED_SETTINGS
+
+    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
+                     if k in selected_settings]
+    proxy_settings = {}
+    for var in selected_vars:
+        var_val = os.getenv(var)
+        if var_val:
+            proxy_settings[var] = var_val
+            proxy_settings[var.lower()] = var_val
+        # Now handle juju-prefixed environment variables. The legacy vs new
+        # environment variable usage is mutually exclusive
+        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+        if charm_var_val:
+            proxy_settings[var] = charm_var_val
+            proxy_settings[var.lower()] = charm_var_val
+    if 'no_proxy' in proxy_settings:
+        if _contains_range(proxy_settings['no_proxy']):
+            log(RANGE_WARNING, level=WARNING)
+    return proxy_settings if proxy_settings else None
+
+
+def _contains_range(addresses):
+    """Check for cidr or wildcard domain in a string.
+
+    Given a string comprising a comma separated list of ip addresses
+    and domain names, determine whether the string contains IP ranges
+    or wildcard domains.
+
+    :param addresses: comma separated list of domains and ip addresses.
+    :type addresses: str
+    """
+    return (
+        # Test for cidr (e.g. 10.20.20.0/24)
+        "/" in addresses or
+        # Test for wildcard domains (*.foo.com or .foo.com)
+        "*" in addresses or
+        addresses.startswith(".") or
+        ",." in addresses or
+        " ." in addresses)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host.py
new file mode 100644
index 0000000000000000000000000000000000000000..b33ac906d9eeb198210dceb069724ac9a35652ea
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host.py
@@ -0,0 +1,1104 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for working with the host system"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+#  Nick Moffitt
+#  Matthew Wedgwood
+
+import os
+import re
+import pwd
+import glob
+import grp
+import random
+import string
+import subprocess
+import hashlib
+import functools
+import itertools
+import six
+
+from contextlib import contextmanager
+from collections import OrderedDict
+from .hookenv import log, INFO, DEBUG, local_unit, charm_name
+from .fstab import Fstab
+from charmhelpers.osplatform import get_platform
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.host_factory.ubuntu import (  # NOQA:F401
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+        CompareHostReleases,
+        get_distrib_codename,
+        arch
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.host_factory.centos import (  # NOQA:F401
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+        CompareHostReleases,
+    )  # flake8: noqa -- ignore F401 for this import
+
+UPDATEDB_PATH = '/etc/updatedb.conf'
+
+
+def service_start(service_name, **kwargs):
+    """Start a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be started. The
+    following example starts the ceph-osd service for instance id=4:
+
+        service_start('ceph-osd', id=4)
+
+    :param service_name: the name of the service to start
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be stopped. The
+    following example stops the ceph-osd service for instance id=4:
+
+        service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be restarted. The
+    following example restarts the ceph-osd service for instance id=4:
+
+        service_restart('ceph-osd', id=4)
+
+    :param service_name: the name of the service to restart
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    return service('restart', service_name, **kwargs)
+
+
+def service_reload(service_name, restart_on_failure=False, **kwargs):
+    """Reload a system service, optionally falling back to restart if
+    reload fails.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be reloaded. The
+    following example reloads the ceph-osd service for instance id=4:
+
+        service_reload('ceph-osd', id=4)
+
+    :param service_name: the name of the service to reload
+    :param restart_on_failure: boolean indicating whether to fallback to a
+                               restart if the reload fails.
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    service_result = service('reload', service_name, **kwargs)
+    if not service_result and restart_on_failure:
+        service_result = service('restart', service_name, **kwargs)
+    return service_result
+
+
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
+                  **kwargs):
+    """Pause a system service.
+
+    Stop it, and prevent it from starting again at boot.
+
+    :param service_name: the name of the service to pause
+    :param init_dir: path to the upstart init directory
+    :param initd_dir: path to the sysv init directory
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems which do not support
+                     key=value arguments via the commandline.
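+
+    Example (the service name is illustrative)::
+
+        service_pause('apache2')  # stop now and do not start at boot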
+ """ + stopped = True + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('disable', service_name) + service('mask', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + return stopped + + +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d", **kwargs): + """Resume a system service. + + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('unmask', service_name) + service('enable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) + + if not started: + started = service_start(service_name, **kwargs) + return started + + +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. + """ + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + return subprocess.call(cmd) == 0 + + +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" + + +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. 
+ """ + if init_is_systemd(): + return service('is-active', service_name) + else: + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): + # Check System V scripts init script return codes + return service('status', service_name) + return False + + +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. + + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created + :param str home_dir: Home directory for user + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + 
"""Add a group to the system + + Will log but otherwise succeed if the group already exists. + + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) + except KeyError: + log('creating group {0}'.format(group_name)) + add_new_group(group_name, system_group, gid) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = ['gpasswd', '-a', username, group] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + + +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not 
os.path.isdir(realpath):
+            log("Removing non-directory file {} prior to mkdir()".format(path))
+            os.unlink(realpath)
+        os.makedirs(realpath, perms)
+    elif not path_exists:
+        os.makedirs(realpath, perms)
+    os.chown(realpath, uid, gid)
+    os.chmod(realpath, perms)
+
+
+def write_file(path, content, owner='root', group='root', perms=0o444):
+    """Create or overwrite a file with the contents of a byte string."""
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+    # lets see if we can grab the file and compare the contents, to avoid doing
+    # a write.
+    existing_content = None
+    existing_uid, existing_gid, existing_perms = None, None, None
+    try:
+        with open(path, 'rb') as target:
+            existing_content = target.read()
+        stat = os.stat(path)
+        existing_uid, existing_gid, existing_perms = (
+            stat.st_uid, stat.st_gid, stat.st_mode
+        )
+    except Exception:
+        pass
+    if content != existing_content:
+        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
+            level=DEBUG)
+        with open(path, 'wb') as target:
+            os.fchown(target.fileno(), uid, gid)
+            os.fchmod(target.fileno(), perms)
+            if six.PY3 and isinstance(content, six.string_types):
+                content = content.encode('UTF-8')
+            target.write(content)
+        return
+    # the contents were the same, but we might still need to change the
+    # ownership or permissions.
+    if existing_uid != uid:
+        log("Changing uid on already existing content: {} -> {}"
+            .format(existing_uid, uid), level=DEBUG)
+        os.chown(path, uid, -1)
+    if existing_gid != gid:
+        log("Changing gid on already existing content: {} -> {}"
+            .format(existing_gid, gid), level=DEBUG)
+        os.chown(path, -1, gid)
+    if existing_perms != perms:
+        log("Changing permissions on existing content: {} -> {}"
+            .format(existing_perms, perms), level=DEBUG)
+        os.chmod(path, perms)
+
+
+def fstab_remove(mp):
+    """Remove the given mountpoint entry from /etc/fstab"""
+    return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+    """Adds the given device entry to the /etc/fstab file"""
+    return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
+    """Mount a filesystem at a particular mountpoint"""
+    cmd_args = ['mount']
+    if options is not None:
+        cmd_args.extend(['-o', options])
+    cmd_args.extend([device, mountpoint])
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_add(device, mountpoint, filesystem, options=options)
+    return True
+
+
+def umount(mountpoint, persist=False):
+    """Unmount a filesystem"""
+    cmd_args = ['umount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_remove(mountpoint)
+    return True
+
+
+def mounts():
+    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+    with open('/proc/mounts') as f:
+        # [['/mount/point','/dev/path'],[...]]
+        system_mounts = [m[1::-1] for m in [l.strip().split()
+                                            for l in f.readlines()]]
+    return system_mounts
+
+
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
+    if os.path.exists(path):
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
+        return h.hexdigest()
+    else:
+        return None
+
+
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+                   Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+                          Can be any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    """A class derived from Value error to indicate the checksum failed."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
+    """Restart services based on configuration files changing
+
+    This function is used as a decorator, for example::
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
+        })
+        def config_changed():
+            pass  # your code here
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    ceph_client_changed function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: DEFAULT false; whether to stop, start OR restart
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
+
+
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function to perform the restart_on_change function.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+ @param restart_map: {file: [service, ...]} + @param stopstart: whether to stop, start or restart a service + @param restart_functions: nonstandard functions to use to restart services + {svc: func, ...} + @returns result of lambda_f() + """ + if restart_functions is None: + restart_functions = {} + checksums = {path: path_hash(path) for path in restart_map} + r = lambda_f() + # create a list of lists of the services to restart + restarts = [restart_map[path] + for path in restart_map + if path_hash(path) != checksums[path]] + # create a flat list of ordered services without duplicates from lists + services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) + if services_list: + actions = ('stop', 'start') if stopstart else ('restart',) + for service_name in services_list: + if service_name in restart_functions: + restart_functions[service_name](service_name) + else: + for action in actions: + service(action, service_name) + return r + + +def pwgen(length=None): + """Generate a random pasword.""" + if length is None: + # A random length is ok to use a weak PRNG + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.ascii_letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the + # actual password + random_generator = random.SystemRandom() + random_chars = [ + random_generator.choice(alphanumeric_chars) for _ in range(length)] + return(''.join(random_chars)) + + +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. 
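+
+    For example, get_bond_master('eth0') might return 'bond0' when eth0 is
+    enslaved, and None otherwise (the interface names here are illustrative).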
+ + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): + """Return a list of nics of given type(s)""" + if isinstance(nic_type, six.string_types): + int_types = [nic_type] + else: + int_types = nic_type + + interfaces = [] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile(r'^[0-9]+:\s+(.+):') + for line in ip_output: + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) + + return interfaces + + +def set_nic_mtu(nic, mtu): + """Set the Maximum Transmission Unit (MTU) on a network interface.""" + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + """Return the Maximum Transmission Unit (MTU) for a network interface.""" + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + """Return the Media Access Control (MAC) for a network interface.""" + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr + + +@contextmanager +def chdir(directory): + """Change the current working directory to a different directory for a code + block and return the previous directory after the block exits. Useful to + run commands from a specificed directory. + + :param str directory: The directory path to change to for this context. + """ + cur = os.getcwd() + try: + yield os.chdir(directory) + finally: + os.chdir(cur) + + +def chownr(path, owner, group, follow_links=True, chowntopdir=False): + """Recursively change user and group ownership of files and directories + in given path. Doesn't chown path itself by default, only its children. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. 
+ :param bool follow_links: Also follow and chown links if True + :param bool chowntopdir: Also chown path itself if True + """ + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown + + if chowntopdir: + broken_symlink = os.path.lexists(path) and not os.path.exists(path) + if not broken_symlink: + chown(path, uid, gid) + for root, dirs, files in os.walk(path, followlinks=follow_links): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + chown(full, uid, gid) + + +def lchownr(path, owner, group): + """Recursively change user and group ownership of files and directories + in a given path, not following symbolic links. See the documentation for + 'os.lchown' for more information. + + :param str path: The string path to start changing ownership. + :param str owner: The owner string to use when looking up the uid. + :param str group: The group string to use when looking up the gid. + """ + chownr(path, owner, group, follow_links=False) + + +def owner(path): + """Returns a tuple containing the username & groupname owning the path. + + :param str path: the string path to retrieve the ownership + :return tuple(str, str): A (username, groupname) tuple containing the + name of the user and group owning the path. + :raises OSError: if the specified path does not exist + """ + stat = os.stat(path) + username = pwd.getpwuid(stat.st_uid)[0] + groupname = grp.getgrgid(stat.st_gid)[0] + return username, groupname + + +def get_total_ram(): + """The total amount of system RAM in bytes. + + This is what is reported by the OS, and may be overcommitted when + there are multiple containers hosted on the same machine. + """ + with open('/proc/meminfo', 'r') as f: + for line in f.readlines(): + if line: + key, value, unit = line.split() + if key == 'MemTotal:': + assert unit == 'kB', 'Unknown unit' + return int(value) * 1024 # Classic, not KiB. + raise NotImplementedError() + + +UPSTART_CONTAINER_TYPE = '/run/container_type' + + +def is_container(): + """Determine whether unit is running in a container + + @return: boolean indicating if unit is in a container + """ + if init_is_systemd(): + # Detect using systemd-detect-virt + return subprocess.call(['systemd-detect-virt', + '--container']) == 0 + else: + # Detect using upstart container file marker + return os.path.exists(UPSTART_CONTAINER_TYPE) + + +def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. 
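+
+    Example (the path is illustrative)::
+
+        add_to_updatedb_prunepath('/srv/backups')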
+ + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + + with open(updatedb_path, 'r+') as f_id: + updatedb_text = f_id.read() + output = updatedb(updatedb_text, path) + f_id.seek(0) + f_id.write(output) + f_id.truncate() + + +def updatedb(updatedb_text, new_path): + lines = [line for line in updatedb_text.split("\n")] + for i, line in enumerate(lines): + if line.startswith("PRUNEPATHS="): + paths_line = line.split("=")[1].replace('"', '') + paths = paths_line.split(" ") + if new_path not in paths: + paths.append(new_path) + lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) + output = "\n".join(lines) + return output + + +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. + + If you have 1000 nodes that need to restart 100 at a time 1 minute at a + time: + + time.wait(modulo_distribution(modulo=100, wait=60)) + restart() + + If you need restarts to happen serially set modulo to the exact number of + nodes and set a high constant wait time: + + time.wait(modulo_distribution(modulo=10, wait=120)) + restart() + + @param modulo: int The modulo number creates the group distribution + @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. + @return: int Calculated time to wait for unit operation + """ + unit_number = int(local_unit().split('/')[1]) + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time + + +def install_ca_cert(ca_cert, name=None): + """ + Install the given cert as a trusted CA. + + The ``name`` is the stem of the filename where the cert is written, and if + not provided, it will default to ``juju-{charm_name}``. + + If the cert is empty or None, or is unchanged, nothing is done. + """ + if not ca_cert: + return + if not isinstance(ca_cert, bytes): + ca_cert = ca_cert.encode('utf8') + if not name: + name = 'juju-{}'.format(charm_name()) + cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + new_hash = hashlib.md5(ca_cert).hexdigest() + if file_hash(cert_file) == new_hash: + return + log("Installing new CA cert at: {}".format(cert_file), level=INFO) + write_file(cert_file, ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) + + +def get_system_env(key, default=None): + """Get data from system environment as represented in ``/etc/environment``. + + :param key: Key to look up + :type key: str + :param default: Value to return if key is not found + :type default: any + :returns: Value for key if found or contents of default parameter + :rtype: any + :raises: subprocess.CalledProcessError + """ + env_file = '/etc/environment' + # use the shell and env(1) to parse the global environments file. 
This is + # done to get the correct result even if the user has shell variable + # substitutions or other shell logic in that file. + output = subprocess.check_output( + ['env', '-i', '/bin/bash', '-c', + 'set -a && source {} && env'.format(env_file)], + universal_newlines=True) + for k, v in (line.split('=', 1) + for line in output.splitlines() if '=' in line): + if k == key: + return v + else: + return default diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host_factory/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host_factory/centos.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host_factory/centos.py new file mode 100644 index 0000000000000000000000000000000000000000..7781a3961f23ce0b161ae08b11710466af8de814 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,72 @@ +import subprocess +import yum +import os + +from charmhelpers.core.strutils import BasicStringComparator + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Host releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + + def __init__(self, item): + raise NotImplementedError( + "CompareHostReleases() is not implemented for CentOS") + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
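+
+    Example (package name and version are illustrative)::
+
+        if cmp_pkgrevno('openssh-server', '7.4') >= 0:
+            pass  # take the newer-package code path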
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host_factory/ubuntu.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 0000000000000000000000000000000000000000..3edc0687275b29762b45ebf3fe1b045bc9f568b2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,116 @@ +import subprocess + +from charmhelpers.core.hookenv import cached +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', + 'artful', + 'bionic', + 'cosmic', + 'disco', + 'eoan', + 'focal' +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def get_distrib_codename(): + """Return the codename of the distribution + :returns: The codename + :rtype: str + """ + return lsb_release()['DISTRIB_CODENAME'].lower() + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + from charmhelpers.fetch import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@cached +def arch(): + """Return the package architecture as a string. 
+ + :returns: the architecture + :rtype: str + :raises: subprocess.CalledProcessError if dpkg command fails + """ + return subprocess.check_output( + ['dpkg', '--print-architecture'] + ).rstrip().decode('UTF-8') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/hugepage.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/hugepage.py new file mode 100644 index 0000000000000000000000000000000000000000..54b5b5e2fcf81eea5f2ebfbceb620ea68d725584 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/hugepage.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. + + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + if max_map_count < 2 * nr_hugepages: + max_map_count = 2 * nr_hugepages + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..e01f4f8ba73ee0d5ab7553740c2590a50e42f96d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel.py @@ 
-0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import subprocess
+
+from charmhelpers.osplatform import get_platform
+from charmhelpers.core.hookenv import (
+    log,
+    INFO
+)
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.kernel_factory.ubuntu import (  # NOQA:F401
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.kernel_factory.centos import (  # NOQA:F401
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+
+__author__ = "Jorge Niedbalski "
+
+
+def modprobe(module, persist=True):
+    """Load a kernel module and configure for auto-load on reboot."""
+    cmd = ['modprobe', module]
+
+    log('Loading kernel module %s' % module, level=INFO)
+
+    subprocess.check_call(cmd)
+    if persist:
+        persistent_modprobe(module)
+
+
+def rmmod(module, force=False):
+    """Remove a module from the linux kernel"""
+    cmd = ['rmmod']
+    if force:
+        cmd.append('-f')
+    cmd.append(module)
+    log('Removing kernel module %s' % module, level=INFO)
+    return subprocess.check_call(cmd)
+
+
+def lsmod():
+    """Shows what kernel modules are currently loaded"""
+    return subprocess.check_output(['lsmod'],
+                                   universal_newlines=True)
+
+
+def is_module_loaded(module):
+    """Checks if a kernel module is already loaded"""
+    matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
+    return len(matches) > 0
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel_factory/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel_factory/centos.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c402c1157900ff1ad5c6c296a409c9e8fb96d2b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
+import subprocess
+import os
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    if not os.path.exists('/etc/rc.modules'):
+        open('/etc/rc.modules', 'a')
+        os.chmod('/etc/rc.modules', 0o111)
+    with open('/etc/rc.modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write('modprobe %s\n' % module)
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["dracut", "-f", version])
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel_factory/ubuntu.py
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel_factory/ubuntu.py new file mode 100644 index 0000000000000000000000000000000000000000..3de372fd3df38fe151cf79243f129cb504516f22 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/kernel_factory/ubuntu.py @@ -0,0 +1,13 @@ +import subprocess + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module + "\n") + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/services/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/services/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..61fd074edc09de434859e48ae1b36baef0503708 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/services/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/services/base.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/services/base.py new file mode 100644 index 0000000000000000000000000000000000000000..179ad4f0c367dd6b13c10b201c3752d1c8daf05e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/services/base.py @@ -0,0 +1,362 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import json +from inspect import getargspec +from collections import Iterable, OrderedDict + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. 
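A quick sketch of the kernel helpers above (the module name is a hypothetical example; modprobe requires root, and persistence goes via /etc/modules on Ubuntu or /etc/rc.modules on CentOS)::

    from charmhelpers.core.kernel import is_module_loaded, modprobe

    # Load the 8021q VLAN module and persist it across reboots.
    if not is_module_loaded('8021q'):
        modprobe('8021q', persist=True)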
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the list
+        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`. Each callback will be called with the service name as the
+        only parameter. After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete. Each callback will be called with the service
+        name as the only parameter. This defaults to
+        `[host.service_start, services.open_ports]`.
+
+        The 'stop' value should be either a single callback, or a list of
+        callbacks, to be called when stopping the service. If the service is
+        being stopped because it no longer has all of its 'required_data', this
+        will be called after all of the 'data_lost' callbacks are complete.
+        Each callback will be called with the service name as the only parameter.
+        This defaults to `[services.close_ports, host.service_stop]`.
+
+        The 'ports' value should be a list of ports to manage. The default
+        'start' handler will open the ports after the service is started,
+        and the default 'stop' handler will close the ports prior to stopping
+        the service.
+
+
+        Examples:
+
+        The following registers an Upstart service called bingod that depends on
+        a mongodb relation and which runs a custom `db_migrate` function prior to
+        restarting the service, and a Runit service called spadesd::
+
+            manager = services.ServiceManager([
+                {
+                    'service': 'bingod',
+                    'ports': [80, 443],
+                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
+                    'data_ready': [
+                        services.template(source='bingod.conf'),
+                        services.template(source='bingod.ini',
+                                          target='/etc/bingod.ini',
+                                          owner='bingo', perms=0o400),
+                    ],
+                },
+                {
+                    'service': 'spadesd',
+                    'data_ready': services.template(source='spadesd_run.j2',
+                                                    target='/etc/sv/spadesd/run',
+                                                    perms=0o555),
+                    'start': runit_start,
+                    'stop': runit_stop,
+                },
+            ])
+            manager.manage()
+        """
+        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+        self._ready = None
+        self.services = OrderedDict()
+        for service in services or []:
+            service_name = service['service']
+            self.services[service_name] = service
+
+    def manage(self):
+        """
+        Handle the current hook by doing The Right Thing with the registered services.
+ """ + hookenv._run_atstart() + try: + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.reconfigure_services() + self.provide_data() + except SystemExit as x: + if x.code is None or x.code == 0: + hookenv._run_atexit() + hookenv._run_atexit() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. + """ + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) + for provider in service.get('provided_data', []): + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. 
+ """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' + if event_name == 'start': + hookenv.open_port(port, protocol) + elif event_name == 'stop': + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. 
+ """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/services/helpers.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/services/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..3e6e30d2fe0d9c73ffdc42d70b77e864b6379c53 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/services/helpers.py @@ -0,0 +1,290 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import yaml + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = None + interface = None + + def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + + if name is not None: + self.name = name + if additional_required_keys: + self.required_keys.extend(additional_required_keys) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. 
+ """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. 
+ """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready + action. 
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` + + :param str target: The target to write the rendered template to (or None) + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + :param partial on_change_action: functools partial to be executed when + rendered file changes + :param jinja2 loader template_loader: A jinja2 template loader + + :return str: The rendered template + """ + def __init__(self, source, target, + owner='root', group='root', perms=0o444, + on_change_action=None, template_loader=None): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + self.on_change_action = on_change_action + self.template_loader = template_loader + + def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) + service = manager.get_service(service_name) + context = {'ctx': {}} + for ctx in service.get('required_data', []): + context.update(ctx) + context['ctx'].update(ctx) + + result = templating.render(self.source, self.target, context, + self.owner, self.group, self.perms, + template_loader=self.template_loader) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() + + return result + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/strutils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/strutils.py new file mode 100644 index 0000000000000000000000000000000000000000..e8df0452f8203b53947eb137eed22d85ff62dff0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/strutils.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import re + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't', 'on']: + return True + elif value in ['n', 'no', 'false', 'f', 'off']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if matches:
+        size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+    else:
+        # Assume that value passed in is bytes
+        try:
+            size = int(value)
+        except ValueError:
+            msg = "Unable to interpret string value '%s' as bytes" % (value)
+            raise ValueError(msg)
+    return size
+
+
+class BasicStringComparator(object):
+    """Provides a class that will compare strings from an iterator type object.
+    Used to provide > and < comparisons on strings that may not necessarily be
+    alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
+    z-wrap.
+    """
+
+    _list = None
+
+    def __init__(self, item):
+        if self._list is None:
+            raise Exception("Must define the _list in the class definition!")
+        try:
+            self.index = self._list.index(item)
+        except Exception:
+            raise KeyError("Item '{}' is not in list '{}'"
+                           .format(item, self._list))
+
+    def __eq__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index == self._list.index(other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __lt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index < self._list.index(other)
+
+    def __ge__(self, other):
+        return not self.__lt__(other)
+
+    def __gt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index > self._list.index(other)
+
+    def __le__(self, other):
+        return not self.__gt__(other)
+
+    def __str__(self):
+        """Always give back the item at the index so it can be used in
+        comparisons like:
+
+            s_mitaka = CompareOpenStack('mitaka')
+            s_newton = CompareOpenStack('newton')
+
+            assert s_newton > s_mitaka
+
+        @returns: <string>
+        """
+        return self._list[self.index]
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/sysctl.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/sysctl.py
new file mode 100644
index 0000000000000000000000000000000000000000..386428d619bc38edf02dc088bf7ec32767c0ab94
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/sysctl.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from subprocess import check_call, CalledProcessError
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+    WARNING,
+)
+
+from charmhelpers.core.host import is_container
+
+__author__ = 'Jorge Niedbalski R. 
' + + +def create(sysctl_dict, sysctl_file, ignore=False): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :param ignore: If True, ignore "unknown variable" errors. + :type ignore: bool + :returns: None + """ + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict_parsed.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: {} values: {}".format(sysctl_file, + sysctl_dict_parsed), + level=DEBUG) + + call = ["sysctl", "-p", sysctl_file] + if ignore: + call.append("-e") + + try: + check_call(call) + except CalledProcessError as e: + if is_container(): + log("Error setting some sysctl keys in this container: {}".format(e.output), + level=WARNING) + else: + raise e diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/templating.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/templating.py new file mode 100644 index 0000000000000000000000000000000000000000..9014015c14ee0b48c775562cd4f0d30884944439 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/templating.py @@ -0,0 +1,93 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None, encoding='UTF-8', + template_loader=None, config_template=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. It can also be `None`, in which + case no file will be written. + + The context should be a dict containing the values to be replaced in the + template. + + config_template may be provided to render from a provided template instead + of loading from a file. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + The rendered template will be written to the file as well as being returned + as a string. + + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. 
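A short usage sketch for `create` above (the file path and key are hypothetical examples; root is required, and `sysctl -p` is invoked on the written file)::

    from charmhelpers.core import sysctl

    # A plain dict is accepted as well as a YAML string.
    sysctl.create({'net.ipv4.ip_forward': 1},
                  '/etc/sysctl.d/60-charm.conf')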
+ """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/unitdata.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/unitdata.py new file mode 100644 index 0000000000000000000000000000000000000000..ab554327b343f896880523fc627c1abea84be29a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/core/unitdata.py @@ -0,0 +1,525 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. 
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+    from charmhelpers.core import hookenv, unitdata
+
+    hook_data = unitdata.HookData()
+    db = unitdata.kv()
+    hooks = hookenv.Hooks()
+
+    @hooks.hook
+    def config_changed():
+        # Print all changes to configuration from previously seen
+        # values.
+        for changed, (prev, cur) in hook_data.conf.items():
+            print('config changed', changed,
+                  'previous value', prev,
+                  'current value', cur)
+
+        # Get some unit specific bookkeeping
+        if not db.get('pkg_key'):
+            key = urllib.urlopen('https://example.com/pkg_key').read()
+            db.set('pkg_key', key)
+
+        # Directly access all charm config as a mapping.
+        conf = db.getrange('config', True)
+
+        # Directly access all relation data as a mapping
+        rels = db.getrange('rels', True)
+
+    if __name__ == '__main__':
+        with hook_data():
+            hook.execute()
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name, and timestamp)::
+
+    >>> from unitdata import kv
+    >>> db = kv()
+    >>> with db.hook_scope('install'):
+    ...    # do work, in transactional scope.
+    ...    db.set('x', 1)
+    >>> db.get('x')
+    1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data struct capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+    >>> kv.set('y', True)
+    >>> kv.get('y')
+    True
+
+    # We can set complex values (dicts, lists) as a single key.
+    >>> kv.set('config', {'a': 1, 'b': True})
+
+    # Also supports returning dictionaries as a record which
+    # provides attribute access.
+    >>> config = kv.get('config', record=True)
+    >>> config.b
+    True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+    >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+    >>> kv.getrange('gui.', strip=True)
+    {'z': 1, 'y': 2}
+
+When updating values, it's very helpful to understand which values
+have actually changed and how they have changed. The storage
+provides a delta method to provide for this::
+
+    >>> data = {'debug': True, 'option': 2}
+    >>> delta = kv.delta(data, 'config.')
+    >>> delta.debug.previous
+    None
+    >>> delta.debug.current
+    True
+    >>> delta
+    {'debug': (None, True), 'option': (None, 2)}
+
+Note the delta method does not persist the actual change, it needs to
+be explicitly saved via 'update' method::
+
+    >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated to the hookname.
+
+    >>> with db.hook_scope('config-changed'):
+    ...    db.set('x', 42)
+    >>> db.gethistory('x')
+    [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+     (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu '
+
+
+class Storage(object):
+    """Simple key value database for local unit state within charms.
+
+    Modifications are not persisted unless :meth:`flush` is called.
+
+    To support dicts, lists, integer, floats, and booleans values
+    are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def get(self, key, default=None, record=False): + self.cursor.execute('select data from kv where key=?', [key]) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) + result = self.cursor.fetchall() + + if not result: + return {} + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + """ + Remove a key from the database entirely. + """ + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ + serialized = json.dumps(value) + + self.cursor.execute('select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except Exception: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. 
+
+    Records all unit information, and stores deltas for processing
+    by the hook.
+
+    Sample::
+
+        from charmhelpers.core import hookenv, unitdata
+
+        changes = unitdata.HookData()
+        db = unitdata.kv()
+        hooks = hookenv.Hooks()
+
+        @hooks.hook
+        def config_changed():
+            # View all changes to configuration
+            for changed, (prev, cur) in changes.conf.items():
+                print('config changed', changed,
+                      'previous value', prev,
+                      'current value', cur)
+
+            # Get some unit specific bookkeeping
+            if not db.get('pkg_key'):
+                key = urllib.urlopen('https://example.com/pkg_key').read()
+                db.set('pkg_key', key)
+
+        if __name__ == '__main__':
+            with changes():
+                hook.execute()
+
+    """
+    def __init__(self):
+        self.kv = kv()
+        self.conf = None
+        self.rels = None
+
+    @contextlib.contextmanager
+    def __call__(self):
+        from charmhelpers.core import hookenv
+        hook_name = hookenv.hook_name()
+
+        with self.kv.hook_scope(hook_name):
+            self._record_charm_version(hookenv.charm_dir())
+            delta_config, delta_relation = self._record_hook(hookenv)
+            yield self.kv, delta_config, delta_relation
+
+    def _record_charm_version(self, charm_dir):
+        # Record revisions.. charm revisions are meaningless
+        # to charm authors as they don't control the revision.
+        # so logic dependent on revision is not particularly
+        # useful, however it is useful for debugging analysis.
+        charm_rev = open(
+            os.path.join(charm_dir, 'revision')).read().strip()
+        charm_rev = charm_rev or '0'
+        revs = self.kv.get('charm_revisions', [])
+        if charm_rev not in revs:
+            revs.append(charm_rev.strip() or '0')
+            self.kv.set('charm_revisions', revs)
+
+    def _record_hook(self, hookenv):
+        data = hookenv.execution_environment()
+        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
+        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
+        self.kv.set('env', dict(data['env']))
+        self.kv.set('unit', data['unit'])
+        self.kv.set('relid', data.get('relid'))
+        return conf_delta, rels_delta
+
+
+class Record(dict):
+
+    __slots__ = ()
+
+    def __getattr__(self, k):
+        if k in self:
+            return self[k]
+        raise AttributeError(k)
+
+
+class DeltaSet(Record):
+
+    __slots__ = ()
+
+
+Delta = collections.namedtuple('Delta', ['previous', 'current'])
+
+
+_KV = None
+
+
+def kv():
+    global _KV
+    if _KV is None:
+        _KV = Storage()
+    return _KV
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cc7fc850a0632568ad78aae9716be718c9ff6b5
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/__init__.py
@@ -0,0 +1,209 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+from charmhelpers.osplatform import get_platform
+from yaml import safe_load
+from charmhelpers.core.hookenv import (
+    config,
+    log,
+)
+
+import six
+if six.PY3:
+    from urllib.parse import urlparse, urlunparse
+else:
+    from urlparse import urlparse, urlunparse
+
+
+# The order of this list is very important. Handlers should be listed in
+# order from least- to most-specific URL matching.
+FETCH_HANDLERS = (
+    'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
+    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
+    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
+)
+
+
+class SourceConfigError(Exception):
+    pass
+
+
+class UnhandledSource(Exception):
+    pass
+
+
+class AptLockError(Exception):
+    pass
+
+
+class GPGKeyError(Exception):
+    """Exception occurs when a GPG key cannot be fetched or used. The message
+    indicates what the problem is.
+    """
+    pass
+
+
+class BaseFetchHandler(object):
+
+    """Base class for FetchHandler implementations in fetch plugins"""
+
+    def can_handle(self, source):
+        """Returns True if the source can be handled. Otherwise returns
+        a string explaining why it cannot"""
+        return "Wrong source type"
+
+    def install(self, source):
+        """Try to download and unpack the source. Return the path to the
+        unpacked files or raise UnhandledSource."""
+        raise UnhandledSource("Wrong source type {}".format(source))
+
+    def parse_url(self, url):
+        return urlparse(url)
+
+    def base_url(self, url):
+        """Return url without querystring or fragment"""
+        parts = list(self.parse_url(url))
+        parts[4:] = ['' for i in parts[4:]]
+        return urlunparse(parts)
+
+
+__platform__ = get_platform()
+module = "charmhelpers.fetch.%s" % __platform__
+fetch = importlib.import_module(module)
+
+filter_installed_packages = fetch.filter_installed_packages
+filter_missing_packages = fetch.filter_missing_packages
+install = fetch.apt_install
+upgrade = fetch.apt_upgrade
+update = _fetch_update = fetch.apt_update
+purge = fetch.apt_purge
+add_source = fetch.add_source
+
+if __platform__ == "ubuntu":
+    apt_cache = fetch.apt_cache
+    apt_install = fetch.apt_install
+    apt_update = fetch.apt_update
+    apt_upgrade = fetch.apt_upgrade
+    apt_purge = fetch.apt_purge
+    apt_autoremove = fetch.apt_autoremove
+    apt_mark = fetch.apt_mark
+    apt_hold = fetch.apt_hold
+    apt_unhold = fetch.apt_unhold
+    import_key = fetch.import_key
+    get_upstream_version = fetch.get_upstream_version
+    apt_pkg = fetch.ubuntu_apt_pkg
+    get_apt_dpkg_env = fetch.get_apt_dpkg_env
+elif __platform__ == "centos":
+    yum_search = fetch.yum_search
+
+
+def configure_sources(update=False,
+                      sources_var='install_sources',
+                      keys_var='install_keys'):
+    """Configure multiple sources from charm configuration.
+
+    The lists are encoded as yaml fragments in the configuration.
+    The fragment needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().
+
+    Example config:
+        install_sources: |
+          - "ppa:foo"
+          - "http://example.com/repo precise main"
+        install_keys: |
+          - null
+          - "a1b2c3d4"
+
+    Note that 'null' (a.k.a. None) should not be quoted.
+ """ + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, six.string_types): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) + else: + if isinstance(keys, six.string_types): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) + if update: + _fetch_update(fatal=True) + + +def install_remote(source, *args, **kwargs): + """Install a file tree from a remote source. + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + for handler in handlers: + try: + return handler.install(source, *args, **kwargs) + except UnhandledSource as e: + log('Install source attempt unsuccessful: {}'.format(e), + level='WARNING') + raise UnhandledSource("No handler found for source {}".format(source)) + + +def install_from_config(config_var_name): + """Install a file from config.""" + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr( + importlib.import_module(package), + classname) + plugin_list.append(handler_class()) + except NotImplementedError: + # Skip missing plugins so that they can be ommitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format( + handler_name)) + return plugin_list diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/archiveurl.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/archiveurl.py new file mode 100644 index 0000000000000000000000000000000000000000..d25587adeff102c3fc9e402f98746fccbd8a3693 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,165 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import hashlib +import re + +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs + + +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + # XXX: Why is this returning a boolean and a string? It's + # doomed to fail since "bool(can_handle('foo://'))" will be True. + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ + # propagate all exceptions + # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse(source) + if proto in ('http', 'https'): + auth, barehost = splituser(netloc) + if auth is not None: + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) + try: + with open(dest, 'wb') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e + + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the `source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. 
+        :param str dest: Local destination path to install to. If not given,
+            installs to `$CHARM_DIR/fetched/archive_file_name`.
+        :param str checksum: If given, validate the archive file after download.
+        :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+            such as md5, sha1, sha256, sha512, etc.
+
+        """
+        url_parts = self.parse_url(source)
+        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
+        if not os.path.exists(dest_dir):
+            mkdir(dest_dir, perms=0o755)
+        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
+        try:
+            self.download(source, dld_file)
+        except URLError as e:
+            raise UnhandledSource(e.reason)
+        except OSError as e:
+            raise UnhandledSource(e.strerror)
+        options = parse_qs(url_parts.fragment)
+        for key, value in options.items():
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
+            if key in algorithms:
+                if len(value) != 1:
+                    raise TypeError(
+                        "Expected 1 hash value, not %d" % len(value))
+                expected = value[0]
+                check_hash(dld_file, expected, key)
+        if checksum:
+            check_hash(dld_file, checksum, hash_type)
+        return extract(dld_file, dest)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/bzrurl.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/bzrurl.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4ab3ff1e6bc7dde24e8ed568a3dc0c6012ddea6
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/bzrurl.py
@@ -0,0 +1,76 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
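A minimal sketch of using `ArchiveUrlFetchHandler` above directly, rather than via `install_remote()`. The URL and checksum are placeholders; `CHARM_DIR` is normally set by Juju inside a hook environment:

```python
import os

from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

os.environ.setdefault('CHARM_DIR', '/tmp/demo-charm')  # normally set by Juju
handler = ArchiveUrlFetchHandler()
# can_handle() returns True or an explanatory string, hence the identity test.
if handler.can_handle('http://example.com/file.tgz') is True:
    dest = handler.install('http://example.com/file.tgz',
                           checksum='deadbeef', hash_type='sha1')
    print('Archive extracted to', dest)
```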
+ +import os +from subprocess import STDOUT, check_output +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource, + filter_installed_packages, + install, +) +from charmhelpers.core.host import mkdir + + +if filter_installed_packages(['bzr']) != []: + install(['bzr']) + if filter_installed_packages(['bzr']) != []: + raise NotImplementedError('Unable to install bzr') + + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs.""" + + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp', ''): + return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.bzr')) + else: + return True + + def branch(self, source, dest, revno=None): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + cmd_opts = [] + if revno: + cmd_opts += ['-r', str(revno)] + if os.path.exists(dest): + cmd = ['bzr', 'pull'] + cmd += cmd_opts + cmd += ['--overwrite', '-d', dest, source] + else: + cmd = ['bzr', 'branch'] + cmd += cmd_opts + cmd += [source, dest] + check_output(cmd, stderr=STDOUT) + + def install(self, source, dest=None, revno=None): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + + if dest and not os.path.exists(dest): + mkdir(dest, perms=0o755) + + try: + self.branch(source, dest_dir, revno) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/centos.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/centos.py new file mode 100644 index 0000000000000000000000000000000000000000..a91dcff0645ed541a79cd72af3112bdff393719a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/centos.py @@ -0,0 +1,171 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +import os +import time +import six +import yum + +from tempfile import NamedTemporaryFile +from charmhelpers.core.hookenv import log + +YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM. +YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. 
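The yum helpers defined below are usually combined into an install-if-missing pattern, as in this sketch. Package names are placeholders, and importing `charmhelpers.fetch.centos` assumes a CentOS host where the Python `yum` bindings exist:

```python
from charmhelpers.fetch.centos import filter_installed_packages, install

wanted = ['epel-release', 'nginx']
missing = filter_installed_packages(wanted)
if missing:
    # fatal=True retries while the yum lock is held and raises if the
    # install ultimately fails.
    install(missing, fatal=True)
```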
+ + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + yb = yum.YumBase() + package_list = yb.doPackageLists() + temp_cache = {p.base_package_name: 1 for p in package_list['installed']} + + _pkgs = [p for p in packages if not temp_cache.get(p, False)] + return _pkgs + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_yum_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_yum_command(cmd, fatal) + + +def update(fatal=False): + """Update local yum cache.""" + cmd = ['yum', '--assumeyes', 'update'] + log("Update with fatal: {}".format(fatal)) + _run_yum_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['yum', '--assumeyes', 'remove'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_yum_command(cmd, fatal) + + +def yum_search(packages): + """Search for a package.""" + output = {} + cmd = ['yum', 'search'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Searching for {}".format(packages)) + result = subprocess.check_output(cmd) + for package in list(packages): + output[package] = package in result + return output + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL with a rpm package + + @param key: A key to be added to the system's keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. + """ + if source is None: + log('Source is not present. Skipping') + return + + if source.startswith('http'): + directory = '/etc/yum.repos.d/' + for filename in os.listdir(directory): + with open(directory + filename, 'r') as rpm_file: + if source in rpm_file.read(): + break + else: + log("Add source: {!r}".format(source)) + # write in the charms.repo + with open(directory + 'Charms.repo', 'a') as rpm_file: + rpm_file.write('[%s]\n' % source[7:].replace('/', '_')) + rpm_file.write('name=%s\n' % source[7:]) + rpm_file.write('baseurl=%s\n\n' % source) + else: + log("Unknown source: {!r}".format(source)) + + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile('w+') as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['rpm', '--import', key_file.name]) + else: + subprocess.check_call(['rpm', '--import', key]) + + +def _run_yum_command(cmd, fatal=False): + """Run an YUM command. + + Checks the output and retry if the fatal flag is set to True. + + :param: cmd: str: The yum command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. 
+ """ + env = os.environ.copy() + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the yum + # lock was not acquired. + + while result is None or result == YUM_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > YUM_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire YUM lock. Will retry in {} seconds." + "".format(YUM_NO_LOCK_RETRY_DELAY)) + time.sleep(YUM_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/giturl.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/giturl.py new file mode 100644 index 0000000000000000000000000000000000000000..070ca9bb5c1a2fdef39f88606ffcaf39bb049410 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/giturl.py @@ -0,0 +1,69 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from subprocess import check_output, CalledProcessError, STDOUT +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource, + filter_installed_packages, + install, +) + +if filter_installed_packages(['git']) != []: + install(['git']) + if filter_installed_packages(['git']) != []: + raise NotImplementedError('Unable to install git') + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs.""" + + def can_handle(self, source): + url_parts = self.parse_url(source) + # TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git', ''): + return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.git')) + else: + return True + + def clone(self, source, dest, branch="master", depth=None): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + if os.path.exists(dest): + cmd = ['git', '-C', dest, 'pull', source, branch] + else: + cmd = ['git', 'clone', source, dest, '--branch', branch] + if depth: + cmd.extend(['--depth', depth]) + check_output(cmd, stderr=STDOUT) + + def install(self, source, branch="master", dest=None, depth=None): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + try: + self.clone(source, dest_dir, branch, depth) + except CalledProcessError as e: + raise UnhandledSource(e) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..bff99dc93c64f80716e2d5a2b6d0d4e8a2436955 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/debug.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..757135ee4cf3b5ff4c02305126f5ca3940892afc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/debug.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import atexit +import sys + +from charmhelpers.fetch.python.rpdb import Rpdb +from charmhelpers.core.hookenv import ( + open_port, + close_port, + ERROR, + log +) + +__author__ = "Jorge Niedbalski " + +DEFAULT_ADDR = "0.0.0.0" +DEFAULT_PORT = 4444 + + +def _error(message): + log(message, level=ERROR) + + +def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): + """ + Set a trace point using the remote debugger + """ + atexit.register(close_port, port) + try: + log("Starting a remote python debugger session on %s:%s" % (addr, + port)) + open_port(port) + debugger = Rpdb(addr=addr, port=port) + debugger.set_trace(sys._getframe().f_back) + except Exception: + _error("Cannot start a remote debug session on %s:%s" % (addr, + port)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/packages.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/packages.py new file mode 100644 index 0000000000000000000000000000000000000000..6e95028bc540aace84a2ec6c1bcc4de2663e8a87 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/packages.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import six +import subprocess +import sys + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import charm_dir, log + +__author__ = "Jorge Niedbalski " + + +def pip_execute(*args, **kwargs): + """Overriden pip_execute() to stop sys.path being changed. + + The act of importing main from the pip module seems to cause add wheels + from the /usr/share/python-wheels which are installed by various tools. + This function ensures that sys.path remains the same after the call is + executed. + """ + try: + _path = sys.path + try: + from pip import main as _pip_execute + except ImportError: + apt_update() + if six.PY2: + apt_install('python-pip') + else: + apt_install('python3-pip') + from pip import main as _pip_execute + _pip_execute(*args, **kwargs) + finally: + sys.path = _path + + +def parse_options(given, available): + """Given a set of options, check if available""" + for key, value in sorted(given.items()): + if not value: + continue + if key in available: + yield "--{0}={1}".format(key, value) + + +def pip_install_requirements(requirements, constraints=None, **options): + """Install a requirements file. + + :param constraints: Path to pip constraints file. + http://pip.readthedocs.org/en/stable/user_guide/#constraints-files + """ + command = ["install"] + + available_options = ('proxy', 'src', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + command.append("-r {0}".format(requirements)) + if constraints: + command.append("-c {0}".format(constraints)) + log("Installing from file: {} with constraints {} " + "and options: {}".format(requirements, constraints, command)) + else: + log("Installing from file: {} with options: {}".format(requirements, + command)) + pip_execute(command) + + +def pip_install(package, fatal=False, upgrade=False, venv=None, + constraints=None, **options): + """Install a python package""" + if venv: + venv_python = os.path.join(venv, 'bin/pip') + command = [venv_python, "install"] + else: + command = ["install"] + + available_options = ('proxy', 'src', 'log', 'index-url', ) + for option in parse_options(options, available_options): + command.append(option) + + if upgrade: + command.append('--upgrade') + + if constraints: + command.extend(['-c', constraints]) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Installing {} package with options: {}".format(package, + command)) + if venv: + subprocess.check_call(command) + else: + pip_execute(command) + + +def pip_uninstall(package, **options): + """Uninstall a python package""" + command = ["uninstall", "-q", "-y"] + + available_options = ('proxy', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Uninstalling {} package with options: {}".format(package, + command)) + pip_execute(command) + + +def pip_list(): + """Returns the list of current python installed packages + """ + return pip_execute(["list"]) + + +def 
pip_create_virtualenv(path=None): + """Create an isolated Python environment.""" + if six.PY2: + apt_install('python-virtualenv') + else: + apt_install('python3-virtualenv') + + if path: + venv_path = path + else: + venv_path = os.path.join(charm_dir(), 'venv') + + if not os.path.exists(venv_path): + subprocess.check_call(['virtualenv', venv_path]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/rpdb.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/rpdb.py new file mode 100644 index 0000000000000000000000000000000000000000..9b31610c22fc2d24fe5097016cf45728f87de4ae --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/rpdb.py @@ -0,0 +1,56 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Remote Python Debugger (pdb wrapper).""" + +import pdb +import socket +import sys + +__author__ = "Bertrand Janin " +__version__ = "0.1.3" + + +class Rpdb(pdb.Pdb): + + def __init__(self, addr="127.0.0.1", port=4444): + """Initialize the socket and initialize pdb.""" + + # Backup stdin and stdout before replacing them by the socket handle + self.old_stdout = sys.stdout + self.old_stdin = sys.stdin + + # Open a 'reusable' socket to let the webapp reload on the same port + self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + self.skt.bind((addr, port)) + self.skt.listen(1) + (clientsocket, address) = self.skt.accept() + handle = clientsocket.makefile('rw') + pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) + sys.stdout = sys.stdin = handle + + def shutdown(self): + """Revert stdin and stdout, close the socket.""" + sys.stdout = self.old_stdout + sys.stdin = self.old_stdin + self.skt.close() + self.set_continue() + + def do_continue(self, arg): + """Stop all operation on ``continue``.""" + self.shutdown() + return 1 + + do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/version.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/version.py new file mode 100644 index 0000000000000000000000000000000000000000..3eb421036ff737f8ff1684e85ff87703e30fe543 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/python/version.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+__author__ = "Jorge Niedbalski "
+
+
+def current_version():
+    """Current system python version"""
+    return sys.version_info
+
+
+def current_version_string():
+    """Current system python version as string major.minor.micro"""
+    return "{0}.{1}.{2}".format(sys.version_info.major,
+                                sys.version_info.minor,
+                                sys.version_info.micro)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/snap.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/snap.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc70aa941bc4f0bb5ff126237db65705b9e4a10a
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/snap.py
@@ -0,0 +1,150 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Charm helpers snap for classic charms.
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+import os
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg '
+
+# The return code for "couldn't acquire lock" in Snap
+# (hopefully this will be improved).
+SNAP_NO_LOCK = 1
+SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+SNAP_CHANNELS = [
+    'edge',
+    'beta',
+    'candidate',
+    'stable',
+]
+
+
+class CouldNotAcquireLockException(Exception):
+    pass
+
+
+class InvalidSnapChannel(Exception):
+    pass
+
+
+def _snap_exec(commands):
+    """
+    Execute snap commands.
+
+    :param commands: List of commands
+    :return: Integer exit code
+    """
+    assert type(commands) == list
+
+    retry_count = 0
+    return_code = None
+
+    while return_code is None or return_code == SNAP_NO_LOCK:
+        try:
+            return_code = subprocess.check_call(['snap'] + commands,
+                                                env=os.environ)
+        except subprocess.CalledProcessError as e:
+            retry_count += 1
+            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException(
+                    'Could not acquire lock after {} attempts'
+                    .format(SNAP_NO_LOCK_RETRY_COUNT))
+            return_code = e.returncode
+            log('Snap failed to acquire lock, trying again in {} seconds.'
+                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
+            sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+    return return_code
+
+
+def snap_install(packages, *flags):
+    """
+    Install a snap package.
+ + :param packages: String or List String package name + :param flags: List String flags to pass to install command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Installing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with option(s) "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['install'] + flags + packages) + + +def snap_remove(packages, *flags): + """ + Remove a snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to remove command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Removing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['remove'] + flags + packages) + + +def snap_refresh(packages, *flags): + """ + Refresh / Update snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to refresh command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Refreshing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['refresh'] + flags + packages) + + +def valid_snap_channel(channel): + """ Validate snap channel exists + + :raises InvalidSnapChannel: When channel does not exist + :return: Boolean + """ + if channel.lower() in SNAP_CHANNELS: + return True + else: + raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/ubuntu.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/ubuntu.py new file mode 100644 index 0000000000000000000000000000000000000000..3ddaf0dd47f23cef60d7b7e59a83c989999d9f3f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/ubuntu.py @@ -0,0 +1,805 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
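Before moving on to the apt helpers, a minimal sketch of the snap helpers from snap.py above; the snap name and channel are placeholders:

```python
from charmhelpers.fetch.snap import snap_install, valid_snap_channel

channel = 'stable'
# valid_snap_channel() returns True, or raises InvalidSnapChannel.
if valid_snap_channel(channel):
    # Extra flags are passed through to ``snap install`` verbatim.
    snap_install('core', '--channel={}'.format(channel))
```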
+ +from collections import OrderedDict +import platform +import re +import six +import subprocess +import sys +import time + +from charmhelpers.core.host import get_distrib_codename, get_system_env + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, + env_proxy_settings, +) +from charmhelpers.fetch import SourceConfigError, GPGKeyError +from charmhelpers.fetch import ubuntu_apt_pkg + +PROPOSED_POCKET = ( + "# Proposed\n" + "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " + "multiverse restricted\n") +PROPOSED_PORTS_POCKET = ( + "# Proposed\n" + "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " + "multiverse restricted\n") +# Only supports 64bit and ppc64 at the moment. +ARCH_TO_PROPOSED_POCKET = { + 'x86_64': PROPOSED_POCKET, + 'ppc64le': PROPOSED_PORTS_POCKET, + 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, +} +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 
'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', + # Ocata + 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', + 'xenial-ocata': 'xenial-updates/ocata', + 'xenial-ocata/updates': 'xenial-updates/ocata', + 'xenial-updates/ocata': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/proposed': 'xenial-proposed/ocata', + 'xenial-proposed/ocata': 'xenial-proposed/ocata', + # Pike + 'pike': 'xenial-updates/pike', + 'xenial-pike': 'xenial-updates/pike', + 'xenial-pike/updates': 'xenial-updates/pike', + 'xenial-updates/pike': 'xenial-updates/pike', + 'pike/proposed': 'xenial-proposed/pike', + 'xenial-pike/proposed': 'xenial-proposed/pike', + 'xenial-proposed/pike': 'xenial-proposed/pike', + # Queens + 'queens': 'xenial-updates/queens', + 'xenial-queens': 'xenial-updates/queens', + 'xenial-queens/updates': 'xenial-updates/queens', + 'xenial-updates/queens': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/proposed': 'xenial-proposed/queens', + 'xenial-proposed/queens': 'xenial-proposed/queens', + # Rocky + 'rocky': 'bionic-updates/rocky', + 'bionic-rocky': 'bionic-updates/rocky', + 'bionic-rocky/updates': 'bionic-updates/rocky', + 'bionic-updates/rocky': 'bionic-updates/rocky', + 'rocky/proposed': 'bionic-proposed/rocky', + 'bionic-rocky/proposed': 'bionic-proposed/rocky', + 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', + # Train + 'train': 'bionic-updates/train', + 'bionic-train': 'bionic-updates/train', + 'bionic-train/updates': 'bionic-updates/train', + 'bionic-updates/train': 'bionic-updates/train', + 'train/proposed': 'bionic-proposed/train', + 'bionic-train/proposed': 'bionic-proposed/train', + 'bionic-proposed/train': 'bionic-proposed/train', + # Ussuri + 'ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri/updates': 'bionic-updates/ussuri', + 'bionic-updates/ussuri': 'bionic-updates/ussuri', + 'ussuri/proposed': 'bionic-proposed/ussuri', + 
    'bionic-ussuri/proposed': 'bionic-proposed/ussuri',
+    'bionic-proposed/ussuri': 'bionic-proposed/ussuri',
+}
+
+
+APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
+CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
+CMD_RETRY_COUNT = 3  # Retry a failing fatal command X times.
+
+
+def filter_installed_packages(packages):
+    """Return a list of packages that require installation."""
+    cache = apt_cache()
+    _pkgs = []
+    for package in packages:
+        try:
+            p = cache[package]
+            p.current_ver or _pkgs.append(package)
+        except KeyError:
+            log('Package {} has no installation candidate.'.format(package),
+                level='WARNING')
+            _pkgs.append(package)
+    return _pkgs
+
+
+def filter_missing_packages(packages):
+    """Return a list of packages that are installed.
+
+    :param packages: list of packages to evaluate.
+    :returns list: Packages that are installed.
+    """
+    return list(
+        set(packages) -
+        set(filter_installed_packages(packages))
+    )
+
+
+def apt_cache(*_, **__):
+    """Shim returning an object simulating the apt_pkg Cache.
+
+    :param _: Accept arguments for compatibility, not used.
+    :type _: any
+    :param __: Accept keyword arguments for compatibility, not used.
+    :type __: any
+    :returns: Object used to interrogate the system apt and dpkg databases.
+    :rtype: ubuntu_apt_pkg.Cache
+    """
+    if 'apt_pkg' in sys.modules:
+        # NOTE(fnordahl): When our consumers use the upstream ``apt_pkg``
+        # module in conjunction with the apt_cache helper function, they may
+        # expect us to call ``apt_pkg.init()`` for them.
+        #
+        # Detect this situation, log a warning and make the call to
+        # ``apt_pkg.init()`` to avoid crashing the consumer Python
+        # interpreter with a segmentation fault.
+        log('Support for use of upstream ``apt_pkg`` module in conjunction '
+            'with charm-helpers is deprecated since 2019-06-25', level=WARNING)
+        sys.modules['apt_pkg'].init()
+    return ubuntu_apt_pkg.Cache()
+
+
+def apt_install(packages, options=None, fatal=False):
+    """Install one or more packages.
+
+    :param packages: Package(s) to install
+    :type packages: Option[str, List[str]]
+    :param options: Options to pass on to apt-get
+    :type options: Option[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+        retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    if options is None:
+        options = ['--option=Dpkg::Options::=--force-confold']
+
+    cmd = ['apt-get', '--assume-yes']
+    cmd.extend(options)
+    cmd.append('install')
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Installing {} with options: {}".format(packages,
+                                                options))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages.
+
+    :param options: Options to pass on to apt-get
+    :type options: Option[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+        retried.
+ :type fatal: bool + :param dist: Whether ``dist-upgrade`` should be used over ``upgrade`` + :type dist: bool + :raises: subprocess.CalledProcessError + """ + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_apt_command(cmd, fatal) + + +def apt_update(fatal=False): + """Update local apt cache.""" + cmd = ['apt-get', 'update'] + _run_apt_command(cmd, fatal) + + +def apt_purge(packages, fatal=False): + """Purge one or more packages. + + :param packages: Package(s) to install + :type packages: Option[str, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ + cmd = ['apt-get', '--assume-yes', 'purge'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_apt_command(cmd, fatal) + + +def apt_autoremove(purge=True, fatal=False): + """Purge one or more packages. + :param purge: Whether the ``--purge`` option should be passed on or not. + :type purge: bool + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ + cmd = ['apt-get', '--assume-yes', 'autoremove'] + if purge: + cmd.append('--purge') + _run_apt_command(cmd, fatal) + + +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark.""" + log("Marking {} as {}".format(packages, mark)) + cmd = ['apt-mark', mark] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + + if fatal: + subprocess.check_call(cmd, universal_newlines=True) + else: + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) + + +def import_key(key): + """Import an ASCII Armor key. + + A Radix64 format keyid is also supported for backwards + compatibility. In this case Ubuntu keyserver will be + queried for a key via HTTPS by its keyid. This method + is less preferrable because https proxy servers may + require traffic decryption which is equivalent to a + man-in-the-middle attack (a proxy server impersonates + keyserver TLS certificates and has to be explicitly + trusted by the system). + + :param key: A GPG key in ASCII armor format, + including BEGIN and END markers or a keyid. + :type key: (bytes, str) + :raises: GPGKeyError if the key could not be imported + """ + key = key.strip() + if '-' in key or '\n' in key: + # Send everything not obviously a keyid to GPG to import, as + # we trust its validation better than our own. eg. handling + # comments before the key. 
+ log("PGP key found (looks like ASCII Armor format)", level=DEBUG) + if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and + '-----END PGP PUBLIC KEY BLOCK-----' in key): + log("Writing provided PGP key in the binary format", level=DEBUG) + if six.PY3: + key_bytes = key.encode('utf-8') + else: + key_bytes = key + key_name = _get_keyid_by_gpg_key(key_bytes) + key_gpg = _dearmor_gpg_key(key_bytes) + _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) + else: + raise GPGKeyError("ASCII armor markers missing from GPG key") + else: + log("PGP key found (looks like Radix64 format)", level=WARNING) + log("SECURELY importing PGP key from keyserver; " + "full key not provided.", level=WARNING) + # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL + # to retrieve GPG keys. `apt-key adv` command is deprecated as is + # apt-key in general as noted in its manpage. See lp:1433761 for more + # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop + # gpg + key_asc = _get_key_by_keyid(key) + # write the key in GPG format so that apt-key list shows it + key_gpg = _dearmor_gpg_key(key_asc) + _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) + + +def _get_keyid_by_gpg_key(key_material): + """Get a GPG key fingerprint by GPG key material. + Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options. + + :param key_material: ASCII armor-encoded or binary GPG key material + :type key_material: bytes + :raises: GPGKeyError if invalid key material has been provided + :returns: A GPG key fingerprint + :rtype: str + """ + # Use the same gpg command for both Xenial and Bionic + cmd = 'gpg --with-colons --with-fingerprint' + ps = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_material) + if six.PY3: + out = out.decode('utf-8') + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material provided') + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + +def _get_key_by_keyid(keyid): + """Get a key via HTTPS from the Ubuntu keyserver. + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. 
+
+    8-digit (32 bit) key ID
+    https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
+    16-digit (64 bit) key ID
+    https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
+    40-digit key ID:
+    https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
+
+    :param keyid: An 8, 16 or 40 hex digit keyid to find a key for
+    :type keyid: (bytes, str)
+    :returns: A key material for the specified GPG key id
+    :rtype: (str, bytes)
+    :raises: subprocess.CalledProcessError
+    """
+    # options=mr - machine-readable output (disables html wrappers)
+    keyserver_url = ('https://keyserver.ubuntu.com'
+                     '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
+    curl_cmd = ['curl', keyserver_url.format(keyid)]
+    # use proxy server settings in order to retrieve the key
+    return subprocess.check_output(curl_cmd,
+                                   env=env_proxy_settings(['https']))
+
+
+def _dearmor_gpg_key(key_asc):
+    """Convert a GPG key in the ASCII armor format to the binary format.
+
+    :param key_asc: A GPG key in ASCII armor format.
+    :type key_asc: (str, bytes)
+    :returns: A GPG key in binary format
+    :rtype: (str, bytes)
+    :raises: GPGKeyError
+    """
+    ps = subprocess.Popen(['gpg', '--dearmor'],
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          stdin=subprocess.PIPE)
+    out, err = ps.communicate(input=key_asc)
+    # no need to decode output as it is binary (invalid utf-8), only error
+    if six.PY3:
+        err = err.decode('utf-8')
+    if 'gpg: no valid OpenPGP data found.' in err:
+        raise GPGKeyError('Invalid GPG key material. Check your network setup'
+                          ' (MTU, routing, DNS) and/or proxy server settings'
+                          ' as well as destination keyserver status.')
+    else:
+        return out
+
+
+def _write_apt_gpg_keyfile(key_name, key_material):
+    """Write GPG key material into a file at a provided path.
+
+    :param key_name: A key name to use for a key file (could be a fingerprint)
+    :type key_name: str
+    :param key_material: A GPG key material (binary)
+    :type key_material: (str, bytes)
+    """
+    with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
+              'wb') as keyf:
+        keyf.write(key_material)
+
+
+def add_source(source, key=None, fail_invalid=False):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples::
+
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+        'distro' may be used as a noop
+
+    Full list of source specifications supported by the function are:
+
+    'distro': A NOP; i.e. it has no effect.
+    'proposed': the proposed deb spec [2] is written to
+      /etc/apt/sources.list.d/proposed.list
+    'distro-proposed': adds <version>-proposed to the debs [2]
+    'ppa:<ppa-name>': add-apt-repository --yes <ppa-name>
+    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
+    'http://....': add-apt-repository --yes http://...
+    'cloud-archive:<spec>': add-apt-repository -yes cloud-archive:<spec>
+    'cloud:<release>[-staging]': specify a Cloud Archive pocket with
+      optional staging version. If staging is used then the staging PPA [2]
+      will be used. If staging is NOT used then the cloud archive [3] will be
+      added, and the 'ubuntu-cloud-keyring' package will be added for the
+      current distro.
+
+    Otherwise the source is not recognised and this is logged to the juju log.
+    However, no error is raised, unless fail_invalid is True.
+
+    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+        where {} is replaced with the derived pocket name.
+    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
+        main universe multiverse restricted
+        where {} is replaced with the lsb_release codename (e.g. xenial)
+    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
+        to /etc/apt/sources.list.d/cloud-archive.list
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+
+    @param fail_invalid: (boolean) if True, then the function raises a
+    SourceConfigError if there is no matching installation source.
+
+    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
+    valid pocket in CLOUD_ARCHIVE_POCKETS
+    """
+    _mapping = OrderedDict([
+        (r"^distro$", lambda: None),  # This is a NOP
+        (r"^(?:proposed|distro-proposed)$", _add_proposed),
+        (r"^cloud-archive:(.*)$", _add_apt_repository),
+        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
+        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^cloud:(.*)$", _add_cloud_pocket),
+        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+    ])
+    if source is None:
+        source = ''
+    for r, fn in six.iteritems(_mapping):
+        m = re.match(r, source)
+        if m:
+            if key:
+                # Import key before adding the source which depends on it,
+                # as refreshing packages could fail otherwise.
+                try:
+                    import_key(key)
+                except GPGKeyError as e:
+                    raise SourceConfigError(str(e))
+            # call the associated function with the captured groups
+            # raises SourceConfigError on error.
+            fn(*m.groups())
+            break
+    else:
+        # nothing matched. log an error and maybe sys.exit
+        err = "Unknown source: {!r}".format(source)
+        log(err)
+        if fail_invalid:
+            raise SourceConfigError(err)
+
+
+def _add_proposed():
+    """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list
+
+    Uses get_distrib_codename to determine the correct stanza for
+    the deb line.
+
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
+    other architectures PROPOSED_PORTS_POCKET is used for the release.
+    """
+    release = get_distrib_codename()
+    arch = platform.machine()
+    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
+        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
+                                .format(arch))
+    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
+
+
+def _add_apt_repository(spec):
+    """Add the spec using add_apt_repository
+
+    :param spec: the parameter to pass to add_apt_repository
+    :type spec: str
+    """
+    if '{series}' in spec:
+        series = get_distrib_codename()
+        spec = spec.replace('{series}', series)
+    # The software-properties package for bionic properly reacts to proxy
+    # settings passed as environment variables (See lp:1433761). This is not
+    # the case for LTS and non-LTS releases below bionic.
+    _run_with_retries(['add-apt-repository', '--yes', spec],
+                      cmd_env=env_proxy_settings(['https']))
+
+
+def _add_cloud_pocket(pocket):
+    """Add a cloud pocket as /etc/apt/sources.list.d/cloud-archive.list
+
+    Note that this overwrites the existing file if there is one.
+ + This function also converts the simple pocket in to the actual pocket using + the CLOUD_ARCHIVE_POCKETS mapping. + + :param pocket: string representing the pocket to add a deb spec for. + :raises: SourceConfigError if the cloud pocket doesn't exist or the + requested release doesn't match the current distro version. + """ + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + + +def _add_cloud_staging(cloud_archive_release, openstack_release): + """Add the cloud staging repository which is in + ppa:ubuntu-cloud-archive/-staging + + This function checks that the cloud_archive_release matches the current + codename for the distro that charm is being installed on. + + :param cloud_archive_release: string, codename for the release. + :param openstack_release: String, codename for the openstack release. + :raises: SourceConfigError if the cloud_archive_release doesn't match the + current version of the os. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) + cmd = 'add-apt-repository -y {}'.format(ppa) + _run_with_retries(cmd.split(' ')) + + +def _add_cloud_distro_check(cloud_archive_release, openstack_release): + """Add the cloud pocket, but also check the cloud_archive_release against + the current distro, and use the openstack_release as the full lookup. + + This just calls _add_cloud_pocket() with the openstack_release as pocket + to get the correct cloud-archive.list for dpkg to work with. + + :param cloud_archive_release:String, codename for the distro release. + :param openstack_release: String, spec for the release to look up in the + CLOUD_ARCHIVE_POCKETS + :raises: SourceConfigError if this is the wrong distro, or the pocket spec + doesn't exist. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) + + +def _verify_is_ubuntu_rel(release, os_release): + """Verify that the release is in the same as the current ubuntu release. + + :param release: String, lowercase for the release. + :param os_release: String, the os_release being asked for + :raises: SourceConfigError if the release is not the same as the ubuntu + release. + """ + ubuntu_rel = get_distrib_codename() + if release != ubuntu_rel: + raise SourceConfigError( + 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' + 'version ({})'.format(release, os_release, ubuntu_rel)) + + +def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), + retry_message="", cmd_env=None): + """Run a command and retry until success or max_retries is reached. + + :param cmd: The apt command to run. + :type cmd: str + :param max_retries: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :type max_retries: int + :param retry_exitcodes: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :type retry_exitcodes: tuple + :param retry_message: Optional log prefix emitted during retries. + :type retry_message: str + :param: cmd_env: Environment variables to add to the command run. 
+ :type cmd_env: Option[None, Dict[str, str]] + """ + env = get_apt_dpkg_env() + if cmd_env: + env.update(cmd_env) + + if not retry_message: + retry_message = "Failed executing '{}'".format(" ".join(cmd)) + retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) + + retry_count = 0 + result = None + + retry_results = (None,) + retry_exitcodes + while result in retry_results: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > max_retries: + raise + result = e.returncode + log(retry_message) + time.sleep(CMD_RETRY_DELAY) + + +def _run_apt_command(cmd, fatal=False): + """Run an apt command with optional retries. + + :param cmd: The apt command to run. + :type cmd: str + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + """ + if fatal: + _run_with_retries( + cmd, retry_exitcodes=(1, APT_NO_LOCK,), + retry_message="Couldn't acquire DPKG lock") + else: + subprocess.call(cmd, env=get_apt_dpkg_env()) + + +def get_upstream_version(package): + """Determine upstream version based on installed package + + @returns None (if not installed) or the upstream version + """ + cache = apt_cache() + try: + pkg = cache[package] + except Exception: + # the package is unknown to the current apt cache. + return None + + if not pkg.current_ver: + # package is known, but no version is currently installed. + return None + + return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) + + +def get_apt_dpkg_env(): + """Get environment suitable for execution of APT and DPKG tools. + + We keep this in a helper function instead of in a global constant to + avoid execution on import of the library. + :returns: Environment suitable for execution of APT and DPKG tools. + :rtype: Dict[str, str] + """ + # The fallback is used in the event of ``/etc/environment`` not containing + # avalid PATH variable. + return {'DEBIAN_FRONTEND': 'noninteractive', + 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/ubuntu_apt_pkg.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/ubuntu_apt_pkg.py new file mode 100644 index 0000000000000000000000000000000000000000..929a75d7a775921c32368e8cc7950eaa97390047 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -0,0 +1,267 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provide a subset of the ``python-apt`` module API. + +Data collection is done through subprocess calls to ``apt-cache`` and +``dpkg-query`` commands. + +The main purpose for this module is to avoid dependency on the +``python-apt`` python module. + +The indicated python module is a wrapper around the ``apt`` C++ library +which is tightly connected to the version of the distribution it was +shipped on. 
It is not developed in a backward/forward compatible manner. + +This in turn makes it incredibly hard to distribute as a wheel for a piece +of python software that supports a span of distro releases [0][1]. + +Upstream feedback like [2] does not give confidence that this will ever +change, so with this module we get rid of the dependency. + +0: https://github.com/juju-solutions/layer-basic/pull/135 +1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 +2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 +""" + +import locale +import os +import subprocess +import sys + + +class _container(dict): + """Simple container for attributes.""" + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class Package(_container): + """Simple container for package attributes.""" + + +class Version(_container): + """Simple container for version attributes.""" + + +class Cache(object): + """Simulation of ``apt_pkg`` Cache object.""" + def __init__(self, progress=None): + pass + + def __contains__(self, package): + try: + pkg = self.__getitem__(package) + return pkg is not None + except KeyError: + return False + + def __getitem__(self, package): + """Get information about a package from apt and dpkg databases. + + :param package: Name of package + :type package: str + :returns: Package object + :rtype: object + :raises: KeyError, subprocess.CalledProcessError + """ + apt_result = self._apt_cache_show([package])[package] + apt_result['name'] = apt_result.pop('package') + pkg = Package(apt_result) + dpkg_result = self._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + if installed_version: + current_ver = Version({'ver_str': installed_version}) + pkg.current_ver = current_ver + pkg.architecture = dpkg_result.get('architecture') + return pkg + + def _dpkg_list(self, packages): + """Get data from system dpkg database for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about installed packages, keys like + ``dpkg-query --list`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['dpkg-query', '--list'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError as cp: + # ``dpkg-query`` may return an error and at the same time have + # produced useful output, for example when asked for multiple + # packages where some are not installed + if cp.returncode != 1: + raise + output = cp.output + headings = [] + for line in output.splitlines(): + if line.startswith('||/'): + headings = line.split() + headings.pop(0) + continue + elif (line.startswith('|') or line.startswith('+') or + line.startswith('dpkg-query:')): + continue + else: + data = line.split(None, 4) + status = data.pop(0) + if status != 'ii': + continue + pkg = {} + pkg.update({k.lower(): v for k, v in zip(headings, data)}) + if 'name' in pkg: + pkgs.update({pkg['name']: pkg}) + return pkgs + + def _apt_cache_show(self, packages): + """Get data from system apt cache for package.
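+ + A minimal illustrative sketch (assumes the ``apt`` package exists in + the local apt cache; keys mirror the lower-cased ``apt-cache show`` + field names):: + + cache = Cache() + cache._apt_cache_show(['apt'])['apt']['version']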
+ + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about package, keys like + ``apt-cache show`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['apt-cache', 'show', '--no-all-versions'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + previous = None + pkg = {} + for line in output.splitlines(): + if not line: + if 'package' in pkg: + pkgs.update({pkg['package']: pkg}) + pkg = {} + continue + if line.startswith(' '): + if previous and previous in pkg: + pkg[previous] += os.linesep + line.lstrip() + continue + if ':' in line: + kv = line.split(':', 1) + key = kv[0].lower() + if key == 'n': + continue + previous = key + pkg.update({key: kv[1].lstrip()}) + except subprocess.CalledProcessError as cp: + # ``apt-cache`` returns 100 if none of the packages asked for + # exist in the apt cache. + if cp.returncode != 100: + raise + return pkgs + + +class Config(_container): + def __init__(self): + super(Config, self).__init__(self._populate()) + + def _populate(self): + cfgs = {} + cmd = ['apt-config', 'dump'] + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + for line in output.splitlines(): + if not line.startswith("CommandLine"): + k, v = line.split(" ", 1) + cfgs[k] = v.strip(";").strip("\"") + + return cfgs + + +# Backwards compatibility with old apt_pkg module +sys.modules[__name__].config = Config() + + +def init(): + """Compatibility shim that does nothing.""" + pass + + +def upstream_version(version): + """Extracts upstream version from a version string. + + Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/ + apt-pkg/deb/debversion.cc#L259 + + :param version: Version string + :type version: str + :returns: Upstream version + :rtype: str + """ + if version: + version = version.split(':')[-1] + version = version.split('-')[0] + return version + + +def version_compare(a, b): + """Compare the given versions. + + Call out to ``dpkg`` to make sure the code doing the comparison is + compatible with what the ``apt`` library would do. Mimic the return + values.
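+ + Minimal illustrative example (assumes ``dpkg`` is installed):: + + version_compare('2:1.3-1', '2:1.2-1') # 1: first version is greater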
+ + Upstream reference: + https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html + ?highlight=version_compare#apt_pkg.version_compare + + :param a: version string + :type a: str + :param b: version string + :type b: str + :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b, + <0 if ``a`` is smaller than ``b`` + :rtype: int + :raises: subprocess.CalledProcessError, RuntimeError + """ + for op in ('gt', 1), ('eq', 0), ('lt', -1): + try: + subprocess.check_call(['dpkg', '--compare-versions', + a, op[0], b], + stderr=subprocess.STDOUT, + universal_newlines=True) + return op[1] + except subprocess.CalledProcessError as cp: + if cp.returncode == 1: + continue + raise + else: + raise RuntimeError('Unable to compare "{}" and "{}", according to ' + 'our logic they are neither greater, equal nor ' + 'less than each other.'.format(a, b)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/osplatform.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/osplatform.py new file mode 100644 index 0000000000000000000000000000000000000000..78c81af5955caee51271c830d58cacac2cab9bcc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/osplatform.py @@ -0,0 +1,46 @@ +import platform +import os + + +def get_platform(): + """Return the current OS platform. + + For example: if current os platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. + """ + # linux_distribution is deprecated and was removed in Python 3.8 + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" + elif "elementary" in current_platform: + # ElementaryOS fails to run tests locally without this. + return "ubuntu" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) + + +def _get_platform_from_fs(): + """Get Platform from /etc/os-release.""" + with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: + content = dict( + line.split('=', 1) + for line in fin.read().splitlines() + if '=' in line + ) + for k, v in content.items(): + content[k] = v.strip('"') + return content["NAME"] diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/payload/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/payload/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ee55cb3d2baddb556df910f1d41638c3c7f39c59 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/payload/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"Tools for working with files injected into a charm just before deployment." diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/payload/archive.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/payload/archive.py new file mode 100644 index 0000000000000000000000000000000000000000..7fc453f523cf49f0c123839a347c4452c6d465ca --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/payload/archive.py @@ -0,0 +1,71 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import tarfile +import zipfile +from charmhelpers.core import ( + host, + hookenv, +) + + +class ArchiveError(Exception): + pass + + +def get_archive_handler(archive_name): + if os.path.isfile(archive_name): + if tarfile.is_tarfile(archive_name): + return extract_tarfile + elif zipfile.is_zipfile(archive_name): + return extract_zipfile + else: + # look at the file name + for ext in ('.tar', '.tar.gz', '.tgz', 'tar.bz2', '.tbz2', '.tbz'): + if archive_name.endswith(ext): + return extract_tarfile + for ext in ('.zip', '.jar'): + if archive_name.endswith(ext): + return extract_zipfile + + +def archive_dest_default(archive_name): + archive_file = os.path.basename(archive_name) + return os.path.join(hookenv.charm_dir(), "archives", archive_file) + + +def extract(archive_name, destpath=None): + handler = get_archive_handler(archive_name) + if handler: + if not destpath: + destpath = archive_dest_default(archive_name) + if not os.path.isdir(destpath): + host.mkdir(destpath) + handler(archive_name, destpath) + return destpath + else: + raise ArchiveError("No handler for archive") + + +def extract_tarfile(archive_name, destpath): + "Unpack a tar archive, optionally compressed" + archive = tarfile.open(archive_name) + archive.extractall(destpath) + + +def extract_zipfile(archive_name, destpath): + "Unpack a zip file" + archive = zipfile.ZipFile(archive_name) + archive.extractall(destpath) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/payload/execd.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/payload/execd.py new file mode 100644 index 0000000000000000000000000000000000000000..1502aa0b596f0b1a2017ccb4543a35999774431d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/charmhelpers/payload/execd.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# Copyright 2014-2015 Canonical Limited. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import subprocess +from charmhelpers.core import hookenv + + +def default_execd_dir(): + return os.path.join(os.environ['CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within execd_dir. + """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT): + """Run command for each module within execd_dir which defines it.""" + for submodule_path in execd_submodule_paths(command, execd_dir): + try: + subprocess.check_output(submodule_path, stderr=stderr, + universal_newlines=True) + except subprocess.CalledProcessError as e: + hookenv.log("Error ({}) running {}. Output: {}".format( + e.returncode, e.cmd, e.output)) + if die_on_error: + sys.exit(e.returncode) + + +def execd_preinstall(execd_dir=None): + """Run charm-pre-install for each module within execd_dir.""" + execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/compat b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/compat new file mode 100644 index 0000000000000000000000000000000000000000..7f8f011eb73d6043d2e6db9d2c101195ae2801f2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/compat @@ -0,0 +1 @@ +7 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/control b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/control new file mode 100644 index 0000000000000000000000000000000000000000..c4992afd94fd22c6449365362dd96bc53ae6d404 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/control @@ -0,0 +1,20 @@ +Source: charmhelpers +Maintainer: Matthew Wedgwood +Section: python +Priority: optional +Build-Depends: python-all (>= 2.6.6-3), debhelper (>= 7) +Standards-Version: 3.9.1 + +Package: python-charmhelpers +Architecture: all +Depends: ${misc:Depends}, ${python:Depends} +Description: UNKNOWN + ============ + CharmHelpers + ============ + . + CharmHelpers provides an opinionated set of tools for building Juju + charms that work together. In addition to basic tasks like interacting + with the charm environment and the machine it runs on, it also helps + you build hooks and establish relations effortlessly. + .
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/rules b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/rules new file mode 100755 index 0000000000000000000000000000000000000000..7f2101b365a870adf54c565999245eedcc416810 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/rules @@ -0,0 +1,9 @@ +#!/usr/bin/make -f + +# This file was automatically generated by stdeb 0.6.0+git at +# Fri, 04 Jan 2013 15:14:11 -0600 + +%: + dh $@ --with python2 --buildsystem=python_distutils + + diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/source/format b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/source/format new file mode 100644 index 0000000000000000000000000000000000000000..163aaf8d82b6c54f23c45f32895dbdfdcc27b047 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/Makefile b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..17396a1fddc3e317e197d7b9d73a6eace214b065 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/Makefile @@ -0,0 +1,177 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
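+ +# Usage sketch (assumes Sphinx is installed): the variables above can be +# overridden on the command line, for example +# make html SPHINXBUILD=/path/to/sphinx-build +# make latexpdf PAPER=a4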
+ +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/CharmHelpers.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/CharmHelpers.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/CharmHelpers" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/CharmHelpers" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)."
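+ +# The latexpdf target below generates the LaTeX sources (as the latex target +# above does) and then delegates to the Makefile that Sphinx generates in +# $(BUILDDIR)/latex to run pdflatex.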
+ +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/_extensions/automembersummary.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/_extensions/automembersummary.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1e707ab4422d738ae006b2c9d34647148da4c5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/_extensions/automembersummary.py @@ -0,0 +1,84 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
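+ +# A Sphinx extension providing an ``automembersummary`` directive: an +# Autosummary variant that, given a module, emits one summary entry per +# public member defined in that module (imported names, private members and +# instances are skipped).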
+ + +import inspect + +from docutils.parsers.rst import directives +from sphinx.ext.autosummary import Autosummary +from sphinx.ext.autosummary import get_import_prefixes_from_env +from sphinx.ext.autosummary import import_by_name + + +class AutoMemberSummary(Autosummary): + required_arguments = 0 + optional_arguments = 0 + final_argument_whitespace = False + has_content = True + option_spec = { + 'toctree': directives.unchanged, + 'nosignatures': directives.flag, + 'template': directives.unchanged, + } + + def get_items(self, names): + env = self.state.document.settings.env + prefixes = get_import_prefixes_from_env(env) + + items = [] + prefix = '' + shorten = '' + + def _get_items(name): + _items = super(AutoMemberSummary, self).get_items([shorten + name]) + for dn, sig, summary, rn in _items: + items.append(('%s%s' % (prefix, dn), sig, summary, rn)) + + for name in names: + if '~' in name: + prefix, name = name.split('~') + shorten = '~' + else: + prefix = '' + shorten = '' + + try: + real_name, obj, parent, _ = import_by_name(name, prefixes=prefixes) + except ImportError: + self.warn('failed to import %s' % name) + continue + + if not inspect.ismodule(obj): + _get_items(name) + continue + + for member in dir(obj): + if member.startswith('_'): + continue + mobj = getattr(obj, member) + if hasattr(mobj, '__module__'): + if not mobj.__module__.startswith(real_name): + continue # skip imported classes & functions + elif hasattr(mobj, '__name__'): + if not mobj.__name__.startswith(real_name): + continue # skip imported modules + else: + continue # skip instances + _get_items('%s.%s' % (name, member)) + + return items + + +def setup(app): + app.add_directive('automembersummary', AutoMemberSummary) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.cli.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.cli.rst new file mode 100644 index 0000000000000000000000000000000000000000..749ad249f07b68ba5886cdbdc0c9e8b78d5d16b2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.cli.rst @@ -0,0 +1,24 @@ +charmhelpers.cli package +======================== + +charmhelpers.cli.commands module +-------------------------------- + +.. automodule:: charmhelpers.cli.commands + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.cli.host module +---------------------------- + +.. automodule:: charmhelpers.cli.host + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: charmhelpers.cli + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.ansible.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.ansible.rst new file mode 100644 index 0000000000000000000000000000000000000000..a6644937d0f7e2d508e500dd46f33ad6d47fb346 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.ansible.rst @@ -0,0 +1,7 @@ +charmhelpers.contrib.ansible package +==================================== + +.. 
automodule:: charmhelpers.contrib.ansible + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.charmhelpers.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.charmhelpers.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d5222db297171acf80bce09ec22b47a5611ac39 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.charmhelpers.rst @@ -0,0 +1,7 @@ +charmhelpers.contrib.charmhelpers package +========================================= + +.. automodule:: charmhelpers.contrib.charmhelpers + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.charmsupport.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.charmsupport.rst new file mode 100644 index 0000000000000000000000000000000000000000..5f33aeb027320a708a2980f03cf1d7ae6bc6c893 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.charmsupport.rst @@ -0,0 +1,24 @@ +charmhelpers.contrib.charmsupport package +========================================= + +charmhelpers.contrib.charmsupport.nrpe module +--------------------------------------------- + +.. automodule:: charmhelpers.contrib.charmsupport.nrpe + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.charmsupport.volumes module +------------------------------------------------ + +.. automodule:: charmhelpers.contrib.charmsupport.volumes + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: charmhelpers.contrib.charmsupport + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.hahelpers.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.hahelpers.rst new file mode 100644 index 0000000000000000000000000000000000000000..129db24def3584ee42ae64de3e87123eed585ca1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.hahelpers.rst @@ -0,0 +1,24 @@ +charmhelpers.contrib.hahelpers package +====================================== + +charmhelpers.contrib.hahelpers.apache module +-------------------------------------------- + +.. automodule:: charmhelpers.contrib.hahelpers.apache + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.hahelpers.cluster module +--------------------------------------------- + +.. automodule:: charmhelpers.contrib.hahelpers.cluster + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: charmhelpers.contrib.hahelpers + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.network.ovs.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.network.ovs.rst new file mode 100644 index 0000000000000000000000000000000000000000..98ab8cb1e2f42ff434da1b26d1519bf2d440c76f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.network.ovs.rst @@ -0,0 +1,7 @@ +charmhelpers.contrib.network.ovs package +======================================== + +.. 
automodule:: charmhelpers.contrib.network.ovs + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.network.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.network.rst new file mode 100644 index 0000000000000000000000000000000000000000..c7b89b252b100033a122a1717fd753c21893096f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.network.rst @@ -0,0 +1,20 @@ +charmhelpers.contrib.network package +==================================== + +.. toctree:: + + charmhelpers.contrib.network.ovs + +charmhelpers.contrib.network.ip module +-------------------------------------- + +.. automodule:: charmhelpers.contrib.network.ip + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: charmhelpers.contrib.network + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.openstack.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.openstack.rst new file mode 100644 index 0000000000000000000000000000000000000000..d969ed62678e227a14640db45e9a851465341762 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.openstack.rst @@ -0,0 +1,52 @@ +charmhelpers.contrib.openstack package +====================================== + +.. toctree:: + + charmhelpers.contrib.openstack.templates + +charmhelpers.contrib.openstack.alternatives module +-------------------------------------------------- + +.. automodule:: charmhelpers.contrib.openstack.alternatives + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.openstack.context module +--------------------------------------------- + +.. automodule:: charmhelpers.contrib.openstack.context + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.openstack.neutron module +--------------------------------------------- + +.. automodule:: charmhelpers.contrib.openstack.neutron + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.openstack.templating module +------------------------------------------------ + +.. automodule:: charmhelpers.contrib.openstack.templating + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.openstack.utils module +------------------------------------------- + +.. automodule:: charmhelpers.contrib.openstack.utils + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: charmhelpers.contrib.openstack + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.openstack.templates.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.openstack.templates.rst new file mode 100644 index 0000000000000000000000000000000000000000..f9eaa2f8581744fdc42b632f743636f715f149a0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.openstack.templates.rst @@ -0,0 +1,7 @@ +charmhelpers.contrib.openstack.templates package +================================================ + +.. 
automodule:: charmhelpers.contrib.openstack.templates + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.peerstorage.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.peerstorage.rst new file mode 100644 index 0000000000000000000000000000000000000000..e33e82fc09d84a1c8f29202c913e21ac43d001ac --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.peerstorage.rst @@ -0,0 +1,7 @@ +charmhelpers.contrib.peerstorage package +======================================== + +.. automodule:: charmhelpers.contrib.peerstorage + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.python.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.python.rst new file mode 100644 index 0000000000000000000000000000000000000000..98d6a62dc03df54fa70494f3301f4f793d5143f1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.python.rst @@ -0,0 +1,40 @@ +charmhelpers.contrib.python package +=================================== + +charmhelpers.contrib.python.debug module +---------------------------------------- + +.. automodule:: charmhelpers.contrib.python.debug + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.python.packages module +------------------------------------------- + +.. automodule:: charmhelpers.contrib.python.packages + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.python.rpdb module +--------------------------------------- + +.. automodule:: charmhelpers.contrib.python.rpdb + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.python.version module +------------------------------------------ + +.. automodule:: charmhelpers.contrib.python.version + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: charmhelpers.contrib.python + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.rst new file mode 100644 index 0000000000000000000000000000000000000000..d3655431936555d7e3f9b5a1c2ad6aacd299bfc5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.rst @@ -0,0 +1,23 @@ +charmhelpers.contrib package +============================ + +.. toctree:: + + charmhelpers.contrib.ansible + charmhelpers.contrib.charmhelpers + charmhelpers.contrib.charmsupport + charmhelpers.contrib.hahelpers + charmhelpers.contrib.network + charmhelpers.contrib.openstack + charmhelpers.contrib.peerstorage + charmhelpers.contrib.python + charmhelpers.contrib.saltstack + charmhelpers.contrib.ssl + charmhelpers.contrib.storage + charmhelpers.contrib.templating + charmhelpers.contrib.unison + +.. 
automodule:: charmhelpers.contrib + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.saltstack.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.saltstack.rst new file mode 100644 index 0000000000000000000000000000000000000000..ca14b85d87263152b6f90d7009a8935ca5888002 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.saltstack.rst @@ -0,0 +1,7 @@ +charmhelpers.contrib.saltstack package +====================================== + +.. automodule:: charmhelpers.contrib.saltstack + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.ssl.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.ssl.rst new file mode 100644 index 0000000000000000000000000000000000000000..8b7cde87776fb49c116ea4911dbc0ff5f000a19b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.ssl.rst @@ -0,0 +1,16 @@ +charmhelpers.contrib.ssl package +================================ + +charmhelpers.contrib.ssl.service module +--------------------------------------- + +.. automodule:: charmhelpers.contrib.ssl.service + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: charmhelpers.contrib.ssl + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.storage.linux.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.storage.linux.rst new file mode 100644 index 0000000000000000000000000000000000000000..5acb07394dab5b5cf7017d4d4f46f693d2023abf --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.storage.linux.rst @@ -0,0 +1,40 @@ +charmhelpers.contrib.storage.linux package +========================================== + +charmhelpers.contrib.storage.linux.ceph module +---------------------------------------------- + +.. automodule:: charmhelpers.contrib.storage.linux.ceph + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.storage.linux.loopback module +-------------------------------------------------- + +.. automodule:: charmhelpers.contrib.storage.linux.loopback + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.storage.linux.lvm module +--------------------------------------------- + +.. automodule:: charmhelpers.contrib.storage.linux.lvm + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.storage.linux.utils module +----------------------------------------------- + +.. automodule:: charmhelpers.contrib.storage.linux.utils + :members: + :undoc-members: + :show-inheritance: + + +.. 
automodule:: charmhelpers.contrib.storage.linux + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.storage.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.storage.rst new file mode 100644 index 0000000000000000000000000000000000000000..a3adfdfc3ebda31ff24001a2a243ce98249ba5c5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.storage.rst @@ -0,0 +1,11 @@ +charmhelpers.contrib.storage package +==================================== + +.. toctree:: + + charmhelpers.contrib.storage.linux + +.. automodule:: charmhelpers.contrib.storage + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.templating.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.templating.rst new file mode 100644 index 0000000000000000000000000000000000000000..c3eb2a24312eda3c725949ad646977776ee76f0c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.templating.rst @@ -0,0 +1,24 @@ +charmhelpers.contrib.templating package +======================================= + +charmhelpers.contrib.templating.contexts module +----------------------------------------------- + +.. automodule:: charmhelpers.contrib.templating.contexts + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.contrib.templating.pyformat module +----------------------------------------------- + +.. automodule:: charmhelpers.contrib.templating.pyformat + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: charmhelpers.contrib.templating + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.unison.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.unison.rst new file mode 100644 index 0000000000000000000000000000000000000000..af3a5254608508cfa253f71e741f4423ffb77c39 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.contrib.unison.rst @@ -0,0 +1,7 @@ +charmhelpers.contrib.unison package +=================================== + +.. automodule:: charmhelpers.contrib.unison + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.coordinator.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.coordinator.rst new file mode 100644 index 0000000000000000000000000000000000000000..13eeb677d5383a681d43b385aee14691bfcccac2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.coordinator.rst @@ -0,0 +1,10 @@ +charmhelpers.coordinator package +================================ + +charmhelpers.coordinator module +------------------------------- + +.. 
automodule:: charmhelpers.coordinator + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.decorators.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.decorators.rst new file mode 100644 index 0000000000000000000000000000000000000000..5b4fb402a7d03916de39fe39a8ab418f027a1463 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.decorators.rst @@ -0,0 +1,7 @@ +charmhelpers.core.decorators +============================ + +.. automodule:: charmhelpers.core.decorators + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.fstab.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.fstab.rst new file mode 100644 index 0000000000000000000000000000000000000000..a4c9f9488c91c062dac8bf482032fdd03d8a5aaa --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.fstab.rst @@ -0,0 +1,7 @@ +charmhelpers.core.fstab +======================= + +.. automodule:: charmhelpers.core.fstab + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.hookenv.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.hookenv.rst new file mode 100644 index 0000000000000000000000000000000000000000..70a0df8891af1ceb085e4574abbea7409b6a35ca --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.hookenv.rst @@ -0,0 +1,12 @@ +charmhelpers.core.hookenv +========================= + +.. automembersummary:: + :nosignatures: + + ~charmhelpers.core.hookenv + +.. automodule:: charmhelpers.core.hookenv + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.host.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.host.rst new file mode 100644 index 0000000000000000000000000000000000000000..7b6b9f0cf835c61963ab5a190b47720bdef5030c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.host.rst @@ -0,0 +1,12 @@ +charmhelpers.core.host +====================== + +.. automembersummary:: + :nosignatures: + + ~charmhelpers.core.host + +.. automodule:: charmhelpers.core.host + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.rst new file mode 100644 index 0000000000000000000000000000000000000000..0aec7b766b12702c826fe21c41416700c4239945 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.rst @@ -0,0 +1,19 @@ +charmhelpers.core package +========================= + +.. toctree:: + + charmhelpers.core.decorators + charmhelpers.core.fstab + charmhelpers.core.hookenv + charmhelpers.core.host + charmhelpers.core.strutils + charmhelpers.core.sysctl + charmhelpers.core.templating + charmhelpers.core.unitdata + charmhelpers.core.services + +.. 
automodule:: charmhelpers.core + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.services.base.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.services.base.rst new file mode 100644 index 0000000000000000000000000000000000000000..79ef6cb59c3722050b4ccfb744a5b5929589b1c1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.services.base.rst @@ -0,0 +1,12 @@ +charmhelpers.core.services.base +=============================== + +.. automembersummary:: + :nosignatures: + + ~charmhelpers.core.services.base + +.. automodule:: charmhelpers.core.services.base + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.services.helpers.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.services.helpers.rst new file mode 100644 index 0000000000000000000000000000000000000000..ccba56ce1434d86e35d21a3b452fdbe5eeaa8be1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.services.helpers.rst @@ -0,0 +1,12 @@ +charmhelpers.core.services.helpers +================================== + +.. automembersummary:: + :nosignatures: + + ~charmhelpers.core.services.helpers + +.. automodule:: charmhelpers.core.services.helpers + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.services.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.services.rst new file mode 100644 index 0000000000000000000000000000000000000000..515103c4313ac0f069aea6847911693807f2b8da --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.services.rst @@ -0,0 +1,12 @@ +charmhelpers.core.services +========================== + +.. toctree:: + + charmhelpers.core.services.base + charmhelpers.core.services.helpers + +.. automodule:: charmhelpers.core.services + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.strutils.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.strutils.rst new file mode 100644 index 0000000000000000000000000000000000000000..3b96809fb4540af6079a0400b835617c879cb991 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.strutils.rst @@ -0,0 +1,7 @@ +charmhelpers.core.strutils +============================ + +.. automodule:: charmhelpers.core.strutils + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.sysctl.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.sysctl.rst new file mode 100644 index 0000000000000000000000000000000000000000..45b960fd94dac9c58efe0b7d9b525985e0d97391 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.sysctl.rst @@ -0,0 +1,7 @@ +charmhelpers.core.sysctl +============================ + +.. 
automodule:: charmhelpers.core.sysctl + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.templating.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.templating.rst new file mode 100644 index 0000000000000000000000000000000000000000..c131d97beafe3d4c0e6bbc0fd679f286ea21f27d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.templating.rst @@ -0,0 +1,7 @@ +charmhelpers.core.templating +============================ + +.. automodule:: charmhelpers.core.templating + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.unitdata.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.unitdata.rst new file mode 100644 index 0000000000000000000000000000000000000000..2b50978de002ccc15fe049abd694615c465fd546 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.core.unitdata.rst @@ -0,0 +1,7 @@ +charmhelpers.core.unitdata +========================== + +.. automodule:: charmhelpers.core.unitdata + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.archiveurl.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.archiveurl.rst new file mode 100644 index 0000000000000000000000000000000000000000..b9d09447755143364143e6bff8d52f53329a0f41 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.archiveurl.rst @@ -0,0 +1,7 @@ +charmhelpers.fetch.archiveurl module +==================================== + +.. automodule:: charmhelpers.fetch.archiveurl + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.bzrurl.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.bzrurl.rst new file mode 100644 index 0000000000000000000000000000000000000000..1be591097884beb4480db1054fb2cd3b01498637 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.bzrurl.rst @@ -0,0 +1,7 @@ +charmhelpers.fetch.bzrurl module +================================ + +.. automodule:: charmhelpers.fetch.bzrurl + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.python.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.python.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed89d428c526fcee5d5f06d273194f4eb03c170c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.python.rst @@ -0,0 +1,7 @@ +charmhelpers.fetch.python module +==================================== + +.. 
automodule:: charmhelpers.fetch.python + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.rst new file mode 100644 index 0000000000000000000000000000000000000000..6fb42ef2d7e3df3d4643a9bc480734c1c56e6bc3 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.rst @@ -0,0 +1,37 @@ +charmhelpers.fetch package +========================== + +.. automodule:: charmhelpers.fetch + :members: + :undoc-members: + :show-inheritance: + + +charmhelpers.fetch.archiveurl module +------------------------------------ + +.. toctree:: + + charmhelpers.fetch.archiveurl + +charmhelpers.fetch.bzrurl module +-------------------------------- + +.. toctree:: + + charmhelpers.fetch.bzrurl + +charmhelpers.fetch.snap module +------------------------------ + +.. toctree:: + + charmhelpers.fetch.snap + +charmhelpers.fetch.python module +-------------------------------- + +.. toctree:: + + charmhelpers.fetch.python + diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.snap.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.snap.rst new file mode 100644 index 0000000000000000000000000000000000000000..882a88e2dd9a9e844846f3fecda7d21e08f0803f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.fetch.snap.rst @@ -0,0 +1,26 @@ +charmhelpers.fetch.snap package +=============================== + +.. automodule:: charmhelpers.fetch.snap + :members: + :undoc-members: + :show-inheritance: + +Examples +-------- + +.. code-block:: python + + snap_install('hello-world', '--classic', '--stable') + snap_install(['hello-world', 'htop']) + +.. code-block:: python + + snap_refresh('hello-world', '--classic', '--stable') + snap_refresh(['hello-world', 'htop']) + +.. code-block:: python + + snap_remove('hello-world') + snap_remove(['hello-world', 'htop']) + diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.payload.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.payload.rst new file mode 100644 index 0000000000000000000000000000000000000000..b1d9607ad115e37806b36ab6d8d54bae665cf917 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.payload.rst @@ -0,0 +1,24 @@ +charmhelpers.payload package +============================ + +charmhelpers.payload.archive module +----------------------------------- + +.. automodule:: charmhelpers.payload.archive + :members: + :undoc-members: + :show-inheritance: + +charmhelpers.payload.execd module +--------------------------------- + +.. automodule:: charmhelpers.payload.execd + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: charmhelpers.payload + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.rst new file mode 100644 index 0000000000000000000000000000000000000000..14266fbe55f19887d4b454a13094852bdbb61b55 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/api/charmhelpers.rst @@ -0,0 +1,17 @@ +API Documentation +================= + +..
toctree:: + :maxdepth: 3 + + charmhelpers.core + charmhelpers.contrib + charmhelpers.fetch + charmhelpers.payload + charmhelpers.cli + charmhelpers.coordinator + +.. automodule:: charmhelpers + :members: + :undoc-members: + :show-inheritance: diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/conf.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..f495d5511209971c53b0098178438246a3fee94b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/conf.py @@ -0,0 +1,266 @@ +# -*- coding: utf-8 -*- +# +# Charm Helpers documentation build configuration file, created by +# sphinx-quickstart on Fri Jun 6 10:34:44 2014. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('../')) +sys.path.append(os.path.abspath('_extensions/')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'automembersummary', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Charm Helpers' +copyright = u'2014-2018, Canonical Ltd.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +version_file = os.path.abspath( + os.path.join(os.path.dirname(__file__), '../', 'VERSION')) +VERSION = open(version_file).read().strip() +# The short X.Y version. +version = VERSION +# The full version, including alpha/beta/rc tags. +release = VERSION + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build', '_extensions'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). 
+#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = ['_themes'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'CharmHelpersdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'CharmHelpers.tex', u'Charm Helpers Documentation', + u'Charm Helpers Developers', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'charmhelpers', u'Charm Helpers Documentation', + [u'Charm Helpers Developers'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'CharmHelpers', u'Charm Helpers Documentation', + u'Charm Helpers Developers', 'CharmHelpers', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/contributing.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/contributing.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed4728b5950eb9128d6536ea34c2647968614389 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/contributing.rst @@ -0,0 +1,49 @@ +Contributing +============ + +All contributions, both code and documentation, are welcome! + +Source +------ + +The source code is located at https://github.com/juju/charm-helpers. To +submit contributions you'll need to create a GitHub account if you do not +already have one. + +To get the code:: + + $ git clone https://github.com/juju/charm-helpers + +To build and run tests:: + + $ cd charm-helpers + $ make + +Submitting a Merge Proposal +--------------------------- + +Run ``make test`` and ensure all tests pass. Then commit your changes to a +`fork `_ and create a +`pull request `_. + +Open Bugs +--------- + +If you're looking for something to work on, the open bug/feature list can be +found at https://bugs.launchpad.net/charm-helpers. 
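+ +If you pick one of these up, a faster feedback loop than the full ``make`` run is useful while iterating on a fix; given the project's nose configuration in ``setup.cfg``, something like the following should work (the package path here is an assumption, so point it at whatever you are changing):: + + $ nosetests -v tests/cli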
+ +Documentation +------------- + +If you'd like to contribute to the documentation, please refer to the ``HACKING.md`` +document in the root of the source tree for instructions on building the documentation. + +Contributions to the :doc:`example-index` section of the documentation are +especially welcome, and are easy to add. Simply add a new ``.rst`` file under +``charmhelpers/docs/examples``. + +Getting Help +------------ + +If you need help you can find it in ``#juju`` on the Freenode IRC network. Come +talk to us - we're a friendly bunch! diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/example-index.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/example-index.rst new file mode 100644 index 0000000000000000000000000000000000000000..e0251c08a1c8e471081a5007c6faede6d92f2d52 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/example-index.rst @@ -0,0 +1,11 @@ +Examples +======== + +If you'd like to contribute an example (please do!), please refer to the +:doc:`contributing` page for instructions on how to do so. + +.. toctree:: + :maxdepth: 1 + :glob: + + examples/* diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/examples/config.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/examples/config.rst new file mode 100644 index 0000000000000000000000000000000000000000..d6d60895729bd038d161bb458773c0d410238b1b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/examples/config.rst @@ -0,0 +1,88 @@ +Interacting with Charm Configuration +==================================== + +The :func:`charmhelpers.core.hookenv.config`, when called with no arguments, +returns a :class:`charmhelpers.core.hookenv.Config` instance - a dictionary +representation of a charm's ``config.yaml`` file. This object can +be used to: + +* get a charm's current config values +* check if a config value has changed since the last hook invocation +* view the previous value of a changed config item +* save arbitrary key/value data for use in a later hook + +For the following examples we'll assume our charm has a config.yaml file that +looks like this:: + + options: + app-name: + type: string + default: "My App" + description: "Name of your app." 
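+ +One point worth noting before the examples: ``Config`` subclasses ``dict``, so ordinary dictionary access works throughout. A minimal sketch (the ``'fallback'`` default here is illustrative, not part of the charm above):: + + # hooks/hooks.py + + from charmhelpers.core import hookenv + + config = hookenv.config() # Config instance for this charm + name = config.get('app-name', 'fallback') # dict-style access with a default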
+ + +Getting charm config values +--------------------------- + +:: + + # hooks/hooks.py + + from charmhelpers.core import hookenv + + hooks = hookenv.Hooks() + + @hooks.hook('install') + def install(): + config = hookenv.config() + + assert config['app-name'] == 'My App' + +Checking if a config value has changed +-------------------------------------- + +Let's say the user changes the ``app-name`` config value at runtime by +executing the following juju command:: + + juju set mycharm app-name="My New App" + +which triggers a ``config-changed`` hook:: + + # hooks/hooks.py + + from charmhelpers.core import hookenv + + hooks = hookenv.Hooks() + + @hooks.hook('config-changed') + def config_changed(): + config = hookenv.config() + + assert config.changed('app-name') + assert config['app-name'] == 'My New App' + assert config.previous('app-name') == 'My App' + +Saving arbitrary key/value data +------------------------------- + +The :class:`Config <charmhelpers.core.hookenv.Config>` object may also be +used to store arbitrary data that you want to persist across hook +invocations:: + + # hooks/hooks.py + + from charmhelpers.core import hookenv + + hooks = hookenv.Hooks() + + @hooks.hook('install') + def install(): + config = hookenv.config() + + config['mykey'] = 'myval' + + @hooks.hook('config-changed') + def config_changed(): + config = hookenv.config() + + assert config['mykey'] == 'myval' diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/examples/services.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/examples/services.rst new file mode 100644 index 0000000000000000000000000000000000000000..96b2971e35913b0ee12505b3af80e250d7f4de61 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/examples/services.rst @@ -0,0 +1,160 @@ +Managing Charms with the Services Framework +=========================================== + +Traditional charm authoring is focused on implementing hooks. That is, +the charm author is thinking in terms of "What hook am I handling; what +does this hook need to do?" However, in most cases, the real question +should be "Do I have the information I need to configure and start this +piece of software and, if so, what are the steps for doing so?" The +services framework tries to bring the focus to the data and the +setup tasks, in the most declarative way possible. + + +Hooks as Data Sources for Service Definitions +--------------------------------------------- + +While the ``install``, ``start``, and ``stop`` hooks clearly represent +state transitions, all of the other hooks are really notifications of +changes in data from external sources, such as config data values in +the case of ``config-changed`` or relation data for any of the +``*-relation-*`` hooks. Moreover, many charms that rely on external +data from config options or relations find themselves needing some +piece of external data before they can even configure and start anything, +and so the ``start`` hook loses its semantic usefulness. + +If data is required from multiple sources, it even becomes impossible to +know which hook will be executing when all required data is available. +(E.g., which relation will be the last to execute; will the required +config option be set before or after all of the relations are available?) +One common solution to this problem is to create "flag files" to track +whether a given bit of data has been observed, but this gets cluttered +quickly, and it becomes difficult to understand what conditions lead to which actions.
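+ +To make that anti-pattern concrete, here is a minimal sketch of the "flag +file" approach (``configure_and_start`` is a hypothetical helper standing in +for whatever setup the charm performs; it is not part of ``charmhelpers``):: + + # hooks/hooks.py (illustrative only) + import os + + # Each data-bearing hook touches a flag file once its data has been seen, + # and every hook then re-checks whether all of the flags are present. + if os.path.exists('.db-seen') and os.path.exists('.config-seen'): + configure_and_start() # hypothetical helper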
+ +When using the services framework, all hooks other than ``install`` +are handled by a single call to :meth:`manager.manage() <charmhelpers.core.services.base.ServiceManager.manage>`. +This can be done with symlinks, or by having a ``definitions.py`` file +containing the service definitions, and every hook can be reduced to:: + + #!/usr/bin/env python + from charmhelpers.core.services import ServiceManager + from definitions import service_definitions + ServiceManager(service_definitions).manage() + +So, what magic goes into ``definitions.py``? + + +Service Definitions Overview +---------------------------- + +The format of service definitions is fully documented in +:class:`~charmhelpers.core.services.base.ServiceManager`, but a definition most commonly +consists of one or more dictionaries containing four items: the name of +a service being managed, the list of data contexts required before the service +can be configured and started, the list of actions to take when the data +requirements are satisfied, and a list of ports to open. The service name +generally maps to an Upstart job, the required data contexts are ``dict`` +or ``dict``-like structures that contain the data once available (usually +subclasses of :class:`~charmhelpers.core.services.helpers.RelationContext` +or wrappers around :func:`charmhelpers.core.hookenv.config`), and the actions +are just callbacks that are passed the service name for which they are executing +(or a subclass of :class:`~charmhelpers.core.services.base.ManagerCallback` +for more complex cases). + +An example service definition might be:: + + service_definitions = [ + { + 'service': 'wordpress', + 'ports': [80], + 'required_data': [config(), MySQLRelation()], + 'data_ready': [ + actions.install_frontend, + services.render_template(source='wp-config.php.j2', + target=os.path.join(WP_INSTALL_DIR, 'wp-config.php')), + services.render_template(source='wordpress.upstart.j2', + target='/etc/init/wordpress'), + ], + }, + ] + +Each time a hook is fired, the conditions will be checked (in this case, just +that MySQL is available) and, if met, the appropriate actions taken (correct +front-end installed, config files written / updated, and the Upstart job +(re)started, implicitly). + + +Required Data Contexts +---------------------- + +Required data contexts are, at the most basic level, just dictionaries, +and if they evaluate as True (e.g., if they contain data), their condition is +considered to be met. A simple sentinel could just be a function that returns +data if available or an empty ``dict`` otherwise. + +For the common case of gathering data from relations, the +:class:`~charmhelpers.core.services.helpers.RelationContext` base class gathers +data from a named relation and checks for a set of required keys to be present +and set on the relation before considering that relation complete.
For example, +a basic MySQL context might be:: + + class MySQLRelation(RelationContext): + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + +Because there could potentially be multiple units on a given relation, and +to prevent conflicts when the data contexts are merged to be sent to templates +(see below), the data for a ``RelationContext`` is nested in the following way:: + + relation[relation.name][unit_number][relation_key] + +For example, to get the host of the first MySQL unit (``mysql/0``):: + + mysql = MySQLRelation() + unit_0_host = mysql[mysql.name][0]['host'] + +Note that only units that have set values for all of the required keys are +included in the list, and if no units have set all of the required keys, +instantiating the ``RelationContext`` will result in an empty list. + + +Data-Ready Actions +------------------ + +When a hook is triggered and all of the ``required_data`` contexts are complete, +the "data ready" actions are executed. These callbacks are passed +the service name from the ``service`` key of the service definition for which +they are running, and are responsible for (re)configuring the service +according to the required data. + +The most common action should be to render a config file from a template. +The :class:`render_template <charmhelpers.core.services.helpers.render_template>` +helper will merge all of the ``required_data`` contexts and render a +`Jinja2 <http://jinja.pocoo.org/>`_ template with the combined data. For +example, to render a list of DSNs for units on the db relation, the +template should include:: + + databases: [ + {% for unit in db %} + "mysql://{{unit['user']}}:{{unit['password']}}@{{unit['host']}}/{{unit['database']}}", + {% endfor %} + ] + +Note that the actions need to be idempotent, since they will all be re-run +if something about the charm changes (that is, if a hook is triggered). That +is why rendering a template is preferred to editing a file via regular expression +substitutions. + +Also note that the actions are not responsible for starting the service; there +are separate ``start`` and ``stop`` options that default to starting and stopping +an Upstart service with the name given by the ``service`` value. + + +Conclusion +---------- + +By using this framework, it is easy to see what the preconditions for the charm +are, and there is never a concern about things being in a partially configured +state. As a charm author, you can focus on what is important to you: what +data is mandatory, what is optional, and what actions should be taken once +the requirements are met. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/getting-started.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/getting-started.rst new file mode 100644 index 0000000000000000000000000000000000000000..785fc8693eb5f74ab1b72ffe1affb8804a182a5c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/getting-started.rst @@ -0,0 +1,152 @@ +Getting Started +=============== + +For a video introduction to ``charmhelpers``, check out this +`Charm School session `_. To start +using ``charmhelpers``, proceed with the instructions on the remainder of this +page. + +Installing Charm Tools +---------------------- + +First, follow `these instructions `_ +to install the ``charm-tools`` package for your platform.
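+ +On Ubuntu, for example, this is typically a single package install; the +package name ``charm-tools`` is an assumption here, and the linked +instructions remain authoritative for your platform:: + + $ sudo apt-get install charm-tools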
+ +Creating a New Charm +-------------------- + +:: + + $ cd ~ + $ mkdir -p charms/precise + $ cd charms/precise + $ charm create -t python mycharm + INFO: Generating template for mycharm in ./mycharm + INFO: No mycharm in apt cache; creating an empty charm instead. + Symlink all hooks to one python source file? [yN] y + INFO:root:Loading charm helper config from charm-helpers.yaml. + INFO:root:Checking out lp:charm-helpers to /tmp/tmpPAqUyN/charm-helpers. + Branched 160 revisions. + INFO:root:Syncing directory: /tmp/tmpPAqUyN/charm-helpers/charmhelpers/core -> lib/charmhelpers/core. + INFO:root:Adding missing __init__.py: lib/charmhelpers/__init__.py + +Let's see what our new charm looks like:: + + $ tree mycharm/ + mycharm/ + ├── charm-helpers.yaml + ├── config.yaml + ├── hooks + │   ├── config-changed -> hooks.py + │   ├── hooks.py + │   ├── install -> hooks.py + │   ├── start -> hooks.py + │   ├── stop -> hooks.py + │   └── upgrade-charm -> hooks.py + ├── icon.svg + ├── lib + │   └── charmhelpers + │   ├── core + │   │   ├── fstab.py + │   │   ├── hookenv.py + │   │   ├── host.py + │   │   └── __init__.py + │   └── __init__.py + ├── metadata.yaml + ├── README.ex + ├── revision + ├── scripts + │   └── charm_helpers_sync.py + └── tests + ├── 00-setup + └── 10-deploy + + 6 directories, 20 files + +The ``charmhelpers`` code is bundled in our charm in the ``lib/`` directory. +All of our Python code will go in ``hooks/hooks.py``. A look at that file reveals +that ``charmhelpers`` has been added to the Python path and imported for us:: + + $ head mycharm/hooks/hooks.py -n11 + #!/usr/bin/python + + import os + import sys + + sys.path.insert(0, os.path.join(os.environ['CHARM_DIR'], 'lib')) + + from charmhelpers.core import ( + hookenv, + host, + ) + +Updating Charmhelpers Packages +------------------------------ + +By default, a new charm installs only the ``charmhelpers.core`` package, but +other packages are available (for a complete list, see the :doc:`api/charmhelpers`). +The installed packages are controlled by the ``charm-helpers.yaml`` file in our charm:: + + $ cd mycharm + $ cat charm-helpers.yaml + destination: lib/charmhelpers + branch: lp:charm-helpers + include: + - core + +Let's update this file to include some more packages:: + + $ vim charm-helpers.yaml + $ cat charm-helpers.yaml + destination: lib/charmhelpers + branch: lp:charm-helpers + include: + - core + - contrib.storage + - fetch + +Now we need to download the new packages into our charm:: + + $ ./scripts/charm_helpers_sync.py -c charm-helpers.yaml + INFO:root:Loading charm helper config from charm-helpers.yaml. + INFO:root:Checking out lp:charm-helpers to /tmp/tmpT38Y87/charm-helpers. + Branched 160 revisions. + INFO:root:Syncing directory: /tmp/tmpT38Y87/charm-helpers/charmhelpers/core -> lib/charmhelpers/core. + INFO:root:Syncing directory: /tmp/tmpT38Y87/charm-helpers/charmhelpers/contrib/storage -> lib/charmhelpers/contrib/storage. + INFO:root:Adding missing __init__.py: lib/charmhelpers/contrib/__init__.py + INFO:root:Syncing directory: /tmp/tmpT38Y87/charm-helpers/charmhelpers/fetch -> lib/charmhelpers/fetch. + +A look at our charmhelpers directory reveals that the new packages have indeed +been added.
We are now free to import and use them in our charm:: + + $ tree lib/charmhelpers/ + lib/charmhelpers/ + ├── contrib + │   ├── __init__.py + │   └── storage + │   ├── __init__.py + │   └── linux + │   ├── ceph.py + │   ├── __init__.py + │   ├── loopback.py + │   ├── lvm.py + │   └── utils.py + ├── core + │   ├── fstab.py + │   ├── hookenv.py + │   ├── host.py + │   └── __init__.py + ├── fetch + │   ├── archiveurl.py + │   ├── bzrurl.py + │   └── __init__.py + └── __init__.py + + 5 directories, 15 files + +Next Steps +---------- + +Now that you have access to ``charmhelpers`` in your charm, check out the +:doc:`example-index` or :doc:`api/charmhelpers` to learn about all the great +functionality that ``charmhelpers`` provides. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/index.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..7a75555cdc4afc43e53a4da3afd9b5751cb9f3ad --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/docs/index.rst @@ -0,0 +1,41 @@ +.. Charm Helpers documentation master file, created by + sphinx-quickstart on Fri Jun 6 10:34:44 2014. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Charm Helpers Documentation +=========================== + +The ``charmhelpers`` Python library is an extensive collection of functions and classes +for simplifying the development of `Juju Charms`_. It includes utilities for: + +* Interacting with the host environment +* Managing hook events +* Reading and writing charm configuration +* Installing dependencies +* Much, much more! + +.. toctree:: + :maxdepth: 2 + + getting-started + example-index + api/charmhelpers + +.. toctree:: + :caption: Project + :glob: + :maxdepth: 3 + + contributing + changelog + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` + + +.. 
_Juju Charms: https://juju.ubuntu.com/docs/ diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/requirements.txt b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..97dbebaed2bd8c992d55d7446095a2a02772687b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/requirements.txt @@ -0,0 +1,16 @@ +# Runtime Dependencies + +# https://pyyaml.org/wiki/PyYAML#history +# PyYAML==5.2 is last supported for py34 +PyYAML + +# https://jinja.palletsprojects.com/en/2.11.x/changelog/ +# Jinja2==2.10 is last supported for py34 (trusty) +# Jinja2==2.11 is last supported for py27 & py35 (xenial) +Jinja2 + +six +netaddr +Tempita + +pbr!=2.1.0,>=2.0.0 # Apache-2.0 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/scripts/README b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/scripts/README new file mode 100644 index 0000000000000000000000000000000000000000..baaf91962d2a0ddc2fb6b5fb1824a2081df8839a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/scripts/README @@ -0,0 +1 @@ +This directory contains scripts for managing the charmhelpers project. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/scripts/update-revno b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/scripts/update-revno new file mode 100755 index 0000000000000000000000000000000000000000..48e6bba99da16156bba0ef75cf0008b0c35d8663 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/scripts/update-revno @@ -0,0 +1,11 @@ +#!/bin/bash +VERSION=$(cat VERSION) +REVNO=$(bzr revno) +bzr di &>/dev/null +if [ $? -ne 0 ]; then + REVNO="${REVNO}+" +fi +cat << EOF > charmhelpers/version.py +CHARMHELPERS_VERSION = '${VERSION}' +CHARMHELPERS_BZRREVNO = '${REVNO}' +EOF diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/setup.cfg b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..af820fb78782d5051c8b18e1ed7c2e77c04310eb --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/setup.cfg @@ -0,0 +1,36 @@ +[metadata] +name = charmhelpers +summary = Helpers for Juju Charm development +description-file = + README.rst +author = Charmers +author-email = juju@lists.ubuntu.com +home-page = https://github.com/juju/charm-helpers +classifier = + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.6 + Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.8 + +[files] +packages = + charmhelpers +scripts = + bin/chlp + bin/contrib/charmsupport/charmsupport + bin/contrib/saltstack/salt-call + +[nosetests] +with-coverage=1 +cover-erase=1 +cover-package=charmhelpers,tools + +[upload_sphinx] +upload-dir = docs/_build/html diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/setup.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..9d8594c18214533b15c511991656497131985c61 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/setup.py @@ -0,0
+1,27 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr>=2.0.0'], + pbr=True) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tarmac_tests.sh b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tarmac_tests.sh new file mode 100755 index 0000000000000000000000000000000000000000..28f7e5a4432b97d9a4b4b7f06f1bb0e3538fb560 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tarmac_tests.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# How the tests are run in Jenkins by Tarmac + +set -e + +pkgs='python-flake8 python-shelltoolbox python-tempita python-nose python-mock python-testtools python-jinja2 python-coverage python-git python-netifaces python-netaddr python-pip zip' +if ! dpkg -s $pkgs 2>/dev/null >/dev/null ; then + echo "Required packages are missing. Please ensure that the missing packages are installed." + echo "Run: sudo apt-get install $pkgs" + exit 1 +fi + +make build diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/test-requirements.txt b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/test-requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..733801e1ba5ff896eecb7d80ee742b8e4cc50e7d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/test-requirements.txt @@ -0,0 +1,37 @@ +# Test-only dependencies are unpinned. +# +git+https://git.launchpad.net/ubuntu/+source/python-distutils-extra +pip +coverage>=3.6 +mock>=1.0.1,<1.1.0 +nose>=1.3.1 +flake8 +testtools==0.9.14 # Before dependent on modern 'six' +amulet +distro-info +sphinx_rtd_theme +ipaddress;python_version<'3.0' # Py27 unit test requirement + + +########################################################## +# Specify versions of runtime dependencies where possible. +# The requirements.txt file cannot be so specific + +# https://pyyaml.org/wiki/PyYAML#history +# PyYAML==5.2 is last supported for py34 +PyYAML==5.2;python_version >= '3.0' and python_version <= '3.4' # py3 trusty +PyYAML; python_version == '2.7' or python_version >= '3.5' # all else + +# https://jinja.palletsprojects.com/en/2.11.x/changelog/ +# Jinja2==2.10 is last supported for py34 +# Jinja2==2.11 is last supported for py27 & py35 +Jinja2==2.10;python_version >= '3.0' and python_version <= '3.4' # py3 trusty +Jinja2==2.11;python_version == '2.7' or python_version == '3.5' # py27, py35 +Jinja2; python_version >= '3.6' # py36 and on + +############################################################## + +netifaces==0.10 # trusty is 0.8, but using py3 compatible version for tests. 
+psutil==1.2.1 # trusty +python-keystoneclient==2.3.2 # xenial +dnspython==1.11.1 # trusty diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5707debfde11bad2916f95be62df9f24a0306efa --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/__init__.py @@ -0,0 +1,16 @@ +import sys +import mock + + +sys.modules['yum'] = mock.MagicMock() +sys.modules['sriov_netplan_shim'] = mock.MagicMock() +sys.modules['sriov_netplan_shim.pci'] = mock.MagicMock() +with mock.patch('charmhelpers.deprecate') as ch_deprecate: + def mock_deprecate(warning, date=None, log=None): + def mock_wrap(f): + def wrapped_f(*args, **kwargs): + return f(*args, **kwargs) + return wrapped_f + return mock_wrap + ch_deprecate.side_effect = mock_deprecate + import charmhelpers.contrib.openstack.utils as openstack # noqa: F401 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/cli/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/cli/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/cli/test_cmdline.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/cli/test_cmdline.py new file mode 100644 index 0000000000000000000000000000000000000000..549e70c025836268d2f52c4a2653669cd2d81848 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/cli/test_cmdline.py @@ -0,0 +1,236 @@ +"""Tests for the commandant code that analyzes a function signature to +determine the parameters to argparse.""" + +from unittest import TestCase +from mock import ( + patch, + MagicMock, + ANY, +) +import json +from pprint import pformat +import yaml +import csv + +from six import StringIO + +from charmhelpers import cli + + +class SubCommandTest(TestCase): + """Test creation of subcommands""" + + def setUp(self): + super(SubCommandTest, self).setUp() + self.cl = cli.CommandLine() + + @patch('sys.exit') + def test_subcommand_wrapper(self, _sys_exit): + """Test function name detection""" + @self.cl.subcommand() + def payload(): + "A function that does work." + pass + args = self.cl.argument_parser.parse_args(['payload']) + self.assertEqual(args.func, payload) + self.assertEqual(_sys_exit.mock_calls, []) + + @patch('sys.exit') + def test_subcommand_wrapper_bogus_arguments(self, _sys_exit): + """Test function name detection""" + @self.cl.subcommand() + def payload(): + "A function that does work." + pass + with self.assertRaises(TypeError): + with patch("sys.argv", "tests deliberately bad input".split()): + with patch("sys.stderr"): + self.cl.argument_parser.parse_args() + _sys_exit.assert_called_once_with(2) + + @patch('sys.exit') + def test_subcommand_wrapper_cmdline_options(self, _sys_exit): + """Test detection of positional arguments and optional parameters.""" + @self.cl.subcommand() + def payload(x, y=None): + "A function that does work." 
+ return x + args = self.cl.argument_parser.parse_args(['payload', 'positional', '--y=optional']) + self.assertEqual(args.func, payload) + self.assertEqual(args.x, 'positional') + self.assertEqual(args.y, 'optional') + self.assertEqual(_sys_exit.mock_calls, []) + + @patch('sys.exit') + def test_subcommand_builder(self, _sys_exit): + def noop(z): + pass + + @self.cl.subcommand_builder('payload', description="A subcommand") + def payload_command(subparser): + subparser.add_argument('-z', action='store_true') + return noop + + args = self.cl.argument_parser.parse_args(['payload', '-z']) + self.assertEqual(args.func, noop) + self.assertTrue(args.z) + self.assertFalse(_sys_exit.called) + + def test_subcommand_builder_bogus_wrapped_args(self): + with self.assertRaises(TypeError): + @self.cl.subcommand_builder('payload', description="A subcommand") + def payload_command(subparser, otherarg): + pass + + def test_run(self): + self.bar_called = False + + @self.cl.subcommand() + def bar(x, y=None, *vargs): + "A function that does work." + self.assertEqual(x, 'baz') + self.assertEqual(y, 'why') + self.assertEqual(vargs, ('mux', 'zob')) + self.bar_called = True + return "qux" + + args = ['chlp', 'bar', '--y', 'why', 'baz', 'mux', 'zob'] + self.cl.formatter = MagicMock() + with patch("sys.argv", args): + with patch("charmhelpers.core.unitdata._KV") as _KV: + self.cl.run() + assert _KV.flush.called + self.assertTrue(self.bar_called) + self.cl.formatter.format_output.assert_called_once_with('qux', ANY) + + def test_no_output(self): + self.bar_called = False + + @self.cl.subcommand() + @self.cl.no_output + def bar(x, y=None, *vargs): + "A function that does work." + self.bar_called = True + return "qux" + + args = ['foo', 'bar', 'baz'] + self.cl.formatter = MagicMock() + with patch("sys.argv", args): + self.cl.run() + self.assertTrue(self.bar_called) + self.cl.formatter.format_output.assert_called_once_with('', ANY) + + def test_test_command(self): + self.bar_called = False + self.bar_result = True + + @self.cl.subcommand() + @self.cl.test_command + def bar(x, y=None, *vargs): + "A function that does work." 
+ self.bar_called = True + return self.bar_result + + args = ['foo', 'bar', 'baz'] + self.cl.formatter = MagicMock() + with patch("sys.argv", args): + self.cl.run() + self.assertTrue(self.bar_called) + self.assertEqual(self.cl.exit_code, 0) + self.cl.formatter.format_output.assert_called_once_with('', ANY) + + self.bar_result = False + with patch("sys.argv", args): + self.cl.run() + self.assertEqual(self.cl.exit_code, 1) + + +class OutputFormatterTest(TestCase): + def setUp(self): + super(OutputFormatterTest, self).setUp() + self.expected_formats = ( + "raw", + "json", + "py", + "yaml", + "csv", + "tab", + ) + self.outfile = StringIO() + self.of = cli.OutputFormatter(outfile=self.outfile) + self.output_data = {"this": "is", "some": 1, "data": dict()} + + def test_supports_formats(self): + self.assertEqual(sorted(self.expected_formats), + sorted(self.of.supported_formats)) + + def test_adds_arguments(self): + ap = MagicMock() + arg_group = MagicMock() + add_arg = MagicMock() + arg_group.add_argument = add_arg + ap.add_mutually_exclusive_group.return_value = arg_group + self.of.add_arguments(ap) + + self.assertTrue(add_arg.called) + + for call_args in add_arg.call_args_list: + if "--format" in call_args[0]: + self.assertEqual(sorted(call_args[1]['choices']), + sorted(self.expected_formats)) + self.assertEqual(call_args[1]['default'], 'raw') + break + else: + print(arg_group.call_args_list) + self.fail("No --format argument was created") + + all_args = [c[0][0] for c in add_arg.call_args_list] + all_args.extend([c[0][1] for c in add_arg.call_args_list if len(c[0]) > 1]) + for fmt in self.expected_formats: + self.assertIn("-{}".format(fmt[0]), all_args) + self.assertIn("--{}".format(fmt), all_args) + + def test_outputs_raw(self): + self.of.raw(self.output_data) + self.outfile.seek(0) + self.assertEqual(self.outfile.read(), str(self.output_data)) + + def test_outputs_json(self): + self.of.json(self.output_data) + self.outfile.seek(0) + self.assertEqual(self.outfile.read(), json.dumps(self.output_data)) + + def test_outputs_py(self): + self.of.py(self.output_data) + self.outfile.seek(0) + self.assertEqual(self.outfile.read(), pformat(self.output_data) + "\n") + + def test_outputs_yaml(self): + self.of.yaml(self.output_data) + self.outfile.seek(0) + self.assertEqual(self.outfile.read(), yaml.dump(self.output_data)) + + def test_outputs_csv(self): + sample = StringIO() + writer = csv.writer(sample) + writer.writerows(self.output_data) + sample.seek(0) + self.of.csv(self.output_data) + self.outfile.seek(0) + self.assertEqual(self.outfile.read(), sample.read()) + + def test_outputs_tab(self): + sample = StringIO() + writer = csv.writer(sample, dialect=csv.excel_tab) + writer.writerows(self.output_data) + sample.seek(0) + self.of.tab(self.output_data) + self.outfile.seek(0) + self.assertEqual(self.outfile.read(), sample.read()) + + def test_formats_output(self): + for format in self.expected_formats: + mock_f = MagicMock() + setattr(self.of, format, mock_f) + self.of.format_output(self.output_data, format) + mock_f.assert_called_with(self.output_data) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/cli/test_function_signature_analysis.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/cli/test_function_signature_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..515d9ceca36b9bbc1b97901231b1ba60b7a13774 --- /dev/null +++ 
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/cli/test_function_signature_analysis.py @@ -0,0 +1,46 @@ +"""Tests for the commandant code that analyzes a function signature to +determine the parameters to argparse.""" + +from testtools import TestCase + +from charmhelpers import cli + + +class FunctionSignatureTest(TestCase): + """Test a variety of function signatures.""" + + def test_positional_arguments(self): + """Finite number of order-dependent required arguments.""" + argparams = tuple(cli.describe_arguments(lambda x, y, z: False)) + self.assertEqual(3, len(argparams)) + for argspec in ((('x',), {}), (('y',), {}), (('z',), {})): + self.assertIn(argspec, argparams) + + def test_keyword_arguments(self): + """Function has optional parameters with default values.""" + argparams = tuple(cli.describe_arguments(lambda x, y=3, z="bar": False)) + self.assertEqual(3, len(argparams)) + for argspec in ((('x',), {}), + (('--y',), {"default": 3}), + (('--z',), {"default": "bar"})): + self.assertIn(argspec, argparams) + + def test_varargs(self): + """Function has a splat-operator parameter to catch an arbitrary number + of positional parameters.""" + argparams = tuple(cli.describe_arguments( + lambda x, y=3, *z: False)) + self.assertEqual(3, len(argparams)) + for argspec in ((('x',), {}), + (('--y',), {"default": 3}), + (('z',), {"nargs": "*"})): + self.assertIn(argspec, argparams) + + def test_keyword_splat_missing(self): + """Double-splat arguments can't be represented in the current version + of commandant.""" + args = cli.describe_arguments(lambda x, y=3, *z, **missing: False) + for opts, _ in args: + # opts should be ('varname',) at this point + self.assertTrue(len(opts) == 1) + self.assertNotIn('missing', opts) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/context/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/context/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/context/test_context.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/context/test_context.py new file mode 100644 index 0000000000000000000000000000000000000000..35543015a2ed17024fb34c36ad128c2e11a4a5da --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/context/test_context.py @@ -0,0 +1,199 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import unittest +from mock import patch, sentinel +import six + +from charmhelpers import context +from charmhelpers.core import hookenv + + +class TestRelations(unittest.TestCase): + def setUp(self): + def install(*args, **kw): + p = patch.object(*args, **kw) + p.start() + self.addCleanup(p.stop) + + install(hookenv, 'relation_types', return_value=['rel', 'pear']) + install(hookenv, 'peer_relation_id', return_value='pear:9') + install(hookenv, 'relation_ids', + side_effect=lambda x: ['{}:{}'.format(x, i) + for i in range(9, 11)]) + install(hookenv, 'related_units', + side_effect=lambda x: ['svc_' + x.replace(':', '/')]) + install(hookenv, 'local_unit', return_value='foo/1') + install(hookenv, 'relation_get') + install(hookenv, 'relation_set') + # install(hookenv, 'is_leader', return_value=False) + + def test_relations(self): + rels = context.Relations() + self.assertListEqual(list(rels.keys()), + ['pear', 'rel']) # Ordered alphabetically + self.assertListEqual(list(rels['rel'].keys()), + ['rel:9', 'rel:10']) # Ordered numerically + + # Relation data is loaded on demand, not on instantiation. + self.assertFalse(hookenv.relation_get.called) + + # But we did have to retrieve some lists of units etc. + self.assertGreaterEqual(hookenv.relation_ids.call_count, 2) + self.assertGreaterEqual(hookenv.related_units.call_count, 2) + + def test_relations_peer(self): + # The Relations instance has a shortcut to the peer relation. + # If the charm has managed to get multiple peer relations, + # it returns the 'primary' one used here and returned by + # hookenv.peer_relation_id() + rels = context.Relations() + self.assertIs(rels.peer, rels['pear']['pear:9']) + + def test_relation(self): + rel = context.Relations()['rel']['rel:9'] + self.assertEqual(rel.relid, 'rel:9') + self.assertEqual(rel.relname, 'rel') + self.assertEqual(rel.service, 'svc_rel') + self.assertTrue(isinstance(rel.local, context.RelationInfo)) + self.assertEqual(rel.local.unit, hookenv.local_unit()) + self.assertTrue(isinstance(rel.peers, context.OrderedDict)) + self.assertTrue(len(rel.peers), 2) + self.assertTrue(isinstance(rel.peers['svc_pear/9'], + context.RelationInfo)) + + # I use this in my log messages. Relation id for identity + # plus service name for ease of reference. + self.assertEqual(str(rel), 'rel:9 (svc_rel)') + + def test_relation_no_peer_relation(self): + hookenv.peer_relation_id.return_value = None + rel = context.Relation('rel:10') + self.assertTrue(rel.peers is None) + + def test_relation_no_peers(self): + hookenv.related_units.side_effect = None + hookenv.related_units.return_value = [] + rel = context.Relation('rel:10') + self.assertDictEqual(rel.peers, {}) + + def test_peer_relation(self): + peer_rel = context.Relations().peer + # The peer relation does not have a 'peers' property. We + # could give it one for symmetry, but it seems somewhat silly.
+ self.assertTrue(peer_rel.peers is None) + + def test_relationinfo(self): + hookenv.relation_get.return_value = {sentinel.key: 'value'} + r = context.RelationInfo('rel:10', 'svc_rel/9') + + self.assertEqual(r.relname, 'rel') + self.assertEqual(r.relid, 'rel:10') + self.assertEqual(r.unit, 'svc_rel/9') + self.assertEqual(r.service, 'svc_rel') + self.assertEqual(r.number, 9) + + self.assertFalse(hookenv.relation_get.called) + self.assertEqual(r[sentinel.key], 'value') + hookenv.relation_get.assert_called_with(unit='svc_rel/9', rid='rel:10') + + # Updates fail + with self.assertRaises(TypeError): + r['newkey'] = 'foo' + + # Deletes fail + with self.assertRaises(TypeError): + del r[sentinel.key] + + # I use this for logging. + self.assertEqual(str(r), 'rel:10 (svc_rel/9)') + + def test_relationinfo_local(self): + r = context.RelationInfo('rel:10', hookenv.local_unit()) + + # Updates work, with standard strings. + r[sentinel.key] = 'value' + hookenv.relation_set.assert_called_once_with( + 'rel:10', {sentinel.key: 'value'}) + + # Python 2 unicode strings work too. + hookenv.relation_set.reset_mock() + r[sentinel.key] = six.u('value') + hookenv.relation_set.assert_called_once_with( + 'rel:10', {sentinel.key: six.u('value')}) + + # Byte strings fail under Python 3. + if six.PY3: + with self.assertRaises(ValueError): + r[sentinel.key] = six.b('value') + + # Deletes work + del r[sentinel.key] + hookenv.relation_set.assert_called_with('rel:10', {sentinel.key: None}) + + # Attempting to write a non-string fails + with self.assertRaises(ValueError): + r[sentinel.key] = 42 + + +class TestLeader(unittest.TestCase): + @patch.object(hookenv, 'leader_get') + def test_get(self, leader_get): + leader_get.return_value = {'a_key': 'a_value'} + + leader = context.Leader() + self.assertEqual(leader['a_key'], 'a_value') + leader_get.assert_called_with() + + with self.assertRaises(KeyError): + leader['missing'] + + @patch.object(hookenv, 'leader_set') + @patch.object(hookenv, 'leader_get') + @patch.object(hookenv, 'is_leader') + def test_set(self, is_leader, leader_get, leader_set): + is_leader.return_value = True + leader = context.Leader() + + # Updates work + leader[sentinel.key] = 'foo' + leader_set.assert_called_with({sentinel.key: 'foo'}) + del leader[sentinel.key] + leader_set.assert_called_with({sentinel.key: None}) + + # Python 2 unicode string values work too + leader[sentinel.key] = six.u('bar') + leader_set.assert_called_with({sentinel.key: 'bar'}) + + # Byte strings fail under Python 3 + if six.PY3: + with self.assertRaises(ValueError): + leader[sentinel.key] = six.b('baz') + + # Non strings fail, as implicit casting causes more trouble + # than it solves. Simple types like integers would round trip + # back as strings. 
+ with self.assertRaises(ValueError): + leader[sentinel.key] = 42 + + @patch.object(hookenv, 'leader_set') + @patch.object(hookenv, 'leader_get') + @patch.object(hookenv, 'is_leader') + def test_set_not_leader(self, is_leader, leader_get, leader_set): + is_leader.return_value = False + leader_get.return_value = {'a_key': 'a_value'} + leader = context.Leader() + with self.assertRaises(TypeError): + leader['a_key'] = 'foo' + with self.assertRaises(TypeError): + del leader['a_key'] diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/amulet/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/amulet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/amulet/test_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/amulet/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..78d3ecd110bea991a18312aed9c6ad66db9733aa --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/amulet/test_utils.py @@ -0,0 +1,370 @@ +# Copyright 2015 Canonical Ltd. +# +# Authors: +# Adam Collard + +from contextlib import contextmanager +from mock import patch +import sys +import unittest + +import six + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils, + amulet, +) + + +@contextmanager +def captured_output(): + """Simple context manager to capture stdout/stderr. + + Source: http://stackoverflow.com/a/17981937/56219. + """ + new_out, new_err = six.StringIO(), six.StringIO() + old_out, old_err = sys.stdout, sys.stderr + try: + sys.stdout, sys.stderr = new_out, new_err + yield sys.stdout, sys.stderr + finally: + sys.stdout, sys.stderr = old_out, old_err + + +class FakeSentry(object): + + def __init__(self, name="foo"): + self.commands = {} + self.info = {"unit_name": name} + + def run(self, command): + return self.commands[command] + + def ssh(self, command): + return self.commands[command] + + def run_action(self, action, action_args=None): + return 'action-id' + + +class ValidateServicesByNameTestCase(unittest.TestCase): + + def setUp(self): + self.utils = AmuletUtils() + self.sentry_unit = FakeSentry() + + def test_errors_for_unknown_upstart_service(self): + """ + Returns a message if the Upstart service is unknown. + """ + self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0 + self.sentry_unit.commands["sudo status foo"] = ( + "status: Unknown job: foo", 1) + + result = self.utils.validate_services_by_name( + {self.sentry_unit: ["foo"]}) + self.assertIsNotNone(result) + + def test_none_for_started_upstart_service(self): + """ + Returns None if the Upstart service is running. + """ + self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0 + self.sentry_unit.commands["sudo status foo"] = ( + "foo start/running, process 42", 0) + + result = self.utils.validate_services_by_name( + {self.sentry_unit: ["foo"]}) + self.assertIsNone(result) + + def test_errors_for_stopped_upstart_service(self): + """ + Returns a message if the Upstart service is stopped. 
+ """ + self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0 + self.sentry_unit.commands["sudo status foo"] = "foo stop/waiting", 0 + + result = self.utils.validate_services_by_name( + {self.sentry_unit: ["foo"]}) + self.assertIsNotNone(result) + + def test_errors_for_unknown_systemd_service(self): + """ + Returns a message if a systemd service is unknown. + """ + self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0 + self.sentry_unit.commands["sudo service foo status"] = (u"""\ +\u25cf foo.service + Loaded: not-found (Reason: No such file or directory) + Active: inactive (dead) +""", 3) + + result = self.utils.validate_services_by_name({ + self.sentry_unit: ["foo"]}) + self.assertIsNotNone(result) + + def test_none_for_started_systemd_service(self): + """ + Returns None if a systemd service is running. + """ + self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0 + self.sentry_unit.commands["sudo service foo status"] = (u"""\ +\u25cf foo.service - Foo + Loaded: loaded (/lib/systemd/system/foo.service; enabled) + Active: active (exited) since Thu 1970-01-01 00:00:00 UTC; 42h 42min ago + Main PID: 3 (code=exited, status=0/SUCCESS) + CGroup: /system.slice/foo.service +""", 0) + result = self.utils.validate_services_by_name( + {self.sentry_unit: ["foo"]}) + self.assertIsNone(result) + + def test_errors_for_stopped_systemd_service(self): + """ + Returns a message if a systemd service is stopped. + """ + self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0 + self.sentry_unit.commands["sudo service foo status"] = (u"""\ +\u25cf foo.service - Foo + Loaded: loaded (/lib/systemd/system/foo.service; disabled) + Active: inactive (dead) +""", 3) + result = self.utils.validate_services_by_name( + {self.sentry_unit: ["foo"]}) + self.assertIsNotNone(result) + + +class RunActionTestCase(unittest.TestCase): + + def setUp(self): + self.utils = AmuletUtils() + self.sentry_unit = FakeSentry() + + def test_returns_action_id(self): + """Returns action_id.""" + + self.assertEqual("action-id", self.utils.run_action( + self.sentry_unit, "foo")) + + +class WaitActionTestCase(unittest.TestCase): + + def setUp(self): + self.utils = AmuletUtils() + + @patch.object(amulet.actions, "get_action_output") + def test_returns_true_if_completed(self, get_action_output): + """JSON output is parsed and returns True if the action completed.""" + + get_action_output.return_value = {"status": "completed"} + + self.assertTrue(self.utils.wait_on_action("action-id")) + get_action_output.assert_called_with("action-id", full_output=True) + + @patch.object(amulet.actions, "get_action_output") + def test_returns_false_if_still_running(self, get_action_output): + """ + JSON output is parsed and returns False if the action is still running. + """ + get_action_output.return_value = {"status": "running"} + + self.assertFalse(self.utils.wait_on_action("action-id")) + get_action_output.assert_called_with("action-id", full_output=True) + + @patch.object(amulet.actions, "get_action_output") + def test_returns_false_if_no_status(self, get_action_output): + """ + JSON output is parsed and returns False if there is no action status. 
+ """ + get_action_output.return_value = {} + + self.assertFalse(self.utils.wait_on_action("action-id")) + get_action_output.assert_called_with("action-id", full_output=True) + + +class GetProcessIdListTestCase(unittest.TestCase): + + def setUp(self): + self.utils = AmuletUtils() + self.sentry_unit = FakeSentry() + + def test_returns_pids(self): + """ + Normal execution returns a list of pids + """ + self.sentry_unit.commands['pidof -x "foo"'] = ("123 124 125", 0) + result = self.utils.get_process_id_list(self.sentry_unit, "foo") + self.assertEqual(["123", "124", "125"], result) + + def test_fails_if_no_process_found(self): + """ + By default, the expectation is that a process is running. Failure + to find a given process results in an amulet.FAIL being + raised. + """ + self.sentry_unit.commands['pidof -x "foo"'] = ("", 1) + with self.assertRaises(SystemExit) as cm, captured_output() as ( + out, err): + self.utils.get_process_id_list(self.sentry_unit, "foo") + the_exception = cm.exception + self.assertEqual(1, the_exception.code) + self.assertEqual( + 'foo `pidof -x "foo"` returned 1', out.getvalue().rstrip()) + + def test_looks_for_scripts(self): + """ + pidof command uses -x to return a list of pids of scripts + """ + self.sentry_unit.commands["pidof foo"] = ("", 1) + self.sentry_unit.commands['pidof -x "foo"'] = ("123 124 125", 0) + result = self.utils.get_process_id_list(self.sentry_unit, "foo") + self.assertEqual(["123", "124", "125"], result) + + def test_expect_no_pid(self): + """ + By setting expectation that there are no pids running the logic + about when to fail is reversed. + """ + self.sentry_unit.commands[ + 'pidof -x "foo" || exit 0 && exit 1'] = ("", 0) + self.sentry_unit.commands[ + 'pidof -x "bar" || exit 0 && exit 1'] = ("", 1) + result = self.utils.get_process_id_list( + self.sentry_unit, "foo", expect_success=False) + self.assertEqual([], result) + with self.assertRaises(SystemExit) as cm, captured_output() as ( + out, err): + self.utils.get_process_id_list( + self.sentry_unit, "bar", expect_success=False) + the_exception = cm.exception + self.assertEqual(1, the_exception.code) + self.assertEqual( + 'foo `pidof -x "bar" || exit 0 && exit 1` returned 1', + out.getvalue().rstrip()) + + +class GetUnitProcessIdsTestCase(unittest.TestCase): + + def setUp(self): + self.utils = AmuletUtils() + self.sentry_unit = FakeSentry() + + def test_returns_map(self): + """ + Normal execution returns a dictionary mapping process names to + PIDs for each unit. + """ + second_sentry = FakeSentry(name="bar") + self.sentry_unit.commands['pidof -x "foo"'] = ("123 124", 0) + second_sentry.commands['pidof -x "bar"'] = ("456 457", 0) + + result = self.utils.get_unit_process_ids({ + self.sentry_unit: ["foo"], second_sentry: ["bar"]}) + self.assertEqual({ + self.sentry_unit: {"foo": ["123", "124"]}, + second_sentry: {"bar": ["456", "457"]}}, result) + + def test_expect_failure(self): + """ + Expected failures return empty lists. 
+ """ + second_sentry = FakeSentry(name="bar") + self.sentry_unit.commands[ + 'pidof -x "foo" || exit 0 && exit 1'] = ("", 0) + second_sentry.commands['pidof -x "bar" || exit 0 && exit 1'] = ("", 0) + + result = self.utils.get_unit_process_ids( + {self.sentry_unit: ["foo"], second_sentry: ["bar"]}, + expect_success=False) + self.assertEqual({ + self.sentry_unit: {"foo": []}, + second_sentry: {"bar": []}}, result) + + +class StatusGetTestCase(unittest.TestCase): + + def setUp(self): + self.utils = AmuletUtils() + self.sentry_unit = FakeSentry() + + def test_status_get(self): + """ + We can get the status of a unit. + """ + self.sentry_unit.commands[ + "status-get --format=json --include-data"] = ( + """{"status": "active", "message": "foo"}""", 0) + self.assertEqual(self.utils.status_get(self.sentry_unit), + (u"active", u"foo")) + + def test_status_get_missing_command(self): + """ + Older releases of Juju have no status-get command. In those + cases we should return the "unknown" status. + """ + self.sentry_unit.commands[ + "status-get --format=json --include-data"] = ( + "status-get: command not found", 127) + self.assertEqual(self.utils.status_get(self.sentry_unit), + (u"unknown", u"")) + + +class ValidateServicesByProcessIDTestCase(unittest.TestCase): + + def setUp(self): + self.utils = AmuletUtils() + self.sentry_unit = FakeSentry() + + def test_accepts_list_wrong(self): + """ + Validates that it can accept a list + """ + expected = {self.sentry_unit: {"foo": [3, 4]}} + actual = {self.sentry_unit: {"foo": [12345, 67890]}} + result = self.utils.validate_unit_process_ids(expected, actual) + self.assertIsNotNone(result) + + def test_accepts_list(self): + """ + Validates that it can accept a list + """ + expected = {self.sentry_unit: {"foo": [2, 3]}} + actual = {self.sentry_unit: {"foo": [12345, 67890]}} + result = self.utils.validate_unit_process_ids(expected, actual) + self.assertIsNone(result) + + def test_accepts_string(self): + """ + Validates that it can accept a string + """ + expected = {self.sentry_unit: {"foo": 2}} + actual = {self.sentry_unit: {"foo": [12345, 67890]}} + result = self.utils.validate_unit_process_ids(expected, actual) + self.assertIsNone(result) + + def test_accepts_string_wrong(self): + """ + Validates that it can accept a string + """ + expected = {self.sentry_unit: {"foo": 3}} + actual = {self.sentry_unit: {"foo": [12345, 67890]}} + result = self.utils.validate_unit_process_ids(expected, actual) + self.assertIsNotNone(result) + + def test_accepts_bool(self): + """ + Validates that it can accept a boolean + """ + expected = {self.sentry_unit: {"foo": True}} + actual = {self.sentry_unit: {"foo": [12345, 67890]}} + result = self.utils.validate_unit_process_ids(expected, actual) + self.assertIsNone(result) + + def test_accepts_bool_wrong(self): + """ + Validates that it can accept a boolean + """ + expected = {self.sentry_unit: {"foo": True}} + actual = {self.sentry_unit: {"foo": []}} + result = self.utils.validate_unit_process_ids(expected, actual) + self.assertIsNotNone(result) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ansible/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ansible/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ansible/test_ansible.py 
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ansible/test_ansible.py new file mode 100644 index 0000000000000000000000000000000000000000..5261da25c767d07d557ecd587dd4402c63ed0b65 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ansible/test_ansible.py @@ -0,0 +1,339 @@ +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers +import mock +import os +import shutil +import stat +import tempfile +import unittest +import yaml + + +import charmhelpers.contrib.ansible +from charmhelpers.core import hookenv + + +class InstallAnsibleSupportTestCase(unittest.TestCase): + + def setUp(self): + super(InstallAnsibleSupportTestCase, self).setUp() + + patcher = mock.patch('charmhelpers.fetch') + self.mock_fetch = patcher.start() + self.addCleanup(patcher.stop) + + patcher = mock.patch('charmhelpers.core') + self.mock_core = patcher.start() + self.addCleanup(patcher.stop) + + hosts_file = tempfile.NamedTemporaryFile() + self.ansible_hosts_path = hosts_file.name + self.addCleanup(hosts_file.close) + patcher = mock.patch.object(charmhelpers.contrib.ansible, + 'ansible_hosts_path', + self.ansible_hosts_path) + patcher.start() + self.addCleanup(patcher.stop) + + def test_adds_ppa_by_default(self): + charmhelpers.contrib.ansible.install_ansible_support() + + self.mock_fetch.add_source.assert_called_once_with( + 'ppa:ansible/ansible') + self.mock_fetch.apt_update.assert_called_once_with(fatal=True) + self.mock_fetch.apt_install.assert_called_once_with( + 'ansible') + + def test_no_ppa(self): + charmhelpers.contrib.ansible.install_ansible_support( + from_ppa=False) + + self.assertEqual(self.mock_fetch.add_source.call_count, 0) + self.mock_fetch.apt_install.assert_called_once_with( + 'ansible') + + def test_writes_ansible_hosts(self): + with open(self.ansible_hosts_path) as hosts_file: + self.assertEqual(hosts_file.read(), '') + + charmhelpers.contrib.ansible.install_ansible_support() + + with open(self.ansible_hosts_path) as hosts_file: + self.assertEqual(hosts_file.read(), + 'localhost ansible_connection=local ' + 'ansible_remote_tmp=/root/.ansible/tmp') + + +class ApplyPlaybookTestCases(unittest.TestCase): + + unit_data = { + 'private-address': '10.0.3.2', + 'public-address': '123.123.123.123', + } + + def setUp(self): + super(ApplyPlaybookTestCases, self).setUp() + + # Hookenv patches (a single patch to hookenv doesn't work): + patcher = mock.patch('charmhelpers.core.hookenv.config') + self.mock_config = patcher.start() + self.addCleanup(patcher.stop) + Serializable = charmhelpers.core.hookenv.Serializable + self.mock_config.return_value = Serializable({}) + patcher = mock.patch('charmhelpers.core.hookenv.relation_get') + self.mock_relation_get = patcher.start() + self.mock_relation_get.return_value = {} + self.addCleanup(patcher.stop) + patcher = mock.patch('charmhelpers.core.hookenv.relations') + self.mock_relations = patcher.start() + self.mock_relations.return_value = { + 'wsgi-file': {}, + 'website': {}, + 'nrpe-external-master': {}, + } + self.addCleanup(patcher.stop) + patcher = mock.patch('charmhelpers.core.hookenv.relations_of_type') + self.mock_relations_of_type = patcher.start() + self.mock_relations_of_type.return_value = [] + self.addCleanup(patcher.stop) + patcher = mock.patch('charmhelpers.core.hookenv.relation_type') + self.mock_relation_type = patcher.start() + self.mock_relation_type.return_value = None + self.addCleanup(patcher.stop) + patcher = mock.patch('charmhelpers.core.hookenv.local_unit') + 
self.mock_local_unit = patcher.start() + self.addCleanup(patcher.stop) + self.mock_local_unit.return_value = {} + + def unit_get_data(argument): + "dummy unit_get that accesses dummy unit data" + return self.unit_data[argument] + + patcher = mock.patch( + 'charmhelpers.core.hookenv.unit_get', unit_get_data) + self.mock_unit_get = patcher.start() + self.addCleanup(patcher.stop) + + patcher = mock.patch('charmhelpers.contrib.ansible.subprocess') + self.mock_subprocess = patcher.start() + self.addCleanup(patcher.stop) + + etc_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, etc_dir) + self.vars_path = os.path.join(etc_dir, 'ansible', 'vars.yaml') + patcher = mock.patch.object(charmhelpers.contrib.ansible, + 'ansible_vars_path', self.vars_path) + patcher.start() + self.addCleanup(patcher.stop) + + patcher = mock.patch.object(charmhelpers.contrib.ansible.os, + 'environ', {}) + patcher.start() + self.addCleanup(patcher.stop) + + def test_calls_ansible_playbook(self): + charmhelpers.contrib.ansible.apply_playbook( + 'playbooks/dependencies.yaml') + + self.mock_subprocess.check_call.assert_called_once_with([ + 'ansible-playbook', '-c', 'local', 'playbooks/dependencies.yaml'], + env={'PYTHONUNBUFFERED': '1'}) + + def test_writes_vars_file(self): + self.assertFalse(os.path.exists(self.vars_path)) + self.mock_config.return_value = charmhelpers.core.hookenv.Serializable({ + 'group_code_owner': 'webops_deploy', + 'user_code_runner': 'ubunet', + 'private-address': '10.10.10.10', + }) + self.mock_relation_type.return_value = 'wsgi-file' + self.mock_relation_get.return_value = { + 'relation_key1': 'relation_value1', + 'relation-key2': 'relation_value2', + } + + charmhelpers.contrib.ansible.apply_playbook( + 'playbooks/dependencies.yaml') + + self.assertTrue(os.path.exists(self.vars_path)) + stats = os.stat(self.vars_path) + self.assertEqual( + stats.st_mode & stat.S_IRWXU, + stat.S_IRUSR | stat.S_IWUSR) + self.assertEqual(stats.st_mode & stat.S_IRWXG, 0) + self.assertEqual(stats.st_mode & stat.S_IRWXO, 0) + with open(self.vars_path, 'r') as vars_file: + result = yaml.safe_load(vars_file.read()) + self.assertEqual({ + "group_code_owner": "webops_deploy", + "user_code_runner": "ubunet", + "private_address": "10.10.10.10", + "charm_dir": "", + "local_unit": {}, + 'current_relation': { + 'relation_key1': 'relation_value1', + 'relation-key2': 'relation_value2', + }, + 'relations_full': { + 'nrpe-external-master': {}, + 'website': {}, + 'wsgi-file': {}, + }, + 'relations': { + 'nrpe-external-master': [], + 'website': [], + 'wsgi-file': [], + }, + "wsgi_file__relation_key1": "relation_value1", + "wsgi_file__relation_key2": "relation_value2", + "unit_private_address": "10.0.3.2", + "unit_public_address": "123.123.123.123", + }, result) + + def test_calls_with_tags(self): + charmhelpers.contrib.ansible.apply_playbook( + 'playbooks/complete-state.yaml', tags=['install', 'somethingelse']) + + self.mock_subprocess.check_call.assert_called_once_with([ + 'ansible-playbook', '-c', 'local', 'playbooks/complete-state.yaml', + '--tags', 'install,somethingelse'], env={'PYTHONUNBUFFERED': '1'}) + + @mock.patch.object(hookenv, 'config') + def test_calls_with_extra_vars(self, config): + charmhelpers.contrib.ansible.apply_playbook( + 'playbooks/complete-state.yaml', tags=['install', 'somethingelse'], + extra_vars={'a': 'b'}) + + self.mock_subprocess.check_call.assert_called_once_with([ + 'ansible-playbook', '-c', 'local', 'playbooks/complete-state.yaml', + '--tags', 'install,somethingelse', '--extra-vars', '{"a": 
"b"}'], + env={'PYTHONUNBUFFERED': '1'}) + + @mock.patch.object(hookenv, 'config') + def test_calls_with_extra_vars_path(self, config): + charmhelpers.contrib.ansible.apply_playbook( + 'playbooks/complete-state.yaml', tags=['install', 'somethingelse'], + extra_vars='@myvars.json') + + self.mock_subprocess.check_call.assert_called_once_with([ + 'ansible-playbook', '-c', 'local', 'playbooks/complete-state.yaml', + '--tags', 'install,somethingelse', '--extra-vars', '"@myvars.json"'], + env={'PYTHONUNBUFFERED': '1'}) + + @mock.patch.object(hookenv, 'config') + def test_calls_with_extra_vars_dict(self, config): + charmhelpers.contrib.ansible.apply_playbook( + 'playbooks/complete-state.yaml', tags=['install', 'somethingelse'], + extra_vars={'pkg': {'a': 'present', 'b': 'absent'}}) + + self.mock_subprocess.check_call.assert_called_once_with([ + 'ansible-playbook', '-c', 'local', 'playbooks/complete-state.yaml', + '--tags', 'install,somethingelse', '--extra-vars', + '{"pkg": {"a": "present", "b": "absent"}}'], + env={'PYTHONUNBUFFERED': '1'}) + + @mock.patch.object(hookenv, 'config') + def test_hooks_executes_playbook_with_tag(self, config): + hooks = charmhelpers.contrib.ansible.AnsibleHooks('my/playbook.yaml') + foo = mock.MagicMock() + hooks.register('foo', foo) + + hooks.execute(['foo']) + + self.assertEqual(foo.call_count, 1) + self.mock_subprocess.check_call.assert_called_once_with([ + 'ansible-playbook', '-c', 'local', 'my/playbook.yaml', + '--tags', 'foo'], env={'PYTHONUNBUFFERED': '1'}) + + @mock.patch.object(hookenv, 'config') + def test_specifying_ansible_handled_hooks(self, config): + hooks = charmhelpers.contrib.ansible.AnsibleHooks( + 'my/playbook.yaml', default_hooks=['start', 'stop']) + + hooks.execute(['start']) + + self.mock_subprocess.check_call.assert_called_once_with([ + 'ansible-playbook', '-c', 'local', 'my/playbook.yaml', + '--tags', 'start'], env={'PYTHONUNBUFFERED': '1'}) + + +class TestActionDecorator(unittest.TestCase): + + def setUp(self): + p = mock.patch('charmhelpers.contrib.ansible.apply_playbook') + self.apply_playbook = p.start() + self.addCleanup(p.stop) + + def test_action_no_args(self): + hooks = charmhelpers.contrib.ansible.AnsibleHooks('playbook.yaml') + + @hooks.action() + def test(): + return {} + + hooks.execute(['test']) + self.apply_playbook.assert_called_once_with( + 'playbook.yaml', tags=['test'], extra_vars={}) + + def test_action_required_arg_keyword(self): + hooks = charmhelpers.contrib.ansible.AnsibleHooks('playbook.yaml') + + @hooks.action() + def test(x): + return locals() + + hooks.execute(['test', 'x=a']) + self.apply_playbook.assert_called_once_with( + 'playbook.yaml', tags=['test'], extra_vars={'x': 'a'}) + + def test_action_required_arg_missing(self): + hooks = charmhelpers.contrib.ansible.AnsibleHooks('playbook.yaml') + + @hooks.action() + def test(x): + """Requires x""" + return locals() + + try: + hooks.execute(['test']) + self.fail("should have thrown TypeError") + except TypeError as e: + self.assertEqual(e.args[1], "Requires x") + + def test_action_required_unknown_arg(self): + hooks = charmhelpers.contrib.ansible.AnsibleHooks('playbook.yaml') + + @hooks.action() + def test(x='a'): + """Requires x""" + return locals() + + try: + hooks.execute(['test', 'z=c']) + self.fail("should have thrown TypeError") + except TypeError as e: + self.assertEqual(e.args[1], "Requires x") + + def test_action_default_arg(self): + hooks = charmhelpers.contrib.ansible.AnsibleHooks('playbook.yaml') + + @hooks.action() + def test(x='b'): + return locals() + 
+ hooks.execute(['test'])
+ self.apply_playbook.assert_called_once_with(
+ 'playbook.yaml', tags=['test'], extra_vars={'x': 'b'})
+
+ def test_action_multiple(self):
+ hooks = charmhelpers.contrib.ansible.AnsibleHooks('playbook.yaml')
+
+ @hooks.action()
+ def test(x, y='b'):
+ return locals()
+
+ hooks.execute(['test', 'x=a', 'y=b'])
+ self.apply_playbook.assert_called_once_with(
+ 'playbook.yaml', tags=['test'], extra_vars={'x': 'a', 'y': 'b'})
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/benchmark/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/benchmark/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/benchmark/test_benchmark.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/benchmark/test_benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..52ec086271f02e7538e69a9b272a35afe8e243c8
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/benchmark/test_benchmark.py
@@ -0,0 +1,124 @@
+from functools import partial
+from os.path import join
+from tempfile import mkdtemp
+from shutil import rmtree
+
+import mock
+from testtools import TestCase
+# import unittest
+from charmhelpers.contrib.benchmark import Benchmark, action_set # noqa
+from tests.helpers import patch_open, FakeRelation
+
+TO_PATCH = [
+ 'in_relation_hook',
+ 'relation_ids',
+ 'relation_set',
+ 'relation_get',
+]
+
+FAKE_RELATION = {
+ 'benchmark:0': {
+ 'benchmark/0': {
+ 'hostname': '127.0.0.1',
+ 'port': '1111',
+ 'graphite_port': '2222',
+ 'graphite_endpoint': 'http://localhost:3333',
+ 'api_port': '4444'
+ }
+ }
+}
+
+
+class TestBenchmark(TestCase):
+
+ def setUp(self):
+ super(TestBenchmark, self).setUp()
+ for m in TO_PATCH:
+ setattr(self, m, self._patch(m))
+ self.fake_relation = FakeRelation(FAKE_RELATION)
+ # self.hook_name.return_value = 'benchmark-relation-changed'
+
+ self.relation_get.side_effect = partial(
+ self.fake_relation.get, rid="benchmark:0", unit="benchmark/0")
+ self.relation_ids.side_effect = self.fake_relation.relation_ids
+
+ def _patch(self, method):
+ _m = mock.patch('charmhelpers.contrib.benchmark.'
+ method) + m = _m.start() + self.addCleanup(_m.stop) + return m + + @mock.patch('os.path.exists') + @mock.patch('subprocess.check_output') + def test_benchmark_start(self, check_output, exists): + + exists.return_value = True + check_output.return_value = "data" + + with patch_open() as (_open, _file): + self.assertIsNone(Benchmark.start()) + # _open.assert_called_with('/etc/benchmark.conf', 'w') + + COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' + exists.assert_any_call(COLLECT_PROFILE_DATA) + check_output.assert_any_call([COLLECT_PROFILE_DATA]) + + def test_benchmark_finish(self): + with patch_open() as (_open, _file): + self.assertIsNone(Benchmark.finish()) + # _open.assert_called_with('/etc/benchmark.conf', 'w') + + @mock.patch('charmhelpers.contrib.benchmark.action_set') + def test_benchmark_set_composite_score(self, action_set): + self.assertTrue(Benchmark.set_composite_score(15.7, 'hits/sec', 'desc')) + action_set.assert_called_once_with('meta.composite', {'value': 15.7, 'units': 'hits/sec', 'direction': 'desc'}) + + @mock.patch('charmhelpers.contrib.benchmark.find_executable') + @mock.patch('charmhelpers.contrib.benchmark.subprocess.check_call') + def test_benchmark_action_set(self, check_call, find_executable): + find_executable.return_value = "/usr/bin/action-set" + self.assertTrue(action_set('foo', 'bar')) + + find_executable.assert_called_once_with('action-set') + check_call.assert_called_once_with(['action-set', 'foo=bar']) + + @mock.patch('charmhelpers.contrib.benchmark.find_executable') + @mock.patch('charmhelpers.contrib.benchmark.subprocess.check_call') + def test_benchmark_action_set_dict(self, check_call, find_executable): + find_executable.return_value = "/usr/bin/action-set" + self.assertTrue(action_set('baz', {'foo': 1, 'bar': 2})) + + find_executable.assert_called_with('action-set') + + check_call.assert_any_call(['action-set', 'baz.foo=1']) + check_call.assert_any_call(['action-set', 'baz.bar=2']) + + @mock.patch('charmhelpers.contrib.benchmark.relation_ids') + @mock.patch('charmhelpers.contrib.benchmark.in_relation_hook') + def test_benchmark_init(self, in_relation_hook, relation_ids): + + in_relation_hook.return_value = True + relation_ids.return_value = ['benchmark:0'] + actions = ['asdf', 'foobar'] + + tempdir = mkdtemp(prefix=self.__class__.__name__) + self.addCleanup(rmtree, tempdir) + conf_path = join(tempdir, "benchmark.conf") + with mock.patch.object(Benchmark, "BENCHMARK_CONF", conf_path): + b = Benchmark(actions) + + self.assertIsInstance(b, Benchmark) + + self.assertTrue(self.relation_get.called) + self.assertTrue(self.relation_set.called) + + relation_ids.assert_called_once_with('benchmark') + + self.relation_set.assert_called_once_with( + relation_id='benchmark:0', + relation_settings={'benchmarks': ",".join(actions)} + ) + + conf_contents = open(conf_path).readlines() + for key, val in iter(FAKE_RELATION['benchmark:0']['benchmark/0'].items()): + self.assertIn("%s=%s\n" % (key, val), conf_contents) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmhelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmhelpers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmhelpers/test_charmhelpers.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmhelpers/test_charmhelpers.py 
new file mode 100644 index 0000000000000000000000000000000000000000..2837d2b9a49730d509def4943874928b88bec74b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmhelpers/test_charmhelpers.py @@ -0,0 +1,274 @@ +# Tests for Python charm helpers. + +import unittest +import yaml +from testtools import TestCase + +from six import StringIO + +import sys +# Path hack to ensure we test the local code, not a version installed in +# /usr/local/lib. This is necessary since /usr/local/lib is prepended before +# what is specified in PYTHONPATH. +sys.path.insert(0, 'helpers/python') +from charmhelpers.contrib import charmhelpers # noqa + + +class CharmHelpersTestCase(TestCase): + """A basic test case for Python charm helpers.""" + + def _patch_command(self, replacement_command): + """Monkeypatch charmhelpers.command for testing purposes. + + :param replacement_command: The replacement Callable for + command(). + """ + self.patch(charmhelpers, 'command', lambda *args: replacement_command) + + def _make_juju_status_dict(self, num_units=1, + service_name='test-service', + unit_state='pending', + machine_state='not-started'): + """Generate valid juju status dict and return it.""" + machine_data = {} + # The 0th machine is the Zookeeper. + machine_data[0] = {'dns-name': 'zookeeper.example.com', + 'instance-id': 'machine0', + 'state': 'not-started'} + service_data = {'charm': 'local:precise/{}-1'.format(service_name), + 'relations': {}, + 'units': {}} + for i in range(num_units): + # The machine is always going to be i+1 because there + # will always be num_units+1 machines. + machine_number = i + 1 + unit_machine_data = { + 'dns-name': 'machine{}.example.com'.format(machine_number), + 'instance-id': 'machine{}'.format(machine_number), + 'state': machine_state, + 'instance-state': machine_state} + machine_data[machine_number] = unit_machine_data + unit_data = { + 'machine': machine_number, + 'public-address': + '{}-{}.example.com'.format(service_name, i), + 'relations': {'db': {'state': 'up'}}, + 'agent-state': unit_state} + service_data['units']['{}/{}'.format(service_name, i)] = ( + unit_data) + juju_status_data = {'machines': machine_data, + 'services': {service_name: service_data}} + return juju_status_data + + def _make_juju_status_yaml(self, num_units=1, + service_name='test-service', + unit_state='pending', + machine_state='not-started'): + """Convert the dict returned by `_make_juju_status_dict` to YAML.""" + return yaml.dump( + self._make_juju_status_dict( + num_units, service_name, unit_state, machine_state)) + + def test_make_charm_config_file(self): + # make_charm_config_file() writes the passed configuration to a + # temporary file as YAML. + charm_config = {'foo': 'bar', + 'spam': 'eggs', + 'ham': 'jam'} + # make_charm_config_file() returns the file object so that it + # can be garbage collected properly. + charm_config_file = charmhelpers.make_charm_config_file(charm_config) + with open(charm_config_file.name) as config_in: + written_config = config_in.read() + self.assertEqual(yaml.dump(charm_config), written_config) + + def test_unit_info(self): + # unit_info returns requested data about a given service. 
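+ # juju_status is stubbed to return canned YAML below, so unit_info()
+ # is exercised without a real Juju environment.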
+ juju_yaml = self._make_juju_status_yaml() + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + self.assertEqual( + 'pending', + charmhelpers.unit_info('test-service', 'agent-state')) + + def test_unit_info_returns_empty_for_nonexistent_service(self): + # If the service passed to unit_info() has not yet started (or + # otherwise doesn't exist), unit_info() will return an empty + # string. + juju_yaml = "services: {}" + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + self.assertEqual( + '', charmhelpers.unit_info('test-service', 'state')) + + def test_unit_info_accepts_data(self): + # It's possible to pass a `data` dict, containing the parsed + # result of juju status, to unit_info(). + juju_status_data = yaml.safe_load( + self._make_juju_status_yaml()) + self.patch(charmhelpers, 'juju_status', lambda: None) + service_data = juju_status_data['services']['test-service'] + unit_info_dict = service_data['units']['test-service/0'] + for key, value in unit_info_dict.items(): + item_info = charmhelpers.unit_info( + 'test-service', key, data=juju_status_data) + self.assertEqual(value, item_info) + + def test_unit_info_returns_first_unit_by_default(self): + # By default, unit_info() just returns the value of the + # requested item for the first unit in a service. + juju_yaml = self._make_juju_status_yaml(num_units=2) + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + unit_address = charmhelpers.unit_info( + 'test-service', 'public-address') + self.assertEqual('test-service-0.example.com', unit_address) + + def test_unit_info_accepts_unit_name(self): + # By default, unit_info() just returns the value of the + # requested item for the first unit in a service. However, it's + # possible to pass a unit name to it, too. + juju_yaml = self._make_juju_status_yaml(num_units=2) + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + unit_address = charmhelpers.unit_info( + 'test-service', 'public-address', unit='test-service/1') + self.assertEqual('test-service-1.example.com', unit_address) + + def test_get_machine_data(self): + # get_machine_data() returns a dict containing the machine data + # parsed from juju status. + juju_yaml = self._make_juju_status_yaml() + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + machine_0_data = charmhelpers.get_machine_data()[0] + self.assertEqual('zookeeper.example.com', machine_0_data['dns-name']) + + def test_wait_for_machine_returns_if_machine_up(self): + # If wait_for_machine() is called and the machine(s) it is + # waiting for are already up, it will return. + juju_yaml = self._make_juju_status_yaml(machine_state='running') + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + machines, time_taken = charmhelpers.wait_for_machine(timeout=1) + self.assertEqual(1, machines) + + def test_wait_for_machine_times_out(self): + # If the machine that wait_for_machine is waiting for isn't + # 'running' before the passed timeout is reached, + # wait_for_machine will raise an error. + juju_yaml = self._make_juju_status_yaml() + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + self.assertRaises( + RuntimeError, charmhelpers.wait_for_machine, timeout=0) + + def test_wait_for_machine_always_returns_if_running_locally(self): + # If juju is actually running against a local LXC container, + # wait_for_machine will always return. + juju_status_dict = self._make_juju_status_dict() + # We'll update the 0th machine to make it look like it's an LXC + # container. 
+ juju_status_dict['machines'][0]['dns-name'] = 'localhost' + juju_yaml = yaml.dump(juju_status_dict) + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + machines, time_taken = charmhelpers.wait_for_machine(timeout=1) + # wait_for_machine will always return 1 machine started here, + # since there's only one machine to start. + self.assertEqual(1, machines) + # time_taken will be 0, since no actual waiting happened. + self.assertEqual(0, time_taken) + + def test_wait_for_machine_waits_for_multiple_machines(self): + # wait_for_machine can be told to wait for multiple machines. + juju_yaml = self._make_juju_status_yaml( + num_units=2, machine_state='running') + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + machines, time_taken = charmhelpers.wait_for_machine(num_machines=2) + self.assertEqual(2, machines) + + def test_wait_for_unit_returns_if_unit_started(self): + # wait_for_unit() will return if the service it's waiting for is + # already up. + juju_yaml = self._make_juju_status_yaml( + unit_state='started', machine_state='running') + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + charmhelpers.wait_for_unit('test-service', timeout=0) + + def test_wait_for_unit_raises_error_on_error_state(self): + # If the unit is in some kind of error state, wait_for_unit will + # raise a RuntimeError. + juju_yaml = self._make_juju_status_yaml( + unit_state='start-error', machine_state='running') + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + self.assertRaises(RuntimeError, charmhelpers.wait_for_unit, + 'test-service', timeout=0) + + def test_wait_for_unit_raises_error_on_timeout(self): + # If the unit does not start before the timeout is reached, + # wait_for_unit will raise a RuntimeError. + juju_yaml = self._make_juju_status_yaml( + unit_state='pending', machine_state='running') + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + self.assertRaises(RuntimeError, charmhelpers.wait_for_unit, + 'test-service', timeout=0) + + def test_wait_for_relation_returns_if_relation_up(self): + # wait_for_relation() waits for relations to come up. If a + # relation is already 'up', wait_for_relation() will return + # immediately. + juju_yaml = self._make_juju_status_yaml( + unit_state='started', machine_state='running') + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + charmhelpers.wait_for_relation('test-service', 'db', timeout=0) + + def test_wait_for_relation_times_out_if_relation_not_present(self): + # If a relation does not exist at all before a timeout is + # reached, wait_for_relation() will raise a RuntimeError. + juju_dict = self._make_juju_status_dict( + unit_state='started', machine_state='running') + units = juju_dict['services']['test-service']['units'] + # We'll remove all the relations for test-service for this test. + units['test-service/0']['relations'] = {} + juju_dict['services']['test-service']['units'] = units + juju_yaml = yaml.dump(juju_dict) + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + self.assertRaises( + RuntimeError, charmhelpers.wait_for_relation, 'test-service', + 'db', timeout=0) + + def test_wait_for_relation_times_out_if_relation_not_up(self): + # If a relation does not transition to an 'up' state, before a + # timeout is reached, wait_for_relation() will raise a + # RuntimeError. 
+ juju_dict = self._make_juju_status_dict( + unit_state='started', machine_state='running') + units = juju_dict['services']['test-service']['units'] + units['test-service/0']['relations']['db']['state'] = 'down' + juju_dict['services']['test-service']['units'] = units + juju_yaml = yaml.dump(juju_dict) + self.patch(charmhelpers, 'juju_status', lambda: juju_yaml) + self.assertRaises( + RuntimeError, charmhelpers.wait_for_relation, 'test-service', + 'db', timeout=0) + + def test_wait_for_page_contents_returns_if_contents_available(self): + # wait_for_page_contents() will wait until a given string is + # contained within the results of a given url and will return + # once it does. + # We need to patch the charmhelpers instance of urlopen so that + # it doesn't try to connect out. + test_content = "Hello, world." + self.patch(charmhelpers, 'urlopen', + lambda *args: StringIO(test_content)) + charmhelpers.wait_for_page_contents( + 'http://example.com', test_content, timeout=0) + + def test_wait_for_page_contents_times_out(self): + # If the desired contents do not appear within the page before + # the specified timeout, wait_for_page_contents() will raise a + # RuntimeError. + # We need to patch the charmhelpers instance of urlopen so that + # it doesn't try to connect out. + self.patch(charmhelpers, 'urlopen', + lambda *args: StringIO("This won't work.")) + self.assertRaises( + RuntimeError, charmhelpers.wait_for_page_contents, + 'http://example.com', "This will error", timeout=0) + + +if __name__ == '__main__': + unittest.main() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmsupport/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmsupport/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmsupport/test_nrpe.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmsupport/test_nrpe.py new file mode 100644 index 0000000000000000000000000000000000000000..454c62176f5fe0a772c0d73cc44061592aad56e4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/charmsupport/test_nrpe.py @@ -0,0 +1,419 @@ +import os +import yaml +import subprocess + +from testtools import TestCase +from mock import patch, call, MagicMock + +from charmhelpers.contrib.charmsupport import nrpe +from charmhelpers.core import host + + +class NRPEBaseTestCase(TestCase): + patches = { + 'config': {'object': nrpe}, + 'copy2': {'object': nrpe.shutil}, + 'log': {'object': nrpe}, + 'getpwnam': {'object': nrpe.pwd}, + 'getgrnam': {'object': nrpe.grp}, + 'glob': {'object': nrpe.glob}, + 'mkdir': {'object': os}, + 'chown': {'object': os}, + 'chmod': {'object': os}, + 'exists': {'object': os.path}, + 'listdir': {'object': os}, + 'remove': {'object': os}, + 'open': {'object': nrpe, 'create': True}, + 'isfile': {'object': os.path}, + 'isdir': {'object': os.path}, + 'call': {'object': subprocess}, + 'relation_get': {'object': nrpe}, + 'relation_ids': {'object': nrpe}, + 'relation_set': {'object': nrpe}, + 'relations_of_type': {'object': nrpe}, + 'service': {'object': nrpe}, + 'init_is_systemd': {'object': host}, + } + + def setUp(self): + super(NRPEBaseTestCase, self).setUp() + self.patched = {} + # Mock the universe. 
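+ # Each key in self.patches names an attribute and the object that owns
+ # it; patch.object() replaces every one of them and the started mocks
+ # are collected in self.patched so individual tests can program them,
+ # e.g. self.patched['exists'].return_value = True.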
+ for attr, data in self.patches.items(): + create = data.get('create', False) + patcher = patch.object(data['object'], attr, create=create) + self.patched[attr] = patcher.start() + self.addCleanup(patcher.stop) + env_patcher = patch.dict('os.environ', + {'JUJU_UNIT_NAME': 'testunit', + 'CHARM_DIR': '/usr/lib/test_charm_dir'}) + env_patcher.start() + self.addCleanup(env_patcher.stop) + + def check_call_counts(self, **kwargs): + for attr, expected in kwargs.items(): + patcher = self.patched[attr] + self.assertEqual(expected, patcher.call_count, attr) + + +class NRPETestCase(NRPEBaseTestCase): + + def test_init_gets_config(self): + self.patched['config'].return_value = {'nagios_context': 'testctx', + 'nagios_servicegroups': 'testsgrps'} + + checker = nrpe.NRPE() + + self.assertEqual('testctx', checker.nagios_context) + self.assertEqual('testsgrps', checker.nagios_servicegroups) + self.assertEqual('testunit', checker.unit_name) + self.assertEqual('testctx-testunit', checker.hostname) + self.check_call_counts(config=1) + + def test_init_hostname(self): + """Test that the hostname parameter is correctly set""" + checker = nrpe.NRPE() + self.assertEqual(checker.hostname, + "{}-{}".format(checker.nagios_context, + checker.unit_name)) + hostname = "test.host" + checker = nrpe.NRPE(hostname=hostname) + self.assertEqual(checker.hostname, hostname) + + def test_default_servicegroup(self): + """Test that nagios_servicegroups gets set to the default if omitted""" + self.patched['config'].return_value = {'nagios_context': 'testctx'} + checker = nrpe.NRPE() + self.assertEqual(checker.nagios_servicegroups, 'testctx') + + def test_no_nagios_installed_bails(self): + self.patched['config'].return_value = {'nagios_context': 'test', + 'nagios_servicegroups': ''} + self.patched['getgrnam'].side_effect = KeyError + checker = nrpe.NRPE() + + self.assertEqual(None, checker.write()) + + expected = 'Nagios user not set up, nrpe checks not updated' + self.patched['log'].assert_called_with(expected) + self.check_call_counts(log=2, config=1, getpwnam=1, getgrnam=1) + + def test_write_no_checker(self): + self.patched['config'].return_value = {'nagios_context': 'test', + 'nagios_servicegroups': ''} + self.patched['exists'].return_value = True + checker = nrpe.NRPE() + + self.assertEqual(None, checker.write()) + + self.check_call_counts(config=1, getpwnam=1, getgrnam=1, exists=1) + + def test_write_restarts_service(self): + self.patched['config'].return_value = {'nagios_context': 'test', + 'nagios_servicegroups': ''} + self.patched['exists'].return_value = True + checker = nrpe.NRPE() + + self.assertEqual(None, checker.write()) + + self.patched['service'].assert_called_with('restart', 'nagios-nrpe-server') + self.check_call_counts(config=1, getpwnam=1, getgrnam=1, + exists=1, service=1) + + def test_update_nrpe(self): + self.patched['config'].return_value = {'nagios_context': 'a', + 'nagios_servicegroups': ''} + self.patched['exists'].return_value = True + self.patched['relation_get'].return_value = { + 'egress-subnets': '10.66.111.24/32', + 'ingress-address': '10.66.111.24', + 'private-address': '10.66.111.24' + } + + def _rels(rname): + relations = { + 'local-monitors': 'local-monitors:1', + 'nrpe-external-master': 'nrpe-external-master:2', + } + return [relations[rname]] + self.patched['relation_ids'].side_effect = _rels + + checker = nrpe.NRPE() + checker.add_check(shortname="myservice", + description="Check MyService", + check_cmd="check_http http://localhost") + + self.assertEqual(None, checker.write()) + + 
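+ # write() is expected to emit two files: the check definition under
+ # /etc/nagios/nrpe.d/ and the service export consumed by the Nagios
+ # master, which the assertions below verify.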
self.assertEqual(2, self.patched['open'].call_count) + filename = 'check_myservice.cfg' + expected = [ + ('/etc/nagios/nrpe.d/%s' % filename, 'w'), + ('/var/lib/nagios/export/service__a-testunit_%s' % filename, 'w'), + ] + actual = [x[0] for x in self.patched['open'].call_args_list] + self.assertEqual(expected, actual) + outfile = self.patched['open'].return_value.__enter__.return_value + service_file_contents = """ +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service { + use active-service + host_name a-testunit + service_description a-testunit[myservice] Check MyService + check_command check_nrpe!check_myservice + servicegroups a +} +""" + expected = [ + '# check myservice\n', + '# The following header was added automatically by juju\n', + '# Modifying it will affect nagios monitoring and alerting\n', + '# servicegroups: a\n', + 'command[check_myservice]=/usr/lib/nagios/plugins/check_http http://localhost\n', + service_file_contents, + ] + actual = [x[0][0] for x in outfile.write.call_args_list] + self.assertEqual(expected, actual) + + nrpe_monitors = {'myservice': + {'command': 'check_myservice'}} + monitors = yaml.dump( + {"monitors": {"remote": {"nrpe": nrpe_monitors}}}) + relation_set_calls = [ + call(monitors=monitors, relation_id="local-monitors:1"), + call(monitors=monitors, relation_id="nrpe-external-master:2"), + ] + self.patched['relation_set'].assert_has_calls(relation_set_calls, any_order=True) + self.check_call_counts(config=1, getpwnam=1, getgrnam=1, + exists=4, open=2, listdir=1, relation_get=2, + relation_ids=3, relation_set=3) + + +class NRPECheckTestCase(NRPEBaseTestCase): + + def test_invalid_shortname(self): + cases = [ + 'invalid:name', + '', + ] + for shortname in cases: + self.assertRaises(nrpe.CheckException, nrpe.Check, shortname, + 'description', '/some/command') + + def test_valid_shortname(self): + cases = [ + '1_number_is_fine', + 'dots.are.good', + 'dashes-ok', + 'UPPER_case_allowed', + '5', + '@valid', + ] + for shortname in cases: + check = nrpe.Check(shortname, 'description', '/some/command') + self.assertEqual(shortname, check.shortname) + + def test_write_removes_existing_config(self): + self.patched['listdir'].return_value = [ + 'foo', 'bar.cfg', '_check_shortname.cfg'] + check = nrpe.Check('shortname', 'description', '/some/command') + + self.assertEqual(None, check.write('testctx', 'hostname', 'testsgrp')) + + expected = '/var/lib/nagios/export/_check_shortname.cfg' + self.patched['remove'].assert_called_once_with(expected) + self.check_call_counts(exists=3, remove=1, open=2, listdir=1) + + def test_check_write_nrpe_exportdir_not_accessible(self): + self.patched['exists'].return_value = False + check = nrpe.Check('shortname', 'description', '/some/command') + + self.assertEqual(None, check.write('testctx', 'hostname', 'testsgrps')) + expected = ('Not writing service config as ' + '/var/lib/nagios/export is not accessible') + self.patched['log'].assert_has_calls( + [call(expected)], any_order=True) + self.check_call_counts(log=2, open=1) + + def test_locate_cmd_no_args(self): + self.patched['exists'].return_value = True + + check = nrpe.Check('shortname', 'description', '/bin/ls') + + self.assertEqual('/bin/ls', check.check_cmd) + + def test_locate_cmd_not_found(self): + self.patched['exists'].return_value = False + check = nrpe.Check('shortname', 'description', 'check_http -x -y -z') + + self.assertEqual('', check.check_cmd) + self.assertEqual(2, 
self.patched['exists'].call_count) + expected = [ + '/usr/lib/nagios/plugins/check_http', + '/usr/local/lib/nagios/plugins/check_http', + ] + actual = [x[0][0] for x in self.patched['exists'].call_args_list] + self.assertEqual(expected, actual) + self.check_call_counts(exists=2, log=1) + expected = 'Check command not found: check_http' + self.assertEqual(expected, self.patched['log'].call_args[0][0]) + + def test_run(self): + self.patched['exists'].return_value = True + command = '/usr/bin/wget foo' + check = nrpe.Check('shortname', 'description', command) + + self.assertEqual(None, check.run()) + + self.check_call_counts(exists=1, call=1) + self.assertEqual(command, self.patched['call'].call_args[0][0]) + + +class NRPEMiscTestCase(NRPEBaseTestCase): + def test_get_nagios_hostcontext(self): + rel_info = { + 'nagios_hostname': 'bob-openstack-dashboard-0', + 'private-address': '10.5.3.103', + '__unit__': u'dashboard-nrpe/1', + '__relid__': u'nrpe-external-master:2', + 'nagios_host_context': u'bob', + } + self.patched['relations_of_type'].return_value = [rel_info] + self.assertEqual(nrpe.get_nagios_hostcontext(), 'bob') + + def test_get_nagios_hostname(self): + rel_info = { + 'nagios_hostname': 'bob-openstack-dashboard-0', + 'private-address': '10.5.3.103', + '__unit__': u'dashboard-nrpe/1', + '__relid__': u'nrpe-external-master:2', + 'nagios_host_context': u'bob', + } + self.patched['relations_of_type'].return_value = [rel_info] + self.assertEqual(nrpe.get_nagios_hostname(), 'bob-openstack-dashboard-0') + + def test_get_nagios_unit_name(self): + rel_info = { + 'nagios_hostname': 'bob-openstack-dashboard-0', + 'private-address': '10.5.3.103', + '__unit__': u'dashboard-nrpe/1', + '__relid__': u'nrpe-external-master:2', + 'nagios_host_context': u'bob', + } + self.patched['relations_of_type'].return_value = [rel_info] + self.assertEqual(nrpe.get_nagios_unit_name(), 'bob:testunit') + + def test_get_nagios_unit_name_no_hc(self): + self.patched['relations_of_type'].return_value = [] + self.assertEqual(nrpe.get_nagios_unit_name(), 'testunit') + + @patch.object(os.path, 'isdir') + def test_add_init_service_checks(self, mock_isdir): + def _exists(init_file): + files = ['/etc/init/apache2.conf', + '/usr/lib/nagios/plugins/check_upstart_job', + '/etc/init.d/haproxy', + '/usr/lib/nagios/plugins/check_status_file.py', + '/etc/cron.d/nagios-service-check-haproxy', + '/var/lib/nagios/service-check-haproxy.txt', + '/usr/lib/nagios/plugins/check_systemd.py' + ] + return init_file in files + + self.patched['exists'].side_effect = _exists + + # Test without systemd and /var/lib/nagios does not exist + self.patched['init_is_systemd'].return_value = False + mock_isdir.return_value = False + bill = nrpe.NRPE() + services = ['apache2', 'haproxy'] + nrpe.add_init_service_checks(bill, services, 'testunit') + mock_isdir.assert_called_with('/var/lib/nagios') + self.patched['call'].assert_not_called() + expect_cmds = { + 'apache2': '/usr/lib/nagios/plugins/check_upstart_job apache2', + 'haproxy': '/usr/lib/nagios/plugins/check_status_file.py -f ' + '/var/lib/nagios/service-check-haproxy.txt', + } + self.assertEqual(bill.checks[0].shortname, 'apache2') + self.assertEqual(bill.checks[0].check_cmd, expect_cmds['apache2']) + self.assertEqual(bill.checks[1].shortname, 'haproxy') + self.assertEqual(bill.checks[1].check_cmd, expect_cmds['haproxy']) + + # without systemd and /var/lib/nagios does exist + mock_isdir.return_value = True + f = MagicMock() + self.patched['open'].return_value = f + bill = nrpe.NRPE() + services = 
['apache2', 'haproxy'] + nrpe.add_init_service_checks(bill, services, 'testunit') + mock_isdir.assert_called_with('/var/lib/nagios') + self.patched['call'].assert_called_with( + ['/usr/local/lib/nagios/plugins/check_exit_status.pl', '-e', '-s', + '/etc/init.d/haproxy', 'status'], stdout=f, + stderr=subprocess.STDOUT) + + # Test regular services and snap services with systemd + services = ['apache2', 'haproxy', 'snap.test.test', + 'ceph-radosgw@hostname'] + self.patched['init_is_systemd'].return_value = True + nrpe.add_init_service_checks(bill, services, 'testunit') + expect_cmds = { + 'apache2': '/usr/lib/nagios/plugins/check_systemd.py apache2', + 'haproxy': '/usr/lib/nagios/plugins/check_systemd.py haproxy', + 'snap.test.test': '/usr/lib/nagios/plugins/check_systemd.py snap.test.test', + } + self.assertEqual(bill.checks[2].shortname, 'apache2') + self.assertEqual(bill.checks[2].check_cmd, expect_cmds['apache2']) + self.assertEqual(bill.checks[3].shortname, 'haproxy') + self.assertEqual(bill.checks[3].check_cmd, expect_cmds['haproxy']) + self.assertEqual(bill.checks[4].shortname, 'snap.test.test') + self.assertEqual(bill.checks[4].check_cmd, expect_cmds['snap.test.test']) + + def test_copy_nrpe_checks(self): + file_presence = { + 'filea': True, + 'fileb': False} + self.patched['exists'].return_value = True + self.patched['glob'].return_value = ['filea', 'fileb'] + self.patched['isdir'].side_effect = [False, True] + self.patched['isfile'].side_effect = lambda x: file_presence[x] + nrpe.copy_nrpe_checks() + self.patched['glob'].assert_called_once_with( + ('/usr/lib/test_charm_dir/hooks/charmhelpers/contrib/openstack/' + 'files/check_*')) + self.patched['copy2'].assert_called_once_with( + 'filea', + '/usr/local/lib/nagios/plugins/filea') + + def test_copy_nrpe_checks_other_root(self): + file_presence = { + 'filea': True, + 'fileb': False} + self.patched['exists'].return_value = True + self.patched['glob'].return_value = ['filea', 'fileb'] + self.patched['isdir'].side_effect = [True, False] + self.patched['isfile'].side_effect = lambda x: file_presence[x] + nrpe.copy_nrpe_checks() + self.patched['glob'].assert_called_once_with( + ('/usr/lib/test_charm_dir/charmhelpers/contrib/openstack/' + 'files/check_*')) + self.patched['copy2'].assert_called_once_with( + 'filea', + '/usr/local/lib/nagios/plugins/filea') + + def test_copy_nrpe_checks_nrpe_files_dir(self): + file_presence = { + 'filea': True, + 'fileb': False} + self.patched['exists'].return_value = True + self.patched['glob'].return_value = ['filea', 'fileb'] + self.patched['isfile'].side_effect = lambda x: file_presence[x] + nrpe.copy_nrpe_checks(nrpe_files_dir='/other/dir') + self.patched['glob'].assert_called_once_with( + '/other/dir/check_*') + self.patched['copy2'].assert_called_once_with( + 'filea', + '/usr/local/lib/nagios/plugins/filea') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/database/README b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/database/README new file mode 100644 index 0000000000000000000000000000000000000000..c58240383d6d1cc6fc1c8388882e50d529804fc8 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/database/README @@ -0,0 +1,7 @@ +The Mysql test is not enabled. In order to enable it, touch __init__.py. + +As it currently stands, the test has poor coverage and fails under python3. +These things should be addressed before re-enabling the test. 
+ +Adam Israel +March 17, 2015 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/database/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/database/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/database/test_mysql.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/database/test_mysql.py new file mode 100644 index 0000000000000000000000000000000000000000..c449fc481f1a76b5da9090288a64d1df8eb5b259 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/database/test_mysql.py @@ -0,0 +1,909 @@ +import os +import mock +import json +import unittest +import sys +import shutil +import tempfile + +from collections import OrderedDict + +sys.modules['MySQLdb'] = mock.Mock() +from charmhelpers.contrib.database import mysql # noqa + + +class MysqlTests(unittest.TestCase): + def setUp(self): + super(MysqlTests, self).setUp() + + def test_connect_host_defined(self): + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + with mock.patch.object(mysql, 'log'): + helper.connect(user='user', password='password', host='1.1.1.1') + mysql.MySQLdb.connect.assert_called_with( + passwd='password', host='1.1.1.1', user='user') + + def test_connect_host_not_defined(self): + helper = mysql.MySQLHelper('foo', 'bar') + with mock.patch.object(mysql, 'log'): + helper.connect(user='user', password='password') + mysql.MySQLdb.connect.assert_called_with( + passwd='password', host='localhost', user='user') + + def test_connect_port_defined(self): + helper = mysql.MySQLHelper('foo', 'bar') + with mock.patch.object(mysql, 'log'): + helper.connect(user='user', password='password', port=3316) + mysql.MySQLdb.connect.assert_called_with( + passwd='password', host='localhost', user='user', port=3316) + + @mock.patch.object(mysql.MySQLHelper, 'normalize_address') + @mock.patch.object(mysql.MySQLHelper, 'get_mysql_password') + @mock.patch.object(mysql.MySQLHelper, 'grant_exists') + @mock.patch.object(mysql, 'relation_get') + @mock.patch.object(mysql, 'related_units') + @mock.patch.object(mysql, 'log') + def test_get_allowed_units(self, mock_log, mock_related_units, + mock_relation_get, + mock_grant_exists, + mock_get_password, + mock_normalize_address): + + # echo + mock_normalize_address.side_effect = lambda addr: addr + + def mock_rel_get(unit, rid): + if unit == 'unit/0': + # Non-prefixed settings + d = {'private-address': '10.0.0.1', + 'hostname': 'hostA'} + elif unit == 'unit/1': + # Containing prefixed settings + d = {'private-address': '10.0.0.2', + 'dbA_hostname': json.dumps(['10.0.0.2', '2001:db8:1::2'])} + elif unit == 'unit/2': + # No hostname + d = {'private-address': '10.0.0.3'} + + return d + + mock_relation_get.side_effect = mock_rel_get + mock_related_units.return_value = ['unit/0', 'unit/1', 'unit/2'] + + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + units = helper.get_allowed_units('dbA', 'userA') + + calls = [mock.call('dbA', 'userA', 'hostA'), + mock.call('dbA', 'userA', '10.0.0.2'), + mock.call('dbA', 'userA', '2001:db8:1::2'), + mock.call('dbA', 'userA', '10.0.0.3')] + + helper.grant_exists.assert_has_calls(calls, any_order=True) + self.assertEqual(units, set(['unit/0', 'unit/1', 'unit/2'])) + + @mock.patch('charmhelpers.contrib.network.ip.log', + lambda *args, **kwargs: None) + 
@mock.patch('charmhelpers.contrib.network.ip.ns_query') + @mock.patch('charmhelpers.contrib.network.ip.socket') + @mock.patch.object(mysql, 'unit_get') + @mock.patch.object(mysql, 'config_get') + @mock.patch.object(mysql, 'log') + def test_normalize_address(self, mock_log, mock_config_get, mock_unit_get, + mock_socket, mock_ns_query): + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + # prefer-ipv6 + mock_config_get.return_value = False + # echo + mock_socket.gethostbyname.side_effect = lambda addr: addr + + mock_unit_get.return_value = '10.0.0.1' + out = helper.normalize_address('10.0.0.1') + self.assertEqual('127.0.0.1', out) + mock_config_get.assert_called_with('prefer-ipv6') + + mock_unit_get.return_value = '10.0.0.1' + out = helper.normalize_address('10.0.0.2') + self.assertEqual('10.0.0.2', out) + mock_config_get.assert_called_with('prefer-ipv6') + + out = helper.normalize_address('2001:db8:1::1') + self.assertEqual('2001:db8:1::1', out) + mock_config_get.assert_called_with('prefer-ipv6') + + mock_socket.gethostbyname.side_effect = Exception + mock_ns_query.return_value = None + out = helper.normalize_address('unresolvable') + self.assertEqual('unresolvable', out) + mock_config_get.assert_called_with('prefer-ipv6') + + # prefer-ipv6 + mock_config_get.return_value = True + mock_socket.gethostbyname.side_effect = 'other' + out = helper.normalize_address('unresolvable') + self.assertEqual('unresolvable', out) + mock_config_get.assert_called_with('prefer-ipv6') + + def test_passwd_keys(self): + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + self.assertEqual(list(helper.passwd_keys(None)), ['mysql.passwd']) + self.assertEqual(list(helper.passwd_keys('auser')), + ['mysql-auser.passwd', 'auser.passwd']) + + @mock.patch.object(mysql.MySQLHelper, 'migrate_passwords_to_leader_storage') + @mock.patch.object(mysql.MySQLHelper, 'get_mysql_password_on_disk') + @mock.patch.object(mysql, 'leader_get') + def test_get_mysql_password_no_peer_passwd(self, mock_leader_get, + mock_get_disk_pw, + mock_migrate_pw): + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + store = {} + mock_leader_get.side_effect = lambda key: store.get(key) + mock_get_disk_pw.return_value = "disk-passwd" + self.assertEqual(helper.get_mysql_password(), "disk-passwd") + self.assertTrue(mock_migrate_pw.called) + + @mock.patch.object(mysql.MySQLHelper, 'migrate_passwords_to_leader_storage') + @mock.patch.object(mysql.MySQLHelper, 'get_mysql_password_on_disk') + @mock.patch.object(mysql, 'leader_get') + def test_get_mysql_password_peer_passwd(self, mock_leader_get, + mock_get_disk_pw, mock_migrate_pw): + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + store = {'mysql-userA.passwd': 'passwdA'} + mock_leader_get.side_effect = lambda key: store.get(key) + mock_get_disk_pw.return_value = "disk-passwd" + self.assertEqual(helper.get_mysql_password(username='userA'), + "passwdA") + self.assertTrue(mock_migrate_pw.called) + + @mock.patch.object(mysql.MySQLHelper, 'migrate_passwords_to_leader_storage') + @mock.patch.object(mysql.MySQLHelper, 'get_mysql_password_on_disk') + @mock.patch.object(mysql, 'leader_get') + def test_get_mysql_password_peer_passwd_legacy(self, mock_leader_get, + mock_get_disk_pw, + mock_migrate_pw): + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + store = {'userA.passwd': 'passwdA'} + mock_leader_get.side_effect = lambda key: store.get(key) + mock_get_disk_pw.return_value = "disk-passwd" + self.assertEqual(helper.get_mysql_password(username='userA'), + "passwdA") + 
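+ # The lookup via the legacy key should still trigger migration of
+ # on-disk passwords into leader storage.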
self.assertTrue(mock_migrate_pw.called) + + @mock.patch.object(mysql.MySQLHelper, 'migrate_passwords_to_leader_storage') + @mock.patch.object(mysql.MySQLHelper, 'get_mysql_password_on_disk') + @mock.patch.object(mysql, 'leader_get') + def test_get_mysql_password_peer_passwd_all(self, mock_leader_get, + mock_get_disk_pw, + mock_migrate_pw): + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + # Add * so we can identify that the new format key takes precedence + # if found. + store = {'mysql-userA.passwd': 'passwdA', + 'userA.passwd': 'passwdA*'} + mock_leader_get.side_effect = lambda key: store.get(key) + mock_get_disk_pw.return_value = "disk-passwd" + self.assertEqual(helper.get_mysql_password(username='userA'), + "passwdA") + self.assertTrue(mock_migrate_pw.called) + + @mock.patch.object(mysql.MySQLHelper, 'set_mysql_password') + def test_set_mysql_root_password(self, mock_set_passwd): + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + helper.set_mysql_root_password(password='1234') + mock_set_passwd.assert_called_with( + 'root', + '1234', + current_password=None) + + @mock.patch.object(mysql.MySQLHelper, 'set_mysql_password') + def test_set_mysql_root_password_cur_passwd(self, mock_set_passwd): + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + helper.set_mysql_root_password(password='1234', current_password='abc') + mock_set_passwd.assert_called_with( + 'root', + '1234', + current_password='abc') + + @mock.patch.object(mysql, 'log', lambda *args, **kwargs: None) + @mock.patch.object(mysql, 'is_leader') + @mock.patch.object(mysql, 'leader_get') + @mock.patch.object(mysql, 'leader_set') + @mock.patch.object(mysql, 'CompareHostReleases') + @mock.patch.object(mysql.MySQLHelper, 'get_mysql_password') + @mock.patch.object(mysql.MySQLHelper, 'connect') + def test_set_mysql_password(self, mock_connect, mock_get_passwd, + mock_compare_releases, mock_leader_set, + mock_leader_get, mock_is_leader): + mock_connection = mock.MagicMock() + mock_cursor = mock.MagicMock() + mock_connection.cursor.return_value = mock_cursor + mock_get_passwd.return_value = 'asdf' + mock_is_leader.return_value = True + mock_leader_get.return_value = '1234' + mock_compare_releases.return_value = 'artful' + + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + helper.connection = mock_connection + + helper.set_mysql_password(username='root', password='1234') + + mock_connect.assert_has_calls( + [mock.call(user='root', password='asdf'), # original password + mock.call(user='root', password='1234')]) # new password + mock_leader_get.assert_has_calls([mock.call('mysql.passwd')]) + mock_leader_set.assert_has_calls( + [mock.call(settings={'mysql.passwd': '1234'})] + ) + SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = " + "PASSWORD( %s ) WHERE user = %s;") + mock_cursor.assert_has_calls( + [mock.call.execute(SQL_UPDATE_PASSWD, ('1234', 'root')), + mock.call.execute('FLUSH PRIVILEGES;'), + mock.call.close(), + mock.call.execute('select 1;'), + mock.call.close()] + ) + mock_get_passwd.assert_called_once_with(None) + + # make sure for the non-leader leader-set is not called + mock_is_leader.return_value = False + mock_leader_set.reset_mock() + mock_get_passwd.reset_mock() + helper.set_mysql_password(username='root', password='1234') + mock_leader_set.assert_not_called() + mock_get_passwd.assert_called_once_with(None) + + mock_get_passwd.reset_mock() + mock_compare_releases.return_value = 'bionic' + helper.set_mysql_password(username='root', password='1234') + SQL_UPDATE_PASSWD = ("UPDATE mysql.user 
SET " + "authentication_string = " + "PASSWORD( %s ) WHERE user = %s;") + mock_cursor.assert_has_calls( + [mock.call.execute(SQL_UPDATE_PASSWD, ('1234', 'root')), + mock.call.execute('FLUSH PRIVILEGES;'), + mock.call.close(), + mock.call.execute('select 1;'), + mock.call.close()] + ) + mock_get_passwd.assert_called_once_with(None) + + # Test supplying the current password + mock_is_leader.return_value = False + mock_connect.reset_mock() + mock_get_passwd.reset_mock() + helper.set_mysql_password( + username='root', + password='1234', + current_password='currpass') + self.assertFalse(mock_get_passwd.called) + mock_connect.assert_has_calls( + [mock.call(user='root', password='currpass'), # original password + mock.call(user='root', password='1234')]) # new password + + @mock.patch.object(mysql, 'leader_get') + @mock.patch.object(mysql, 'leader_set') + @mock.patch.object(mysql.MySQLHelper, 'get_mysql_password') + @mock.patch.object(mysql.MySQLHelper, 'connect') + def test_set_mysql_password_fail_to_connect(self, mock_connect, + mock_get_passwd, + mock_leader_set, + mock_leader_get): + + class FakeOperationalError(Exception): + pass + + def fake_connect(*args, **kwargs): + raise FakeOperationalError('foobar') + + mysql.MySQLdb.OperationalError = FakeOperationalError + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + mock_connect.side_effect = fake_connect + self.assertRaises(mysql.MySQLSetPasswordError, + helper.set_mysql_password, + username='root', password='1234') + + @mock.patch.object(mysql, 'leader_get') + @mock.patch.object(mysql, 'leader_set') + @mock.patch.object(mysql.MySQLHelper, 'get_mysql_password') + @mock.patch.object(mysql.MySQLHelper, 'connect') + def test_set_mysql_password_fail_to_connect2(self, mock_connect, + mock_get_passwd, + mock_leader_set, + mock_leader_get): + + class FakeOperationalError(Exception): + def __str__(self): + return 'some-error' + + operational_error = FakeOperationalError('foobar') + + def fake_connect(user, password): + # fail for the new password + if user == 'root' and password == '1234': + raise operational_error + else: + return mock.MagicMock() + + mysql.MySQLdb.OperationalError = FakeOperationalError + helper = mysql.MySQLHelper('foo', 'bar', host='hostA') + helper.connection = mock.MagicMock() + mock_connect.side_effect = fake_connect + with self.assertRaises(mysql.MySQLSetPasswordError) as cm: + helper.set_mysql_password(username='root', password='1234') + + ex = cm.exception + self.assertEqual(ex.args[0], 'Cannot connect using new password: some-error') + self.assertEqual(ex.args[1], operational_error) + + @mock.patch.object(mysql, 'is_leader') + @mock.patch.object(mysql, 'leader_set') + def test_migrate_passwords_to_leader_storage(self, mock_leader_set, + mock_is_leader): + files = {'mysql.passwd': '1', + 'userA.passwd': '2', + 'mysql-userA.passwd': '3'} + store = {} + + def _store(settings): + store.update(settings) + + mock_is_leader.return_value = True + + tmpdir = tempfile.mkdtemp('charm-helpers-unit-tests') + try: + root_tmplt = "%s/mysql.passwd" % (tmpdir) + helper = mysql.MySQLHelper(root_tmplt, None, host='hostA') + for f in files: + with open(os.path.join(tmpdir, f), 'w') as fd: + fd.write(files[f]) + + mock_leader_set.side_effect = _store + helper.migrate_passwords_to_leader_storage() + + calls = [mock.call(settings={'mysql.passwd': '1'}), + mock.call(settings={'userA.passwd': '2'}), + mock.call(settings={'mysql-userA.passwd': '3'})] + + mock_leader_set.assert_has_calls(calls, + any_order=True) + finally: + 
shutil.rmtree(tmpdir) + + # Note that legacy key/val is NOT overwritten + self.assertEqual(store, {'mysql.passwd': '1', + 'userA.passwd': '2', + 'mysql-userA.passwd': '3'}) + + @mock.patch.object(mysql, 'log', lambda *args, **kwargs: None) + @mock.patch.object(mysql, 'is_leader') + @mock.patch.object(mysql, 'leader_set') + def test_migrate_passwords_to_leader_storage_not_leader(self, mock_leader_set, + mock_is_leader): + mock_is_leader.return_value = False + tmpdir = tempfile.mkdtemp('charm-helpers-unit-tests') + try: + root_tmplt = "%s/mysql.passwd" % (tmpdir) + helper = mysql.MySQLHelper(root_tmplt, None, host='hostA') + helper.migrate_passwords_to_leader_storage() + finally: + shutil.rmtree(tmpdir) + mock_leader_set.assert_not_called() + + +class MySQLConfigHelperTests(unittest.TestCase): + + def setUp(self): + super(MySQLConfigHelperTests, self).setUp() + self.config_data = {} + self.config = mock.MagicMock() + mysql.config_get = self.config + self.config.side_effect = self._fake_config + + def _fake_config(self, key=None): + if key: + try: + return self.config_data[key] + except KeyError: + return None + else: + return self.config_data + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_innodb_pool_fixed(self, log, mem): + mem.return_value = "100G" + self.config_data = { + 'innodb-buffer-pool-size': "50%", + } + + helper = mysql.MySQLConfigHelper() + + self.assertEqual( + helper.get_innodb_buffer_pool_size(), + helper.human_to_bytes("50G")) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_innodb_pool_not_set(self, mog, mem): + mem.return_value = "100G" + self.config_data = { + 'innodb-buffer-pool-size': '', + } + + helper = mysql.MySQLConfigHelper() + + self.assertEqual( + helper.get_innodb_buffer_pool_size(), + helper.DEFAULT_INNODB_BUFFER_SIZE_MAX) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_innodb_buffer_unset(self, mog, mem): + mem.return_value = "100G" + self.config_data = { + 'innodb-buffer-pool-size': None, + 'dataset-size': None, + } + + helper = mysql.MySQLConfigHelper() + + self.assertEqual( + helper.get_innodb_buffer_pool_size(), + helper.DEFAULT_INNODB_BUFFER_SIZE_MAX) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_innodb_buffer_unset_small(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'innodb-buffer-pool-size': None, + 'dataset-size': None, + } + + helper = mysql.MySQLConfigHelper() + + self.assertEqual( + helper.get_innodb_buffer_pool_size(), + int(helper.human_to_bytes(mem.return_value) * + helper.DEFAULT_INNODB_BUFFER_FACTOR)) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_innodb_dataset_size(self, mog, mem): + mem.return_value = "100G" + self.config_data = { + 'dataset-size': "10G", + } + + helper = mysql.MySQLConfigHelper() + + self.assertEqual( + helper.get_innodb_buffer_pool_size(), + int(helper.human_to_bytes("10G"))) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_tuning_level(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'tuning-level': 'safest', + } + + helper = mysql.MySQLConfigHelper() + + self.assertEqual( + helper.get_innodb_flush_log_at_trx_commit(), + 1 + ) + + @mock.patch.object(mysql.MySQLConfigHelper, 
'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_tuning_level_fast(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'tuning-level': 'fast', + } + + helper = mysql.MySQLConfigHelper() + + self.assertEqual( + helper.get_innodb_flush_log_at_trx_commit(), + 2 + ) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_tuning_level_unsafe(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'tuning-level': 'unsafe', + } + + helper = mysql.MySQLConfigHelper() + + self.assertEqual( + helper.get_innodb_flush_log_at_trx_commit(), + 0 + ) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_innodb_valid_values(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'innodb-change-buffering': 'all', + } + + helper = mysql.MySQLConfigHelper() + + self.assertEqual( + helper.get_innodb_change_buffering(), + 'all' + ) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_get_innodb_invalid_values(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'innodb-change-buffering': 'invalid', + } + + helper = mysql.MySQLConfigHelper() + + self.assertTrue(helper.get_innodb_change_buffering() is None) + + +class PerconaTests(unittest.TestCase): + + def setUp(self): + super(PerconaTests, self).setUp() + self.config_data = {} + self.config = mock.MagicMock() + mysql.config_get = self.config + self.config.side_effect = self._fake_config + + def _fake_config(self, key=None): + if key: + try: + return self.config_data[key] + except KeyError: + return None + else: + return self.config_data + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_innodb_pool_fixed(self, log, mem): + mem.return_value = "100G" + self.config_data = { + 'innodb-buffer-pool-size': "50%", + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual(mysql_config.get('innodb_buffer_pool_size'), + helper.human_to_bytes("50G")) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_innodb_pool_not_set(self, mog, mem): + mem.return_value = "100G" + self.config_data = { + 'innodb-buffer-pool-size': '', + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual( + mysql_config.get('innodb_buffer_pool_size'), + helper.DEFAULT_INNODB_BUFFER_SIZE_MAX) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_innodb_buffer_unset(self, mog, mem): + mem.return_value = "100G" + self.config_data = { + 'innodb-buffer-pool-size': None, + 'dataset-size': None, + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual( + mysql_config.get('innodb_buffer_pool_size'), + helper.DEFAULT_INNODB_BUFFER_SIZE_MAX) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_innodb_buffer_unset_small(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'innodb-buffer-pool-size': None, + 'dataset-size': None, + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual( + mysql_config.get('innodb_buffer_pool_size'), + 
int(helper.human_to_bytes(mem.return_value) * + helper.DEFAULT_INNODB_BUFFER_FACTOR)) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_innodb_dataset_size(self, mog, mem): + mem.return_value = "100G" + self.config_data = { + 'dataset-size': "10G", + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual( + mysql_config.get('innodb_buffer_pool_size'), + int(helper.human_to_bytes("10G"))) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_wait_timeout(self, mog, mem): + mem.return_value = "100G" + + timeout = 314 + self.config_data = { + 'wait-timeout': timeout, + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual( + mysql_config.get('wait_timeout'), + timeout) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_tuning_level(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'tuning-level': 'safest', + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual( + mysql_config.get('innodb_flush_log_at_trx_commit'), + 1 + ) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_tuning_level_fast(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'tuning-level': 'fast', + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual( + mysql_config.get('innodb_flush_log_at_trx_commit'), + 2 + ) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_tuning_level_unsafe(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'tuning-level': 'unsafe', + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual( + mysql_config.get('innodb_flush_log_at_trx_commit'), + 0 + ) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_innodb_valid_values(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'innodb-change-buffering': 'all', + 'innodb-io-capacity': 100, + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertEqual( + mysql_config.get('innodb_change_buffering'), + 'all' + ) + + self.assertEqual( + mysql_config.get('innodb_io_capacity'), + 100 + ) + + @mock.patch.object(mysql.MySQLConfigHelper, 'get_mem_total') + @mock.patch.object(mysql, 'log') + def test_parse_config_innodb_invalid_values(self, mog, mem): + mem.return_value = "512M" + self.config_data = { + 'innodb-change-buffering': 'invalid', + } + + helper = mysql.PerconaClusterHelper() + mysql_config = helper.parse_config() + + self.assertTrue('innodb_change_buffering' not in mysql_config) + self.assertTrue('innodb_io_capacity' not in mysql_config) + + +class Mysql8Tests(unittest.TestCase): + + def setUp(self): + super(Mysql8Tests, self).setUp() + self.template = "/tmp/mysql-passwd.txt" + self.connection = mock.MagicMock() + self.cursor = mock.MagicMock() + self.connection.cursor.return_value = self.cursor + self.helper = mysql.MySQL8Helper( + rpasswdf_template=self.template, + upasswdf_template=self.template) + self.helper.connection = self.connection + self.user = "user" + 
self.host = "10.5.0.21" + self.password = "passwd" + self.db = "mydb" + + def test_grant_exists(self): + # With backticks + self.cursor.fetchall.return_value = ( + ("GRANT USAGE ON *.* TO `{}`@`{}`".format(self.user, self.host),), + ("GRANT ALL PRIVILEGES ON `{}`.* TO `{}`@`{}`" + .format(self.db, self.user, self.host),)) + self.assertTrue(self.helper.grant_exists(self.db, self.user, self.host)) + + self.cursor.execute.assert_called_with( + "SHOW GRANTS FOR '{}'@'{}'".format(self.user, self.host)) + + # With single quotes + self.cursor.fetchall.return_value = ( + ("GRANT USAGE ON *.* TO '{}'@'{}'".format(self.user, self.host),), + ("GRANT ALL PRIVILEGES ON '{}'.* TO '{}'@'{}'" + .format(self.db, self.user, self.host),)) + self.assertTrue(self.helper.grant_exists(self.db, self.user, self.host)) + + # Grant not there + self.cursor.fetchall.return_value = ( + ("GRANT USAGE ON *.* TO '{}'@'{}'".format("someuser", "notmyhost"),), + ("GRANT ALL PRIVILEGES ON '{}'.* TO '{}'@'{}'" + .format("somedb", "someuser", "notmyhost"),)) + self.assertFalse(self.helper.grant_exists(self.db, self.user, self.host)) + + def test_create_grant(self): + self.helper.grant_exists = mock.MagicMock(return_value=False) + self.helper.create_user = mock.MagicMock() + + self.helper.create_grant(self.db, self.user, self.host, self.password) + self.cursor.execute.assert_called_with( + "GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}'" + .format(self.db, self.user, self.host)) + self.helper.create_user.assert_called_with(self.user, self.host, self.password) + + def test_create_user(self): + self.helper.create_user(self.user, self.host, self.password) + self.cursor.execute.assert_called_with( + "CREATE USER '{}'@'{}' IDENTIFIED BY '{}'". + format(self.user, self.host, self.password)) + + def test_create_router_grant(self): + self.helper.create_user = mock.MagicMock() + + self.helper.create_router_grant(self.user, self.host, self.password) + _calls = [ + mock.call("GRANT CREATE USER ON *.* TO '{}'@'{}' WITH GRANT OPTION" + .format(self.user, self.host)), + mock.call("GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON " + "mysql_innodb_cluster_metadata.* TO '{}'@'{}'" + .format(self.user, self.host)), + mock.call("GRANT SELECT ON mysql.user TO '{}'@'{}'" + .format(self.user, self.host)), + mock.call("GRANT SELECT ON " + "performance_schema.replication_group_members TO " + "'{}'@'{}'".format(self.user, self.host)), + mock.call("GRANT SELECT ON " + "performance_schema.replication_group_member_stats TO " + "'{}'@'{}'".format(self.user, self.host)), + mock.call("GRANT SELECT ON " + "performance_schema.global_variables TO " + "'{}'@'{}'".format(self.user, self.host))] + + self.cursor.execute.assert_has_calls(_calls) + self.helper.create_user.assert_called_with(self.user, self.host, self.password) + + def test_configure_router(self): + self.helper.create_user = mock.MagicMock() + self.helper.create_router_grant = mock.MagicMock() + self.helper.normalize_address = mock.MagicMock(return_value=self.host) + self.helper.get_mysql_password = mock.MagicMock(return_value=self.password) + + self.assertEqual(self.password, self.helper.configure_router(self.host, self.user)) + self.helper.create_user.assert_called_with(self.user, self.host, self.password) + self.helper.create_router_grant.assert_called_with(self.user, self.host, self.password) + + +class MysqlHelperTests(unittest.TestCase): + + def setUp(self): + super(MysqlHelperTests, self).setUp() + + def test_get_prefix(self): + _tests = { + "prefix1": "prefix1_username", + "prefix2": 
"prefix2_database", + "prefix3": "prefix3_hostname"} + + for key in _tests.keys(): + self.assertEqual( + key, + mysql.get_prefix(_tests[key])) + + def test_get_db_data(self): + _unprefixed = "myprefix" + # Test relation data has every variation of shared-db/db-router data + _relation_data = { + "egress-subnets": "10.5.0.43/32", + "ingress-address": "10.5.0.43", + "nova_database": "nova", + "nova_hostname": "10.5.0.43", + "nova_username": "nova", + "novaapi_database": "nova_api", + "novaapi_hostname": "10.5.0.43", + "novaapi_username": "nova", + "novacell0_database": "nova_cell0", + "novacell0_hostname": "10.5.0.43", + "novacell0_username": "nova", + "private-address": "10.5.0.43", + "database": "keystone", + "username": "keystone", + "hostname": "10.5.0.43", + "mysqlrouter_username": + "mysqlrouteruser", + "mysqlrouter_hostname": "10.5.0.43"} + + _expected_data = OrderedDict([ + ('nova', OrderedDict([('database', 'nova'), + ('hostname', '10.5.0.43'), + ('username', 'nova')])), + ('novaapi', OrderedDict([('database', 'nova_api'), + ('hostname', '10.5.0.43'), + ('username', 'nova')])), + ('novacell0', OrderedDict([('database', 'nova_cell0'), + ('hostname', '10.5.0.43'), + ('username', 'nova')])), + ('mysqlrouter', OrderedDict([('username', 'mysqlrouteruser'), + ('hostname', '10.5.0.43')])), + ('myprefix', OrderedDict([('hostname', '10.5.0.43'), + ('database', 'keystone'), + ('username', 'keystone')]))]) + + _results = mysql.get_db_data(_relation_data, unprefixed=_unprefixed) + + for prefix in _expected_data.keys(): + for key in _expected_data[prefix].keys(): + self.assertEqual( + _results[prefix][key], _expected_data[prefix][key]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hahelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hahelpers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hahelpers/test_apache_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hahelpers/test_apache_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7b755693b92dcbebc3928f2957bca960a301a927 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hahelpers/test_apache_utils.py @@ -0,0 +1,133 @@ +from mock import patch, call + +from testtools import TestCase +from tests.helpers import patch_open, FakeRelation + +import charmhelpers.contrib.hahelpers.apache as apache_utils + +cert = ''' + -----BEGIN CERTIFICATE----- +MIIDXTCCAkWgAwIBAgIJAMO1fWOu8ntUMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQwHhcNMTQwNDIyMTUzNDA0WhcNMjQwNDE5MTUzNDA0WjBF +MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 +ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAuk6dmZnMvVxykNidNjbIwXM3ShhMpwCvUmWwpybFAIqhtNTuGJF9Ikp5 +kzB+ThQV1onK8O8YarNGQx+MOISEnlJ5npj3Atp33pKGHRn69lHKGVfJvRbN4A90 +1hTueYsELzfPV2YWm4c6nQiXRT6Cy0yaw/DE8fBTHzAiE9+/XGPsjn5VPv8H6Wa1 +f/d5FblE+RtHP6YpRo9Jh3XAn3iC9fVr8rblS4rk7ev8LfH/yIG2wRVOEPC6lYfu +MEIwPpxKV0c3Z6lqtMOgC5dgzWjrbItnQfB0JaIzSFMMxDhNCJocQRJDQ+0jmj+K +rMGB1QRZlVLZxx0xnv38G0GyfFMv8QIDAQABo1AwTjAdBgNVHQ4EFgQUcxEj7X26 +poFDa0lw40aAKIqyNp0wHwYDVR0jBBgwFoAUcxEj7X26poFDa0lw40aAKIqyNp0w +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAQe6RUCqTYf0Ns8fKfAEb 
+QSxZKqCst02oC0F3Gm0opWiUetxZqmAYTAjztmlRFIw7hgF/P95SY1ujGLZmiAlU +poOTjQ/i7MvjkXPVCo92izwXi65qRmJGbjduIirOAYtBmBmm3qS9BmoDlLQMVNYn +bwFImc9ar0h+o3/VH1hry+2vEVikXiKK5uKZI6B7ejNYfAWydq6ilzfKIh75W852 +OSbKt3NB/BTZZUdCvK6+B+MoeuzQHDO0/QKBEBfaKFeJki3mdyzFlNbYio1z00rM +E2zl3kh9gkZnMuV1uzHdfKJbtTcNn4hCls5x7T21jn4joADHaVez8FloykBUABu3 +qw== +-----END CERTIFICATE----- +''' + +IDENTITY_NEW_STYLE_CERTS = { + 'identity-service:0': { + 'keystone/0': { + 'ssl_cert_test-cn': 'keystone_provided_cert', + 'ssl_key_test-cn': 'keystone_provided_key', + } + } +} + +IDENTITY_OLD_STYLE_CERTS = { + 'identity-service:0': { + 'keystone/0': { + 'ssl_cert': 'keystone_provided_cert', + 'ssl_key': 'keystone_provided_key', + } + } +} + + +class ApacheUtilsTests(TestCase): + def setUp(self): + super(ApacheUtilsTests, self).setUp() + [self._patch(m) for m in [ + 'log', + 'config_get', + 'relation_get', + 'relation_ids', + 'relation_list', + 'host', + ]] + + def _patch(self, method): + _m = patch.object(apache_utils, method) + mock = _m.start() + self.addCleanup(_m.stop) + setattr(self, method, mock) + + def test_get_cert_from_config(self): + '''Ensure cert and key from charm config override relation''' + self.config_get.side_effect = [ + 'some_ca_cert', # config_get('ssl_cert') + 'some_ca_key', # config_Get('ssl_key') + ] + result = apache_utils.get_cert('test-cn') + self.assertEquals(('some_ca_cert', 'some_ca_key'), result) + + def test_get_ca_cert_from_config(self): + self.config_get.return_value = "some_ca_cert" + self.assertEquals('some_ca_cert', apache_utils.get_ca_cert()) + + def test_get_cert_from_relation(self): + self.config_get.return_value = None + rel = FakeRelation(IDENTITY_NEW_STYLE_CERTS) + self.relation_ids.side_effect = rel.relation_ids + self.relation_list.side_effect = rel.relation_units + self.relation_get.side_effect = rel.get + result = apache_utils.get_cert('test-cn') + self.assertEquals(('keystone_provided_cert', 'keystone_provided_key'), + result) + + def test_get_cert_from_relation_deprecated(self): + self.config_get.return_value = None + rel = FakeRelation(IDENTITY_OLD_STYLE_CERTS) + self.relation_ids.side_effect = rel.relation_ids + self.relation_list.side_effect = rel.relation_units + self.relation_get.side_effect = rel.get + result = apache_utils.get_cert() + self.assertEquals(('keystone_provided_cert', 'keystone_provided_key'), + result) + + def test_get_ca_cert_from_relation(self): + self.config_get.return_value = None + self.relation_ids.side_effect = [['identity-service:0'], + ['identity-credentials:1']] + self.relation_list.return_value = 'keystone/0' + self.relation_get.side_effect = [ + 'keystone_provided_ca', + ] + result = apache_utils.get_ca_cert() + self.relation_ids.assert_has_calls([call('identity-service'), + call('identity-credentials')]) + self.assertEquals('keystone_provided_ca', + result) + + @patch.object(apache_utils.os.path, 'isfile') + def test_retrieve_ca_cert(self, _isfile): + _isfile.return_value = True + with patch_open() as (_open, _file): + _file.read.return_value = cert + self.assertEqual( + apache_utils.retrieve_ca_cert('mycertfile'), + cert) + _open.assert_called_once_with('mycertfile', 'rb') + + @patch.object(apache_utils.os.path, 'isfile') + def test_retrieve_ca_cert_no_file(self, _isfile): + _isfile.return_value = False + with patch_open() as (_open, _file): + self.assertEqual( + apache_utils.retrieve_ca_cert('mycertfile'), + None) + self.assertFalse(_open.called) diff --git 
a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hahelpers/test_cluster_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hahelpers/test_cluster_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..990f1dcb0bfbde735b90b8b8418a82d47659cf25
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hahelpers/test_cluster_utils.py
@@ -0,0 +1,590 @@
+from mock import patch, MagicMock, call
+
+from subprocess import CalledProcessError
+from testtools import TestCase
+
+import charmhelpers.contrib.hahelpers.cluster as cluster_utils
+
+CRM_STATUS = b'''
+Last updated: Thu May 14 14:46:35 2015
+Last change: Thu May 14 14:43:51 2015 via crmd on juju-trusty-machine-1
+Stack: corosync
+Current DC: juju-trusty-machine-2 (168108171) - partition with quorum
+Version: 1.1.10-42f2063
+3 Nodes configured
+4 Resources configured
+
+
+Online: [ juju-trusty-machine-1 juju-trusty-machine-2 juju-trusty-machine-3 ]
+
+ Resource Group: grp_percona_cluster
+     res_mysql_vip (ocf::heartbeat:IPaddr2): Started juju-trusty-machine-1
+ Clone Set: cl_mysql_monitor [res_mysql_monitor]
+     Started: [ juju-trusty-machine-1 juju-trusty-machine-2 juju-trusty-machine-3 ]
+'''
+
+CRM_DC_NONE = b'''
+Last updated: Thu May 14 14:46:35 2015
+Last change: Thu May 14 14:43:51 2015 via crmd on juju-trusty-machine-1
+Stack: corosync
+Current DC: NONE
+1 Nodes configured, 2 expected votes
+0 Resources configured
+
+
+Node node1: UNCLEAN (offline)
+'''
+
+
+class ClusterUtilsTests(TestCase):
+    def setUp(self):
+        super(ClusterUtilsTests, self).setUp()
+        [self._patch(m) for m in [
+            'log',
+            'relation_ids',
+            'relation_list',
+            'relation_get',
+            'get_unit_hostname',
+            'config_get',
+            'unit_get',
+        ]]
+
+    def _patch(self, method):
+        _m = patch.object(cluster_utils, method)
+        mock = _m.start()
+        self.addCleanup(_m.stop)
+        setattr(self, method, mock)
+
+    def test_is_clustered(self):
+        '''It determines whether or not a unit is clustered'''
+        self.relation_ids.return_value = ['ha:0']
+        self.relation_list.return_value = ['ha/0']
+        self.relation_get.return_value = 'yes'
+        self.assertTrue(cluster_utils.is_clustered())
+
+    def test_is_not_clustered(self):
+        '''It determines whether or not a unit is clustered'''
+        self.relation_ids.return_value = ['ha:0']
+        self.relation_list.return_value = ['ha/0']
+        self.relation_get.return_value = None
+        self.assertFalse(cluster_utils.is_clustered())
+
+    @patch('subprocess.check_output')
+    def test_is_crm_dc(self, check_output):
+        '''It determines its unit is leader'''
+        self.get_unit_hostname.return_value = 'juju-trusty-machine-2'
+        check_output.return_value = CRM_STATUS
+        self.assertTrue(cluster_utils.is_crm_dc())
+
+    @patch('subprocess.check_output')
+    def test_is_crm_dc_no_cluster(self, check_output):
+        '''It is not leader if there is no cluster up'''
+        def r(*args, **kwargs):
+            raise CalledProcessError(1, 'crm')
+
+        check_output.side_effect = r
+        self.assertRaises(cluster_utils.CRMDCNotFound, cluster_utils.is_crm_dc)
+
+    @patch('subprocess.check_output')
+    def test_is_crm_dc_false(self, check_output):
+        '''It determines its unit is not leader'''
+        self.get_unit_hostname.return_value = 'juju-trusty-machine-1'
+        check_output.return_value = CRM_STATUS
+        self.assertFalse(cluster_utils.is_crm_dc())
+
+    @patch('subprocess.check_output')
+    def test_is_crm_dc_current_none(self, check_output):
+        '''It raises CRMDCNotFound when the cluster has no DC'''
+        self.get_unit_hostname.return_value = 'juju-trusty-machine-1'
+        
check_output.return_value = CRM_DC_NONE + self.assertRaises(cluster_utils.CRMDCNotFound, cluster_utils.is_crm_dc) + + @patch('subprocess.check_output') + def test_is_crm_leader(self, check_output): + '''It determines its unit is leader''' + self.get_unit_hostname.return_value = 'node1' + crm = b'resource vip is running on: node1' + check_output.return_value = crm + self.assertTrue(cluster_utils.is_crm_leader('vip')) + + @patch('charmhelpers.core.decorators.time') + @patch('subprocess.check_output') + def test_is_not_leader(self, check_output, mock_time): + '''It determines its unit is not leader''' + self.get_unit_hostname.return_value = 'node1' + crm = b'resource vip is running on: node2' + check_output.return_value = crm + self.assertFalse(cluster_utils.is_crm_leader('some_resource')) + self.assertFalse(mock_time.called) + + @patch('charmhelpers.core.decorators.log') + @patch('charmhelpers.core.decorators.time') + @patch('subprocess.check_output') + def test_is_not_leader_resource_not_exists(self, check_output, mock_time, + mock_log): + '''It determines its unit is not leader''' + self.get_unit_hostname.return_value = 'node1' + check_output.return_value = "resource vip is NOT running" + self.assertRaises(cluster_utils.CRMResourceNotFound, + cluster_utils.is_crm_leader, 'vip') + mock_time.assert_has_calls([call.sleep(2), call.sleep(4), + call.sleep(6)]) + + @patch('charmhelpers.core.decorators.time') + @patch('subprocess.check_output') + def test_is_crm_leader_no_cluster(self, check_output, mock_time): + '''It is not leader if there is no cluster up''' + check_output.side_effect = CalledProcessError(1, 'crm') + self.assertFalse(cluster_utils.is_crm_leader('vip')) + self.assertFalse(mock_time.called) + + @patch.object(cluster_utils, 'is_crm_dc') + def test_is_crm_leader_dc_resource(self, _is_crm_dc): + '''Call out to is_crm_dc''' + cluster_utils.is_crm_leader(cluster_utils.DC_RESOURCE_NAME) + _is_crm_dc.assert_called_with() + + def test_peer_units(self): + '''It lists all peer units for cluster relation''' + peers = ['peer_node/1', 'peer_node/2'] + self.relation_ids.return_value = ['cluster:0'] + self.relation_list.return_value = peers + self.assertEquals(peers, cluster_utils.peer_units()) + + def test_peer_ips(self): + '''Get a dict of peers and their ips''' + peers = { + 'peer_node/1': '10.0.0.1', + 'peer_node/2': '10.0.0.2', + } + + def _relation_get(attr, rid, unit): + return peers[unit] + self.relation_ids.return_value = ['cluster:0'] + self.relation_list.return_value = peers.keys() + self.relation_get.side_effect = _relation_get + self.assertEquals(peers, cluster_utils.peer_ips()) + + @patch('os.getenv') + def test_is_oldest_peer(self, getenv): + '''It detects local unit is the oldest of all peers''' + peers = ['peer_node/1', 'peer_node/2', 'peer_node/3'] + getenv.return_value = 'peer_node/1' + self.assertTrue(cluster_utils.oldest_peer(peers)) + + @patch('os.getenv') + def test_is_not_oldest_peer(self, getenv): + '''It detects local unit is not the oldest of all peers''' + peers = ['peer_node/1', 'peer_node/2', 'peer_node/3'] + getenv.return_value = 'peer_node/2' + self.assertFalse(cluster_utils.oldest_peer(peers)) + + @patch.object(cluster_utils, 'is_crm_leader') + @patch.object(cluster_utils, 'is_clustered') + def test_is_elected_leader_clustered(self, is_clustered, is_crm_leader): + '''It detects it is the eligible leader in a hacluster of units''' + is_clustered.return_value = True + is_crm_leader.return_value = True + self.assertTrue(cluster_utils.is_elected_leader('vip')) + + 
@patch.object(cluster_utils, 'is_crm_leader') + @patch.object(cluster_utils, 'is_clustered') + def test_not_is_elected_leader_clustered(self, is_clustered, is_crm_leader): + '''It detects it is not the eligible leader in a hacluster of units''' + is_clustered.return_value = True + is_crm_leader.return_value = False + self.assertFalse(cluster_utils.is_elected_leader('vip')) + + @patch.object(cluster_utils, 'oldest_peer') + @patch.object(cluster_utils, 'peer_units') + @patch.object(cluster_utils, 'is_clustered') + def test_is_is_elected_leader_unclustered(self, is_clustered, + peer_units, oldest_peer): + '''It detects it is the eligible leader in non-clustered peer group''' + is_clustered.return_value = False + oldest_peer.return_value = True + self.assertTrue(cluster_utils.is_elected_leader('vip')) + + @patch.object(cluster_utils, 'oldest_peer') + @patch.object(cluster_utils, 'peer_units') + @patch.object(cluster_utils, 'is_clustered') + def test_not_is_elected_leader_unclustered(self, is_clustered, + peer_units, oldest_peer): + '''It detects it is not the eligible leader in non-clustered group''' + is_clustered.return_value = False + oldest_peer.return_value = False + self.assertFalse(cluster_utils.is_elected_leader('vip')) + + def test_https_explict(self): + '''It determines https is available if configured explicitly''' + # config_get('use-https') + self.config_get.return_value = 'yes' + self.assertTrue(cluster_utils.https()) + + def test_https_cert_key_in_config(self): + '''It determines https is available if cert + key in charm config''' + # config_get('use-https') + self.config_get.side_effect = [ + 'no', # config_get('use-https') + 'cert', # config_get('ssl_cert') + 'key', # config_get('ssl_key') + ] + self.assertTrue(cluster_utils.https()) + + def test_https_cert_key_in_identity_relation(self): + '''It determines https is available if cert in identity-service''' + self.config_get.return_value = False + self.relation_ids.return_value = 'identity-service:0' + self.relation_list.return_value = 'keystone/0' + self.relation_get.side_effect = [ + 'yes', # relation_get('https_keystone') + 'cert', # relation_get('ssl_cert') + 'key', # relation_get('ssl_key') + 'ca_cert', # relation_get('ca_cert') + ] + self.assertTrue(cluster_utils.https()) + + def test_https_cert_key_incomplete_identity_relation(self): + '''It determines https unavailable if cert not in identity-service''' + self.config_get.return_value = False + self.relation_ids.return_value = 'identity-service:0' + self.relation_list.return_value = 'keystone/0' + self.relation_get.return_value = None + self.assertFalse(cluster_utils.https()) + + @patch.object(cluster_utils, 'https') + @patch.object(cluster_utils, 'peer_units') + def test_determine_api_port_with_peers(self, peer_units, https): + '''It determines API port in presence of peers''' + peer_units.return_value = ['peer1'] + https.return_value = False + self.assertEquals(9686, cluster_utils.determine_api_port(9696)) + + @patch.object(cluster_utils, 'https') + @patch.object(cluster_utils, 'peer_units') + def test_determine_api_port_nopeers_singlemode(self, peer_units, https): + '''It determines API port with a single unit in singlemode''' + peer_units.return_value = [] + https.return_value = False + port = cluster_utils.determine_api_port(9696, singlenode_mode=True) + self.assertEquals(9686, port) + + @patch.object(cluster_utils, 'is_clustered') + @patch.object(cluster_utils, 'https') + @patch.object(cluster_utils, 'peer_units') + def test_determine_api_port_clustered(self, 
peer_units, https, + is_clustered): + '''It determines API port in presence of an hacluster''' + peer_units.return_value = [] + is_clustered.return_value = True + https.return_value = False + self.assertEquals(9686, cluster_utils.determine_api_port(9696)) + + @patch.object(cluster_utils, 'is_clustered') + @patch.object(cluster_utils, 'https') + @patch.object(cluster_utils, 'peer_units') + def test_determine_api_port_clustered_https(self, peer_units, https, + is_clustered): + '''It determines API port in presence of hacluster + https''' + peer_units.return_value = [] + is_clustered.return_value = True + https.return_value = True + self.assertEquals(9676, cluster_utils.determine_api_port(9696)) + + @patch.object(cluster_utils, 'https') + def test_determine_apache_port_https(self, https): + '''It determines haproxy port with https enabled''' + https.return_value = True + self.assertEquals(9696, cluster_utils.determine_apache_port(9696)) + + @patch.object(cluster_utils, 'https') + @patch.object(cluster_utils, 'is_clustered') + def test_determine_apache_port_clustered(self, https, is_clustered): + '''It determines haproxy port with https disabled''' + https.return_value = True + is_clustered.return_value = True + self.assertEquals(9686, cluster_utils.determine_apache_port(9696)) + + @patch.object(cluster_utils, 'peer_units') + @patch.object(cluster_utils, 'https') + @patch.object(cluster_utils, 'is_clustered') + def test_determine_apache_port_nopeers_singlemode(self, https, + is_clustered, + peer_units): + '''It determines haproxy port with a single unit in singlemode''' + peer_units.return_value = [] + https.return_value = False + is_clustered.return_value = False + port = cluster_utils.determine_apache_port(9696, singlenode_mode=True) + self.assertEquals(9686, port) + + @patch.object(cluster_utils, 'valid_hacluster_config') + def test_get_hacluster_config_complete(self, valid_hacluster_config): + '''It fetches all hacluster charm config''' + conf = { + 'ha-bindiface': 'eth1', + 'ha-mcastport': '3333', + 'vip': '10.0.0.1', + 'os-admin-hostname': None, + 'os-public-hostname': None, + 'os-internal-hostname': None, + 'os-access-hostname': None, + } + + valid_hacluster_config.return_value = True + + def _fake_config_get(setting): + return conf[setting] + + self.config_get.side_effect = _fake_config_get + self.assertEquals(conf, cluster_utils.get_hacluster_config()) + + @patch.object(cluster_utils, 'valid_hacluster_config') + def test_get_hacluster_config_incomplete(self, valid_hacluster_config): + '''It raises exception if some hacluster charm config missing''' + conf = { + 'ha-bindiface': 'eth1', + 'ha-mcastport': '3333', + 'vip': None, + 'os-admin-hostname': None, + 'os-public-hostname': None, + 'os-internal-hostname': None, + 'os-access-hostname': None, + } + + valid_hacluster_config.return_value = False + + def _fake_config_get(setting): + return conf[setting] + + self.config_get.side_effect = _fake_config_get + self.assertRaises(cluster_utils.HAIncorrectConfig, + cluster_utils.get_hacluster_config) + + @patch.object(cluster_utils, 'valid_hacluster_config') + def test_get_hacluster_config_with_excludes(self, valid_hacluster_config): + '''It fetches all hacluster charm config''' + conf = { + 'ha-bindiface': 'eth1', + 'ha-mcastport': '3333', + } + valid_hacluster_config.return_value = True + + def _fake_config_get(setting): + return conf[setting] + + self.config_get.side_effect = _fake_config_get + exclude_keys = ['vip', 'os-admin-hostname', 'os-internal-hostname', + 'os-public-hostname', 
'os-access-hostname'] + result = cluster_utils.get_hacluster_config(exclude_keys) + self.assertEquals(conf, result) + + @patch.object(cluster_utils, 'is_clustered') + def test_canonical_url_bare(self, is_clustered): + '''It constructs a URL to host with no https or cluster present''' + self.unit_get.return_value = 'foohost1' + is_clustered.return_value = False + configs = MagicMock() + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = [] + url = cluster_utils.canonical_url(configs) + self.assertEquals('http://foohost1', url) + + @patch.object(cluster_utils, 'is_clustered') + def test_canonical_url_https_no_cluster(self, is_clustered): + '''It constructs a URL to host with https and no cluster present''' + self.unit_get.return_value = 'foohost1' + is_clustered.return_value = False + configs = MagicMock() + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = ['https'] + url = cluster_utils.canonical_url(configs) + self.assertEquals('https://foohost1', url) + + @patch.object(cluster_utils, 'is_clustered') + def test_canonical_url_https_cluster(self, is_clustered): + '''It constructs a URL to host with https and cluster present''' + self.config_get.return_value = '10.0.0.1' + is_clustered.return_value = True + configs = MagicMock() + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = ['https'] + url = cluster_utils.canonical_url(configs) + self.assertEquals('https://10.0.0.1', url) + + @patch.object(cluster_utils, 'is_clustered') + def test_canonical_url_cluster_no_https(self, is_clustered): + '''It constructs a URL to host with no https and cluster present''' + self.config_get.return_value = '10.0.0.1' + self.unit_get.return_value = 'foohost1' + is_clustered.return_value = True + configs = MagicMock() + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = [] + url = cluster_utils.canonical_url(configs) + self.assertEquals('http://10.0.0.1', url) + + @patch.object(cluster_utils, 'status_set') + def test_valid_hacluster_config_incomplete(self, status_set): + '''Returns False with incomplete HA config''' + conf = { + 'vip': None, + 'os-admin-hostname': None, + 'os-public-hostname': None, + 'os-internal-hostname': None, + 'os-access-hostname': None, + 'dns-ha': False, + } + + def _fake_config_get(setting): + return conf[setting] + + self.config_get.side_effect = _fake_config_get + self.assertRaises(cluster_utils.HAIncorrectConfig, + cluster_utils.valid_hacluster_config) + + @patch.object(cluster_utils, 'status_set') + def test_valid_hacluster_config_both(self, status_set): + '''Returns False when both VIP and DNS HA are set''' + conf = { + 'vip': '10.0.0.1', + 'os-admin-hostname': None, + 'os-public-hostname': None, + 'os-internal-hostname': None, + 'os-access-hostname': None, + 'dns-ha': True, + } + + def _fake_config_get(setting): + return conf[setting] + + self.config_get.side_effect = _fake_config_get + self.assertRaises(cluster_utils.HAIncorrectConfig, + cluster_utils.valid_hacluster_config) + + @patch.object(cluster_utils, 'status_set') + def test_valid_hacluster_config_vip_ha(self, status_set): + '''Returns True with complete VIP HA config''' + conf = { + 'vip': '10.0.0.1', + 'os-admin-hostname': None, + 'os-public-hostname': None, + 'os-internal-hostname': None, + 'os-access-hostname': None, + 'dns-ha': False, + } + + def _fake_config_get(setting): + return conf[setting] + + self.config_get.side_effect = _fake_config_get + 
self.assertTrue(cluster_utils.valid_hacluster_config()) + self.assertFalse(status_set.called) + + @patch.object(cluster_utils, 'status_set') + def test_valid_hacluster_config_dns_incomplete(self, status_set): + '''Returns False with incomplete DNS HA config''' + conf = { + 'vip': None, + 'os-admin-hostname': None, + 'os-public-hostname': None, + 'os-internal-hostname': None, + 'os-access-hostname': None, + 'dns-ha': True, + } + + def _fake_config_get(setting): + return conf[setting] + + self.config_get.side_effect = _fake_config_get + self.assertRaises(cluster_utils.HAIncompleteConfig, + cluster_utils.valid_hacluster_config) + + @patch.object(cluster_utils, 'status_set') + def test_valid_hacluster_config_dns_ha(self, status_set): + '''Returns True with complete DNS HA config''' + conf = { + 'vip': None, + 'os-admin-hostname': 'somehostname', + 'os-public-hostname': None, + 'os-internal-hostname': None, + 'os-access-hostname': None, + 'dns-ha': True, + } + + def _fake_config_get(setting): + return conf[setting] + + self.config_get.side_effect = _fake_config_get + self.assertTrue(cluster_utils.valid_hacluster_config()) + self.assertFalse(status_set.called) + + @patch.object(cluster_utils, 'juju_is_leader') + @patch.object(cluster_utils, 'status_set') + @patch.object(cluster_utils.time, 'sleep') + @patch.object(cluster_utils, 'modulo_distribution') + @patch.object(cluster_utils, 'log') + def test_distributed_wait(self, log, modulo_distribution, sleep, + status_set, is_leader): + + # Leader regardless of modulo should not wait + is_leader.return_value = True + cluster_utils.distributed_wait(modulo=9, wait=23) + modulo_distribution.assert_not_called() + sleep.assert_called_with(0) + + # The rest of the tests are non-leader units + is_leader.return_value = False + + def _fake_config_get(setting): + return conf[setting] + + # Uses fallback defaults + conf = { + 'modulo-nodes': None, + 'known-wait': None, + } + self.config_get.side_effect = _fake_config_get + cluster_utils.distributed_wait() + modulo_distribution.assert_called_with(modulo=3, wait=30, + non_zero_wait=True) + + # Uses config values + conf = { + 'modulo-nodes': 7, + 'known-wait': 10, + } + self.config_get.side_effect = _fake_config_get + cluster_utils.distributed_wait() + modulo_distribution.assert_called_with(modulo=7, wait=10, + non_zero_wait=True) + + # Uses passed values + cluster_utils.distributed_wait(modulo=5, wait=45) + modulo_distribution.assert_called_with(modulo=5, wait=45, + non_zero_wait=True) + + @patch.object(cluster_utils, 'relation_ids') + def test_get_managed_services_and_ports(self, relation_ids): + relation_ids.return_value = ['rel:2'] + self.assertEqual( + cluster_utils.get_managed_services_and_ports( + ['apache2', 'haproxy'], + [8067, 4545, 6732]), + (['apache2'], [8057, 4535, 6722])) + self.assertEqual( + cluster_utils.get_managed_services_and_ports( + ['apache2', 'haproxy'], + [8067, 4545, 6732], + external_services=['apache2']), + (['haproxy'], [8057, 4535, 6722])) + + def add_ten(x): + return x + 10 + + self.assertEqual( + cluster_utils.get_managed_services_and_ports( + ['apache2', 'haproxy'], + [8067, 4545, 6732], + port_conv_f=add_ten), + (['apache2'], [8077, 4555, 6742])) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/apache/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/apache/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/apache/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/apache/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/apache/checks/test_config.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/apache/checks/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..da080da395488bd9e91945f443bd302e9a0d26a6 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/apache/checks/test_config.py @@ -0,0 +1,110 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile + +from unittest import TestCase +from mock import patch + +from charmhelpers.contrib.hardening.apache.checks import config + +TEST_TMPDIR = None +APACHE_VERSION_STR = b"""Server version: Apache/2.4.7 (Ubuntu) +Server built: Jan 14 2016 17:45:23 +""" + + +class ApacheConfigTestCase(TestCase): + + def setUp(self): + global TEST_TMPDIR + TEST_TMPDIR = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(TEST_TMPDIR) + + @patch.object(config.subprocess, 'call', lambda *args, **kwargs: 1) + def test_get_audits_apache_not_installed(self): + audits = config.get_audits() + self.assertEqual([], audits) + + @patch.object(config.utils, 'get_settings', lambda x: { + 'common': {'apache_dir': TEST_TMPDIR, + 'traceenable': 'Off'}, + 'hardening': { + 'allowed_http_methods': {'GOGETEM'}, + 'modules_to_disable': {'modfoo'} + } + }) + @patch.object(config.subprocess, 'call', lambda *args, **kwargs: 0) + def test_get_audits_apache_is_installed(self): + audits = config.get_audits() + self.assertEqual(7, len(audits)) + + @patch.object(config.utils, 'get_settings', lambda x: { + 'common': {'apache_dir': TEST_TMPDIR}, + 'hardening': { + 'allowed_http_methods': {'GOGETEM'}, + 'modules_to_disable': {'modfoo'}, + 'traceenable': 'off', + 'servertokens': 'Prod', + 'honor_cipher_order': 'on', + 'cipher_suite': 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' + } + }) + @patch.object(config, 'subprocess') + def test_ApacheConfContext(self, mock_subprocess): + mock_subprocess.call.return_value = 0 + + with tempfile.NamedTemporaryFile() as ftmp: # noqa + def fake_check_output(cmd, *args, **kwargs): + if cmd[0] == 'apache2': + return APACHE_VERSION_STR + + mock_subprocess.check_output.side_effect = fake_check_output + ctxt = 
config.ApacheConfContext() + self.assertEqual(ctxt(), { + 'allowed_http_methods': set(['GOGETEM']), + 'apache_icondir': + '/usr/share/apache2/icons/', + 'apache_version': '2.4.7', + 'modules_to_disable': set(['modfoo']), + 'servertokens': 'Prod', + 'traceenable': 'off', + 'honor_cipher_order': 'on', + 'cipher_suite': 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' + }) + + @patch.object(config.utils, 'get_settings', lambda x: { + 'common': {'apache_dir': TEST_TMPDIR}, + 'hardening': { + 'allowed_http_methods': {'GOGETEM'}, + 'modules_to_disable': {'modfoo'}, + 'traceenable': 'off', + 'servertokens': 'Prod', + 'honor_cipher_order': 'on', + 'cipher_suite': 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' + } + }) + @patch.object(config.subprocess, 'call', lambda *args, **kwargs: 0) + def test_file_permission_audit(self): + audits = config.get_audits() + settings = config.utils.get_settings('apache') + conf_file_name = 'apache2.conf' + conf_file_path = os.path.join( + settings['common']['apache_dir'], conf_file_name + ) + self.assertEqual(audits[0].paths[0], conf_file_path) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..30a3e94359e94011cd247de7ade76667346e7379 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_apache_audits.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_apache_audits.py new file mode 100644 index 0000000000000000000000000000000000000000..69e7c081c6e7fcd0579f9417b025c66d5f39c976 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_apache_audits.py @@ -0,0 +1,86 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from mock import call
+from mock import patch
+
+from unittest import TestCase
+from charmhelpers.contrib.hardening.audits import apache
+from subprocess import CalledProcessError
+
+
+class DisabledModuleAuditsTest(TestCase):
+
+    def setUp(self):
+        super(DisabledModuleAuditsTest, self).setUp()
+        self._patch_obj(apache, 'log')
+
+    def _patch_obj(self, obj, method):
+        _m = patch.object(obj, method)
+        mock = _m.start()
+        self.addCleanup(_m.stop)
+        setattr(self, method, mock)
+
+    def test_init_string(self):
+        audit = apache.DisabledModuleAudit('foo')
+        self.assertEqual(['foo'], audit.modules)
+
+    def test_init_list(self):
+        audit = apache.DisabledModuleAudit(['foo', 'bar'])
+        self.assertEqual(['foo', 'bar'], audit.modules)
+
+    @patch.object(apache.DisabledModuleAudit, '_get_loaded_modules')
+    def test_ensure_compliance_no_modules(self, mock_loaded_modules):
+        audit = apache.DisabledModuleAudit(None)
+        audit.ensure_compliance()
+        self.assertFalse(mock_loaded_modules.called)
+
+    @patch.object(apache.DisabledModuleAudit, '_get_loaded_modules')
+    @patch.object(apache, 'log', lambda *args, **kwargs: None)
+    def test_ensure_compliance_loaded_modules_raises_ex(self,
+                                                        mock_loaded_modules):
+        mock_loaded_modules.side_effect = CalledProcessError(1, 'test', 'err')
+        audit = apache.DisabledModuleAudit('foo')
+        audit.ensure_compliance()
+
+    @patch.object(apache.DisabledModuleAudit, '_get_loaded_modules')
+    @patch.object(apache.DisabledModuleAudit, '_disable_module')
+    @patch.object(apache, 'log', lambda *args, **kwargs: None)
+    def test_disabled_modules_not_loaded(self, mock_disable_module,
+                                         mock_loaded_modules):
+        mock_loaded_modules.return_value = ['foo']
+        audit = apache.DisabledModuleAudit('bar')
+        audit.ensure_compliance()
+        self.assertFalse(mock_disable_module.called)
+
+    @patch.object(apache.DisabledModuleAudit, '_get_loaded_modules')
+    @patch.object(apache.DisabledModuleAudit, '_disable_module')
+    @patch.object(apache.DisabledModuleAudit, '_restart_apache')
+    @patch.object(apache, 'log', lambda *args, **kwargs: None)
+    def test_disabled_modules_loaded(self, mock_restart_apache,
+                                     mock_disable_module, mock_loaded_modules):
+        mock_loaded_modules.return_value = ['foo', 'bar']
+        audit = apache.DisabledModuleAudit('bar')
+        audit.ensure_compliance()
+        mock_disable_module.assert_has_calls([call('bar')])
+        mock_restart_apache.assert_has_calls([call()])
+
+    @patch('subprocess.check_output')
+    def test_get_loaded_modules(self, mock_check_output):
+        mock_check_output.return_value = (b'Loaded Modules:\n'
+                                          b' foo_module (static)\n'
+                                          b' bar_module (shared)\n')
+        audit = apache.DisabledModuleAudit('bar')
+        result = audit._get_loaded_modules()
+        self.assertEqual(['foo', 'bar'], result)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_apt_audits.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_apt_audits.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc14b2a55a910fd743da21ad126826edc67d0076
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_apt_audits.py
@@ -0,0 +1,87 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import TestCase + +from mock import call +from mock import MagicMock +from mock import patch + +from charmhelpers.contrib.hardening.audits import apt +from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg +from charmhelpers.core import hookenv + + +class RestrictedPackagesTestCase(TestCase): + def setUp(self): + super(RestrictedPackagesTestCase, self).setUp() + + def create_package(self, name, virtual=False): + pkg = MagicMock() + pkg.name = name + pkg.current_ver = '2.0' + if virtual: + pkgver = MagicMock() + pkgver.parent_pkg = self.create_package('foo') + pkg.provides_list = [('virtualfoo', None, pkgver)] + resp = { + 'has_provides': True, + 'has_versions': False, + } + pkg.get.side_effect = resp.get + + return pkg + + @patch.object(apt, 'apt_cache') + @patch.object(apt, 'apt_purge') + @patch.object(apt, 'log', lambda *args, **kwargs: None) + def test_ensure_compliance(self, mock_purge, mock_apt_cache): + pkg = self.create_package('bar') + mock_apt_cache.return_value = {'bar': pkg} + + audit = apt.RestrictedPackages(pkgs=['bar']) + audit.ensure_compliance() + mock_purge.assert_has_calls([call(pkg.name)]) + + @patch.object(apt, 'apt_purge') + @patch.object(apt, 'apt_cache') + @patch.object(apt, 'log', lambda *args, **kwargs: None) + def test_apt_harden_virtual_package(self, mock_apt_cache, mock_apt_purge): + vpkg = self.create_package('virtualfoo', virtual=True) + mock_apt_cache.return_value = {'foo': vpkg} + audit = apt.RestrictedPackages(pkgs=['foo']) + audit.ensure_compliance() + self.assertTrue(mock_apt_cache.called) + mock_apt_purge.assert_has_calls([call('foo')]) + + +class AptConfigTestCase(TestCase): + + @patch.object(apt, 'apt_pkg') + def test_ensure_compliance(self, mock_apt_pkg): + mock_apt_pkg.init.return_value = None + mock_apt_pkg.config.side_effect = {} + mock_apt_pkg.config.get.return_value = None + audit = apt.AptConfig([{'key': 'APT::Get::AllowUnauthenticated', + 'expected': 'false'}]) + audit.ensure_compliance() + self.assertTrue(mock_apt_pkg.config.get.called) + + @patch.object(hookenv, 'log') + def test_verify_config(self, mock_log): + cfg = apt_pkg.config + key, value = list(cfg.items())[0] + audit = apt.AptConfig([{"key": key, "expected": value}]) + audit.ensure_compliance() + self.assertFalse(mock_log.called) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_base_audits.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_base_audits.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9f2367304d30fca6f48f63b640e77cc51e3d97 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_base_audits.py @@ -0,0 +1,52 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import TestCase + +from charmhelpers.contrib.hardening.audits import BaseAudit + + +class BaseAuditTestCase(TestCase): + + def setUp(self): + super(BaseAuditTestCase, self).setUp() + + def test_take_action_default(self): + check = BaseAudit() + take_action = check._take_action() + self.assertTrue(take_action) + + def test_take_action_unless_true(self): + check = BaseAudit(unless=True) + take_action = check._take_action() + self.assertFalse(take_action) + + def test_take_action_unless_false(self): + check = BaseAudit(unless=False) + take_action = check._take_action() + self.assertTrue(take_action) + + def test_take_action_unless_callback_false(self): + def callback(): + return False + check = BaseAudit(unless=callback) + take_action = check._take_action() + self.assertTrue(take_action) + + def test_take_action_unless_callback_true(self): + def callback(): + return True + check = BaseAudit(unless=callback) + take_action = check._take_action() + self.assertFalse(take_action) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_file_audits.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_file_audits.py new file mode 100644 index 0000000000000000000000000000000000000000..8718f6115dbaddeab44d8fe9fb9c9f6ffec17091 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/audits/test_file_audits.py @@ -0,0 +1,334 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
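The BaseAudit cases above pin down the `unless` contract: a callable is consulted at audit time, while a plain truthy value disables the audit outright. A minimal sketch of that logic, with a stand-in class name (the real implementation lives in charmhelpers.contrib.hardening.audits):

    # Sketch only; mirrors the behaviour asserted by BaseAuditTestCase.
    class UnlessSketch(object):
        def __init__(self, unless=False):
            self.unless = unless

        def _take_action(self):
            # Callables are evaluated lazily; booleans are used as-is.
            if callable(self.unless):
                return not self.unless()
            return not self.unless

    assert UnlessSketch()._take_action() is True
    assert UnlessSketch(unless=True)._take_action() is False
    assert UnlessSketch(unless=lambda: False)._take_action() is True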
+ +import os +import shutil +import tempfile + +from mock import call, patch + +from unittest import TestCase + +from charmhelpers.core import unitdata +from charmhelpers.contrib.hardening.audits import file + + +@patch('os.path.exists') +class BaseFileAuditTestCase(TestCase): + + def setUp(self): + super(BaseFileAuditTestCase, self).setUp() + self._patch_obj(file.BaseFileAudit, 'is_compliant') + self._patch_obj(file.BaseFileAudit, 'comply') + self._patch_obj(file, 'log') + + def _patch_obj(self, obj, method): + _m = patch.object(obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + setattr(self, method, mock) + + def test_ensure_compliance(self, mock_exists): + mock_exists.return_value = False + check = file.BaseFileAudit(paths='/tmp/foo') + check.ensure_compliance() + self.assertFalse(self.comply.called) + + def test_ensure_compliance_in_compliance(self, mock_exists): + mock_exists.return_value = True + self.is_compliant.return_value = True + check = file.BaseFileAudit(paths=['/tmp/foo']) + check.ensure_compliance() + mock_exists.assert_has_calls([call('/tmp/foo')]) + self.is_compliant.assert_has_calls([call('/tmp/foo')]) + self.assertFalse(self.log.called) + self.assertFalse(self.comply.called) + + def test_ensure_compliance_out_of_compliance(self, mock_exists): + mock_exists.return_value = True + self.is_compliant.return_value = False + check = file.BaseFileAudit(paths=['/tmp/foo']) + check.ensure_compliance() + mock_exists.assert_has_calls([call('/tmp/foo')]) + self.is_compliant.assert_has_calls([call('/tmp/foo')]) + self.assertTrue(self.log.called) + self.comply.assert_has_calls([call('/tmp/foo')]) + + +class EasyMock(dict): + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class FilePermissionAuditTestCase(TestCase): + def setUp(self): + super(FilePermissionAuditTestCase, self).setUp() + self._patch_obj(file.grp, 'getgrnam') + self._patch_obj(file.pwd, 'getpwnam') + self._patch_obj(file.FilePermissionAudit, '_get_stat') + self.getpwnam.return_value = EasyMock({'pw_name': 'testuser', + 'pw_uid': 1000}) + self.getgrnam.return_value = EasyMock({'gr_name': 'testgroup', + 'gr_gid': 1000}) + self._get_stat.return_value = EasyMock({'st_mode': 0o644, + 'st_uid': 1000, + 'st_gid': 1000}) + + def _patch_obj(self, obj, method): + _m = patch.object(obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + setattr(self, method, mock) + + def test_is_compliant(self): + check = file.FilePermissionAudit(paths=['/foo/bar'], + user='testuser', + group='testgroup', mode=0o644) + compliant = check.is_compliant('/foo/bar') + self.assertTrue(compliant) + + @patch.object(file, 'log', lambda *args, **kwargs: None) + def test_not_compliant_wrong_group(self): + self.getgrnam.return_value = EasyMock({'gr_name': 'testgroup', + 'gr_gid': 222}) + check = file.FilePermissionAudit(paths=['/foo/bar'], user='testuser', + group='testgroup', mode=0o644) + compliant = check.is_compliant('/foo/bar') + self.assertFalse(compliant) + + @patch.object(file, 'log', lambda *args, **kwargs: None) + def test_not_compliant_wrong_user(self): + self.getpwnam.return_value = EasyMock({'pw_name': 'fred', + 'pw_uid': 123}) + check = file.FilePermissionAudit(paths=['/foo/bar'], user='testuser', + group='testgroup', mode=0o644) + compliant = check.is_compliant('/foo/bar') + self.assertFalse(compliant) + + @patch.object(file, 'log', lambda *args, **kwargs: None) + def test_not_compliant_wrong_permissions(self): + self._get_stat.return_value = EasyMock({'st_mode': 0o777, + 'st_uid': 1000, + 'st_gid': 1000}) 
+ check = file.FilePermissionAudit(paths=['/foo/bar'], user='testuser', + group='testgroup', mode=0o644) + compliant = check.is_compliant('/foo/bar') + self.assertFalse(compliant) + + @patch('charmhelpers.contrib.hardening.utils.ensure_permissions') + @patch.object(file, 'log', lambda *args, **kwargs: None) + def test_comply(self, _ensure_permissions): + check = file.FilePermissionAudit(paths=['/foo/bar'], user='testuser', + group='testgroup', mode=0o644) + check.comply('/foo/bar') + c = call('/foo/bar', 'testuser', 'testgroup', 0o644) + _ensure_permissions.assert_has_calls([c]) + + +class DirectoryPermissionAuditTestCase(TestCase): + def setUp(self): + super(DirectoryPermissionAuditTestCase, self).setUp() + + @patch('charmhelpers.contrib.hardening.audits.file.os.path.isdir') + @patch.object(file, 'log', lambda *args, **kwargs: None) + def test_is_compliant_not_directory(self, mock_isdir): + mock_isdir.return_value = False + check = file.DirectoryPermissionAudit(paths=['/foo/bar'], + user='testuser', + group='testgroup', mode=0o0700) + self.assertRaises(ValueError, check.is_compliant, '/foo/bar') + + @patch.object(file.FilePermissionAudit, 'is_compliant') + @patch.object(file, 'log', lambda *args, **kwargs: None) + def test_is_compliant_file_not_compliant(self, mock_is_compliant): + mock_is_compliant.return_value = False + tmpdir = tempfile.mkdtemp() + try: + check = file.DirectoryPermissionAudit(paths=[tmpdir], + user='testuser', + group='testgroup', + mode=0o0700) + compliant = check.is_compliant(tmpdir) + self.assertFalse(compliant) + finally: + shutil.rmtree(tmpdir) + + +class NoSUIDGUIDAuditTestCase(TestCase): + def setUp(self): + super(NoSUIDGUIDAuditTestCase, self).setUp() + + @patch.object(file.NoSUIDSGIDAudit, '_get_stat') + @patch.object(file, 'log', lambda *args, **kwargs: None) + def test_is_compliant(self, mock_get_stat): + mock_get_stat.return_value = EasyMock({'st_mode': 0o0644, + 'st_uid': 0, + 'st_gid': 0}) + audit = file.NoSUIDSGIDAudit('/foo/bar') + compliant = audit.is_compliant('/foo/bar') + self.assertTrue(compliant) + + @patch.object(file.NoSUIDSGIDAudit, '_get_stat') + @patch.object(file, 'log', lambda *args, **kwargs: None) + def test_is_noncompliant(self, mock_get_stat): + mock_get_stat.return_value = EasyMock({'st_mode': 0o6644, + 'st_uid': 0, + 'st_gid': 0}) + audit = file.NoSUIDSGIDAudit('/foo/bar') + compliant = audit.is_compliant('/foo/bar') + self.assertFalse(compliant) + + @patch.object(file, 'log') + @patch.object(file, 'check_output') + def test_comply(self, mock_check_output, mock_log): + audit = file.NoSUIDSGIDAudit('/foo/bar') + audit.comply('/foo/bar') + mock_check_output.assert_has_calls([call(['chmod', '-s', '/foo/bar'])]) + self.assertTrue(mock_log.called) + + +class TemplatedFileTestCase(TestCase): + def setUp(self): + super(TemplatedFileTestCase, self).setUp() + self.kv = patch.object(unitdata, 'kv') + self.kv.start() + self.addCleanup(self.kv.stop) + + @patch.object(file.TemplatedFile, 'templates_match') + @patch.object(file.TemplatedFile, 'contents_match') + @patch.object(file.TemplatedFile, 'permissions_match') + @patch.object(file, 'log', lambda *args, **kwargs: None) + def test_is_not_compliant(self, contents_match_, permissions_match_, + templates_match_): + contents_match_.return_value = False + permissions_match_.return_value = False + templates_match_.return_value = False + + f = file.TemplatedFile('/foo/bar', None, '/tmp', 0o0644) + compliant = f.is_compliant('/foo/bar') + self.assertFalse(compliant) + + 
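The two NoSUIDSGIDAudit cases above reduce to one bitmask test: a file fails the audit when either the setuid (0o4000) or setgid (0o2000) bit is set in st_mode. A sketch with a hypothetical helper name:

    def has_suid_sgid(st_mode):
        # 0o6000 covers both special bits at once.
        return bool(st_mode & 0o6000)

    assert not has_suid_sgid(0o0644)  # the compliant mode above
    assert has_suid_sgid(0o6644)      # the non-compliant mode above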
@patch.object(file.TemplatedFile, 'templates_match')
+    @patch.object(file.TemplatedFile, 'contents_match')
+    @patch.object(file.TemplatedFile, 'permissions_match')
+    @patch.object(file, 'log', lambda *args, **kwargs: None)
+    def test_is_compliant(self, contents_match_, permissions_match_,
+                          templates_match_):
+        contents_match_.return_value = True
+        permissions_match_.return_value = True
+        templates_match_.return_value = True
+
+        f = file.TemplatedFile('/foo/bar', None, '/tmp', 0o0644)
+        compliant = f.is_compliant('/foo/bar')
+        self.assertTrue(compliant)
+
+    @patch.object(file.TemplatedFile, 'templates_match')
+    @patch.object(file.TemplatedFile, 'contents_match')
+    @patch.object(file.TemplatedFile, 'permissions_match')
+    @patch.object(file, 'log', lambda *args, **kwargs: None)
+    def test_template_changes(self, contents_match_, permissions_match_,
+                              templates_match_):
+        contents_match_.return_value = True
+        permissions_match_.return_value = True
+        templates_match_.return_value = False
+
+        f = file.TemplatedFile('/foo/bar', None, '/tmp', 0o0644)
+        compliant = f.is_compliant('/foo/bar')
+        self.assertFalse(compliant)
+
+    @patch.object(file, 'render_and_write')
+    @patch.object(file.utils, 'ensure_permissions')
+    @patch.object(file, 'log', lambda *args, **kwargs: None)
+    def test_comply(self, mock_ensure_permissions, mock_render_and_write):
+        class Context(object):
+            def __call__(self):
+                return {}
+        with tempfile.NamedTemporaryFile() as ftmp:
+            f = file.TemplatedFile(ftmp.name, Context(),
+                                   os.path.dirname(ftmp.name), 0o0644)
+            f.comply(ftmp.name)
+            calls = [call(os.path.dirname(ftmp.name), ftmp.name, {})]
+            mock_render_and_write.assert_has_calls(calls)
+            mock_ensure_permissions.assert_has_calls([call(ftmp.name, 'root',
+                                                           'root', 0o0644)])
+
+
+CONTENTS_PASS = """Ciphers aes256-ctr,aes192-ctr,aes128-ctr
+MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160
+KexAlgorithms diffie-hellman-group-exchange-sha256
+"""
+
+
+CONTENTS_FAIL = """Ciphers aes256-ctr,aes192-ctr,aes128-ctr
+MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160
+KexAlgorithms diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha1,diffie-hellman-group-exchange-sha1,diffie-hellman-group1-sha1
+"""
+
+
+class FileContentAuditTestCase(TestCase):
+
+    @patch.object(file, 'log')
+    def test_audit_contents_pass(self, mock_log):
+        conditions = {'pass':
+                      [r'^KexAlgorithms\s+diffie-hellman-group-exchange-'
+                       'sha256$'],
+                      'fail': [r'^KexAlgorithms\s+diffie-hellman-group-'
+                               'exchange-sha256.+$']}
+        with tempfile.NamedTemporaryFile() as ftmp:
+            filename = ftmp.name
+            with open(filename, 'w') as fd:
+                fd.write(CONTENTS_PASS)
+
+            audit = file.FileContentAudit(filename, conditions)
+            self.assertTrue(audit.is_compliant(filename))
+
+        calls = [call("Auditing contents of file '%s'" % filename,
+                      level='DEBUG'),
+                 call('Checked 2 cases and 2 passed', level='DEBUG')]
+        mock_log.assert_has_calls(calls)
+
+    @patch.object(file, 'log')
+    def test_audit_contents_fail(self, mock_log):
+        conditions = {'pass':
+                      [r'^KexAlgorithms\s+diffie-hellman-group-exchange-'
+                       'sha256$'],
+                      'fail':
+                      [r'^KexAlgorithms\s+diffie-hellman-group-exchange-'
+                       'sha256.+$']}
+        with tempfile.NamedTemporaryFile() as ftmp:
+            filename = ftmp.name
+            with open(filename, 'w') as fd:
+                fd.write(CONTENTS_FAIL)
+
+            audit = file.FileContentAudit(filename, conditions)
+            self.assertFalse(audit.is_compliant(filename))
+
+        calls = [call("Auditing contents of file '%s'" % filename,
+                      level='DEBUG'),
+                 call("Pattern '^KexAlgorithms\\s+diffie-hellman-group-"
+                      "exchange-sha256$' was expected to pass but instead "
+                      "it failed",
+                      level='WARNING'),
+                 call("Pattern '^KexAlgorithms\\s+diffie-hellman-group-"
+                      "exchange-sha256.+$' was expected to fail but instead "
+                      "it passed",
+                      level='WARNING'),
+                 call('Checked 2 cases and 0 passed', level='DEBUG')]
+        mock_log.assert_has_calls(calls)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/apache.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/apache.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0f940d4cfa85ca7051dd60a4805d84bb6aebed6d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/apache.yaml
@@ -0,0 +1,16 @@
+# NOTE: this file contains the default configuration for the 'apache' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'apache' as the root key followed by any of the following with new
+#       values.
+
+common:
+    apache_dir: '/etc/apache2'
+
+hardening:
+    traceenable: 'off'
+    allowed_http_methods: "GET POST"
+    modules_to_disable: [ cgi, cgid ]
+    servertokens: 'Prod'
+    honor_cipher_order: 'on'
+    cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES'
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/apache.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/apache.yaml.schema
new file mode 100644
index 0000000000000000000000000000000000000000..c112137cb45c4b63cb05384145b3edf8c443e2b8
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/apache.yaml.schema
@@ -0,0 +1,12 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+common:
+    apache_dir:
+    traceenable:
+
+hardening:
+    allowed_http_methods:
+    modules_to_disable:
+    servertokens:
+    honor_cipher_order:
+    cipher_suite:
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/mysql.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/mysql.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..682d22bf3ded32eb1c8d6188486ec4468d9ec457
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/mysql.yaml
@@ -0,0 +1,38 @@
+# NOTE: this file contains the default configuration for the 'mysql' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'mysql' as the root key followed by any of the following with new
+#       values.
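As the NOTE headers in these defaults files explain, a charm overrides individual values by shipping a hardening.yaml in its root directory, keyed by the module name. A hypothetical override and a quick sanity check (the values shown are examples, not recommendations):

    import yaml

    # Stand-in for the contents of a charm's hardening.yaml.
    override = yaml.safe_load(
        "apache:\n"
        "  hardening:\n"
        "    servertokens: 'Minimal'\n"
        "    allowed_http_methods: 'GET'\n")
    assert override['apache']['hardening']['servertokens'] == 'Minimal'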
+
+hardening:
+    mysql-conf: /etc/mysql/my.cnf
+    hardening-conf: /etc/mysql/conf.d/hardening.cnf
+
+security:
+    # @see http://www.symantec.com/connect/articles/securing-mysql-step-step
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
+    chroot: None
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
+    safe-user-create: 1
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
+    secure-auth: 1
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
+    skip-symbolic-links: 1
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
+    skip-show-database: True
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
+    local-infile: 0
+
+    # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
+    allow-suspicious-udfs: 0
+
+    # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
+    automatic-sp-privileges: 0
+
+    # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
+    secure-file-priv: /tmp
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/mysql.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/mysql.yaml.schema
new file mode 100644
index 0000000000000000000000000000000000000000..2edf325c311c6fbb062a072083b4d12cebc3d9c1
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/mysql.yaml.schema
@@ -0,0 +1,15 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+hardening:
+    mysql-conf:
+    hardening-conf:
+security:
+    chroot:
+    safe-user-create:
+    secure-auth:
+    skip-symbolic-links:
+    skip-show-database:
+    local-infile:
+    allow-suspicious-udfs:
+    automatic-sp-privileges:
+    secure-file-priv:
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/os.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/os.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9a8627b5ed2803828e1e4d78260c6b5f90cae659
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/os.yaml
@@ -0,0 +1,68 @@
+# NOTE: this file contains the default configuration for the 'os' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'os' as the root key followed by any of the following with new
+#       values.
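For orientation, a rough sketch of how the flat security mapping in the mysql defaults above could surface as directives in the rendered hardening.cnf; the real rendering is done by the charm-helpers templating code, so this is illustrative only:

    # Hypothetical subset of the 'security' section above.
    security = {'safe-user-create': 1, 'secure-auth': 1, 'local-infile': 0}
    lines = ['[mysqld]'] + ['%s = %s' % item for item in sorted(security.items())]
    print('\n'.join(lines))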
+
+general:
+    desktop_enable: False # (type:boolean)
+
+environment:
+    extra_user_paths: []
+    umask: 027
+    root_path: /
+
+auth:
+    pw_max_age: 60
+    # discourage password cycling
+    pw_min_age: 7
+    retries: 5
+    lockout_time: 600
+    timeout: 60
+    allow_homeless: False # (type:boolean)
+    pam_passwdqc_enable: True # (type:boolean)
+    pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
+    root_ttys:
+        console
+        tty1
+        tty2
+        tty3
+        tty4
+        tty5
+        tty6
+    uid_min: 1000
+    gid_min: 1000
+    sys_uid_min: 100
+    sys_uid_max: 999
+    sys_gid_min: 100
+    sys_gid_max: 999
+    chfn_restrict:
+
+security:
+    users_allow: []
+    suid_sgid_enforce: True # (type:boolean)
+    # user-defined blacklist and whitelist
+    suid_sgid_blacklist: []
+    suid_sgid_whitelist: []
+    # if this is True, remove any suid/sgid bits from files that were not in the whitelist
+    suid_sgid_dry_run_on_unknown: False # (type:boolean)
+    suid_sgid_remove_from_unknown: False # (type:boolean)
+    # remove packages with known issues
+    packages_clean: True # (type:boolean)
+    packages_list:
+        xinetd
+        inetd
+        ypserv
+        telnet-server
+        rsh-server
+        rsync
+    kernel_enable_module_loading: True # (type:boolean)
+    kernel_enable_core_dump: False # (type:boolean)
+    ssh_tmout: 300
+
+sysctl:
+    kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128
+    kernel_enable_sysrq: False # (type:boolean)
+    forwarding: False # (type:boolean)
+    ipv6_enable: False # (type:boolean)
+    arp_restricted: True # (type:boolean)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/os.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/os.yaml.schema
new file mode 100644
index 0000000000000000000000000000000000000000..cc3b9c206eae56cbe68826cb76748e2deb9483e1
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/os.yaml.schema
@@ -0,0 +1,43 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+general:
+    desktop_enable:
+environment:
+    extra_user_paths:
+    umask:
+    root_path:
+auth:
+    pw_max_age:
+    pw_min_age:
+    retries:
+    lockout_time:
+    timeout:
+    allow_homeless:
+    pam_passwdqc_enable:
+    pam_passwdqc_options:
+    root_ttys:
+    uid_min:
+    gid_min:
+    sys_uid_min:
+    sys_uid_max:
+    sys_gid_min:
+    sys_gid_max:
+    chfn_restrict:
+security:
+    users_allow:
+    suid_sgid_enforce:
+    suid_sgid_blacklist:
+    suid_sgid_whitelist:
+    suid_sgid_dry_run_on_unknown:
+    suid_sgid_remove_from_unknown:
+    packages_clean:
+    packages_list:
+    kernel_enable_module_loading:
+    kernel_enable_core_dump:
+    ssh_tmout:
+sysctl:
+    kernel_secure_sysrq:
+    kernel_enable_sysrq:
+    forwarding:
+    ipv6_enable:
+    arp_restricted:
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/ssh.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/ssh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cd529bcae1ec00fef2e969f43dc3cf530b46ef9a
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/ssh.yaml
@@ -0,0 +1,49 @@
+# NOTE: this file contains the default configuration for the 'ssh' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'ssh' as the root key followed by any of the following with new
+#       values.
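The kernel_secure_sysrq value in the os defaults above is a bitmask, and the inline comment spells out its terms. A quick check that the comment and the number agree (the labels are the conventional sysrq bit meanings, added here for orientation):

    # 4 keyboard control + 16 sync + 32 remount read-only
    # + 64 signal processes + 128 reboot/poweroff
    assert 4 + 16 + 32 + 64 + 128 == 244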
+
+common:
+    service_name: 'ssh'
+    network_ipv6_enable: False # (type:boolean)
+    ports: [22]
+    remote_hosts: []
+
+client:
+    package: 'openssh-client'
+    cbc_required: False # (type:boolean)
+    weak_hmac: False # (type:boolean)
+    weak_kex: False # (type:boolean)
+    roaming: False
+    password_authentication: 'no'
+
+server:
+    host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key',
+                     '/etc/ssh/ssh_host_ecdsa_key']
+    cbc_required: False # (type:boolean)
+    weak_hmac: False # (type:boolean)
+    weak_kex: False # (type:boolean)
+    allow_root_with_key: False # (type:boolean)
+    allow_tcp_forwarding: 'no'
+    allow_agent_forwarding: 'no'
+    allow_x11_forwarding: 'no'
+    use_privilege_separation: 'sandbox'
+    listen_to: ['0.0.0.0']
+    use_pam: 'no'
+    package: 'openssh-server'
+    password_authentication: 'no'
+    alive_interval: '600'
+    alive_count: '3'
+    sftp_enable: False # (type:boolean)
+    sftp_group: 'sftponly'
+    sftp_chroot: '/home/%u'
+    deny_users: []
+    allow_users: []
+    deny_groups: []
+    allow_groups: []
+    print_motd: 'no'
+    print_last_log: 'no'
+    use_dns: 'no'
+    max_auth_tries: 2
+    max_sessions: 10
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/ssh.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/ssh.yaml.schema
new file mode 100644
index 0000000000000000000000000000000000000000..d05e054bc234015206bb1195152fa9ffd6a33151
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/defaults/ssh.yaml.schema
@@ -0,0 +1,42 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+common:
+    service_name:
+    network_ipv6_enable:
+    ports:
+    remote_hosts:
+client:
+    package:
+    cbc_required:
+    weak_hmac:
+    weak_kex:
+    roaming:
+    password_authentication:
+server:
+    host_key_files:
+    cbc_required:
+    weak_hmac:
+    weak_kex:
+    allow_root_with_key:
+    allow_tcp_forwarding:
+    allow_agent_forwarding:
+    allow_x11_forwarding:
+    use_privilege_separation:
+    listen_to:
+    use_pam:
+    package:
+    password_authentication:
+    alive_interval:
+    alive_count:
+    sftp_enable:
+    sftp_group:
+    sftp_chroot:
+    deny_users:
+    allow_users:
+    deny_groups:
+    allow_groups:
+    print_motd:
+    print_last_log:
+    use_dns:
+    max_auth_tries:
+    max_sessions:
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_apt.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_apt.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea7ed8ea729f0e4d6f5f2ddf8561b56f97c299ae
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_apt.py
@@ -0,0 +1,46 @@
+# Copyright 2016 Canonical
Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import TestCase + +from mock import patch + +from charmhelpers.contrib.hardening.host.checks import apt + + +class AptHardeningTestCase(TestCase): + + @patch.object(apt, 'get_settings', lambda x: { + 'security': {'packages_clean': False} + }) + def test_dont_clean_packages(self): + audits = apt.get_audits() + self.assertEqual(1, len(audits)) + + @patch.object(apt, 'get_settings', lambda x: { + 'security': {'packages_clean': True, + 'packages_list': []} + }) + def test_no_security_packages(self): + audits = apt.get_audits() + self.assertEqual(1, len(audits)) + + @patch.object(apt, 'get_settings', lambda x: { + 'security': {'packages_clean': True, + 'packages_list': ['foo', 'bar']} + }) + def test_restricted_packages(self): + audits = apt.get_audits() + self.assertEqual(2, len(audits)) + self.assertTrue(isinstance(audits[1], apt.RestrictedPackages)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_limits.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_limits.py new file mode 100644 index 0000000000000000000000000000000000000000..0750fdef2b23ac343cd7ac05b08ab4dae784343e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_limits.py @@ -0,0 +1,43 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
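The three AptHardeningTestCase cases above encode a simple gate in get_audits(): one baseline audit is always returned, and RestrictedPackages is appended only when cleaning is enabled and a package list is present. A sketch of that assumed logic with stand-in audit objects:

    def get_audits_sketch(settings):
        audits = ['apt-config-audit']  # stand-in for the baseline audit
        sec = settings.get('security', {})
        if sec.get('packages_clean') and sec.get('packages_list'):
            audits.append('restricted-packages-audit')
        return audits

    assert len(get_audits_sketch({'security': {'packages_clean': False}})) == 1
    assert len(get_audits_sketch({'security': {'packages_clean': True,
                                               'packages_list': ['foo']}})) == 2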
+ +from unittest import TestCase + +from mock import patch + +from charmhelpers.contrib.hardening.host.checks import limits + + +class LimitsTestCase(TestCase): + + @patch.object(limits.utils, 'get_settings', + lambda x: {'security': {'kernel_enable_core_dump': False}}) + def test_core_dump_disabled(self): + audits = limits.get_audits() + self.assertEqual(2, len(audits)) + audit = audits[0] + self.assertTrue(isinstance(audit, limits.DirectoryPermissionAudit)) + self.assertEqual('/etc/security/limits.d', audit.paths[0]) + audit = audits[1] + self.assertTrue(isinstance(audit, limits.TemplatedFile)) + self.assertEqual('/etc/security/limits.d/10.hardcore.conf', + audit.paths[0]) + + @patch.object(limits.utils, 'get_settings', lambda x: { + 'security': {'kernel_enable_core_dump': True} + }) + def test_core_dump_enabled(self): + audits = limits.get_audits() + self.assertEqual(1, len(audits)) + self.assertTrue(isinstance(audits[0], limits.DirectoryPermissionAudit)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_login.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_login.py new file mode 100644 index 0000000000000000000000000000000000000000..e3c6fc85224356faa2b49c8121786d8754662c49 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_login.py @@ -0,0 +1,27 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import TestCase + +from charmhelpers.contrib.hardening.host.checks import login + + +class LoginTestCase(TestCase): + + def test_login(self): + audits = login.get_audits() + self.assertEqual(1, len(audits)) + audit = audits[0] + self.assertTrue(isinstance(audit, login.TemplatedFile)) + self.assertEqual('/etc/login.defs', audit.paths[0]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_minimize_access.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_minimize_access.py new file mode 100644 index 0000000000000000000000000000000000000000..2e3838778907289cbe83b0d7b53e525965ff3ec1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_minimize_access.py @@ -0,0 +1,68 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from unittest import TestCase + +from mock import patch + +from charmhelpers.contrib.hardening.host.checks import minimize_access + + +class MinimizeAccessTestCase(TestCase): + + @patch.object(minimize_access.utils, 'get_settings', lambda x: + {'environment': {'extra_user_paths': []}, + 'security': {'users_allow': []}}) + def test_default_options(self): + audits = minimize_access.get_audits() + self.assertEqual(3, len(audits)) + + # First audit is to ensure that all folders in the $PATH variable + # are read-only. + self.assertTrue(isinstance(audits[0], minimize_access.ReadOnly)) + self.assertEqual({'/usr/local/sbin', '/usr/local/bin', + '/usr/sbin', '/usr/bin', '/bin'}, audits[0].paths) + + # Second audit is to ensure that the /etc/shadow is only readable + # by the root user. + self.assertTrue(isinstance(audits[1], + minimize_access.FilePermissionAudit)) + self.assertEqual(audits[1].paths[0], '/etc/shadow') + self.assertEqual(audits[1].mode, 0o0600) + + # Last audit is to ensure that only root has access to the su + self.assertTrue(isinstance(audits[2], + minimize_access.FilePermissionAudit)) + self.assertEqual(audits[2].paths[0], '/bin/su') + self.assertEqual(audits[2].mode, 0o0750) + + @patch.object(minimize_access.utils, 'get_settings', lambda x: + {'environment': {'extra_user_paths': []}, + 'security': {'users_allow': ['change_user']}}) + def test_allow_change_user(self): + audits = minimize_access.get_audits() + self.assertEqual(2, len(audits)) + + # First audit is to ensure that all folders in the $PATH variable + # are read-only. + self.assertTrue(isinstance(audits[0], minimize_access.ReadOnly)) + self.assertEqual({'/usr/local/sbin', '/usr/local/bin', + '/usr/sbin', '/usr/bin', '/bin'}, audits[0].paths) + + # Second audit is to ensure that the /etc/shadow is only readable + # by the root user. + self.assertTrue(isinstance(audits[1], + minimize_access.FilePermissionAudit)) + self.assertEqual(audits[1].paths[0], '/etc/shadow') + self.assertEqual(audits[1].mode, 0o0600) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_pam.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_pam.py new file mode 100644 index 0000000000000000000000000000000000000000..2879bc87ec127e04222f4fc8e1a7568cf8985cc0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_pam.py @@ -0,0 +1,53 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
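A sketch of the permission comparison behind the /etc/shadow (0o600) and /bin/su (0o750) expectations in the minimize_access tests above; the real FilePermissionAudit also resolves the owning user and group through pwd and grp:

    import os
    import stat

    def mode_matches(path, expected_mode):
        # Compare only the permission bits of the stat result.
        return stat.S_IMODE(os.stat(path).st_mode) == expected_mode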
+ +from unittest import TestCase + +from mock import patch + +from charmhelpers.contrib.hardening.host.checks import pam + + +class PAMTestCase(TestCase): + + @patch.object(pam.utils, 'get_settings', lambda x: { + 'auth': {'pam_passwdqc_enable': True, + 'retries': False} + }) + def test_enable_passwdqc(self): + audits = pam.get_audits() + self.assertEqual(2, len(audits)) + audit = audits[0] + self.assertTrue(isinstance(audit, pam.PasswdqcPAM)) + audit = audits[1] + self.assertTrue(isinstance(audit, pam.DeletedFile)) + self.assertEqual('/usr/share/pam-configs/tally2', audit.paths[0]) + + @patch.object(pam.utils, 'get_settings', lambda x: { + 'auth': {'pam_passwdqc_enable': False, + 'retries': True} + }) + def test_disable_passwdqc(self): + audits = pam.get_audits() + self.assertEqual(1, len(audits)) + self.assertFalse(isinstance(audits[0], pam.PasswdqcPAM)) + + @patch.object(pam.utils, 'get_settings', lambda x: { + 'auth': {'pam_passwdqc_enable': False, + 'retries': True} + }) + def test_auth_retries(self): + audits = pam.get_audits() + self.assertEqual(1, len(audits)) + self.assertTrue(isinstance(audits[0], pam.Tally2PAM)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_profile.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_profile.py new file mode 100644 index 0000000000000000000000000000000000000000..1d7fc99bdb2ec86ecf02bb3b0a3d7b01c179730e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_profile.py @@ -0,0 +1,65 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +from unittest import TestCase + +from mock import patch + +from charmhelpers.contrib.hardening.host.checks import profile + + +class ProfileTestCase(TestCase): + + def setUp(self): + super(ProfileTestCase, self).setUp() + + os.environ['JUJU_CHARM_DIR'] = '/tmp' + self.addCleanup(lambda: os.environ.pop('JUJU_CHARM_DIR')) + + @patch.object(profile.utils, 'get_settings', lambda x: + {'security': {'kernel_enable_core_dump': False, 'ssh_tmout': False}}) + def test_core_dump_disabled(self): + audits = profile.get_audits() + self.assertEqual(1, len(audits)) + self.assertTrue(isinstance(audits[0], profile.TemplatedFile)) + + @patch.object(profile.utils, 'get_settings', lambda x: { + 'security': {'kernel_enable_core_dump': True, 'ssh_tmout': False} + }) + def test_core_dump_enabled(self): + audits = profile.get_audits() + self.assertEqual(0, len(audits)) + + @patch.object(profile.utils, 'get_settings', lambda x: + {'security': {'kernel_enable_core_dump': True, 'ssh_tmout': False}}) + def test_ssh_tmout_disabled(self): + audits = profile.get_audits() + self.assertEqual(0, len(audits)) + + @patch.object(profile.utils, 'get_settings', lambda x: { + 'security': {'kernel_enable_core_dump': True, 'ssh_tmout': 300} + }) + def test_ssh_tmout_enabled(self): + audits = profile.get_audits() + self.assertEqual(1, len(audits)) + self.assertTrue(isinstance(audits[0], profile.TemplatedFile)) + + @patch.object(profile.utils, 'log', lambda *args, **kwargs: None) + def test_ProfileContext(self): + ctxt = profile.ProfileContext() + self.assertEqual(ctxt(), { + 'ssh_tmout': 300 + }) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_securetty.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_securetty.py new file mode 100644 index 0000000000000000000000000000000000000000..4c5d7acd94550bf1e8929e415b7ef05ff96378b2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_securetty.py @@ -0,0 +1,27 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
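For orientation, the ssh_tmout setting exercised in the profile tests above drives an idle-timeout value rendered into a shell profile snippet. A hypothetical rendering (the actual template ships with charm-helpers and is not part of this diff):

    ssh_tmout = 300
    snippet = 'TMOUT=%d\nreadonly TMOUT\nexport TMOUT\n' % ssh_tmout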
+ +from unittest import TestCase + +from charmhelpers.contrib.hardening.host.checks import securetty + + +class SecureTTYTestCase(TestCase): + + def test_securetty(self): + audits = securetty.get_audits() + self.assertEqual(1, len(audits)) + audit = audits[0] + self.assertTrue(isinstance(audit, securetty.TemplatedFile)) + self.assertEqual('/etc/securetty', audit.paths[0]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_suid_guid.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_suid_guid.py new file mode 100644 index 0000000000000000000000000000000000000000..96f78b28b35ee9a58d9117bb0b3552d6841a69b6 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/host/checks/test_suid_guid.py @@ -0,0 +1,55 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import tempfile + +from unittest import TestCase + +from mock import call +from mock import patch + +from charmhelpers.contrib.hardening.host.checks import suid_sgid + + +@patch.object(suid_sgid, 'log', lambda *args, **kwargs: None) +class SUIDSGIDTestCase(TestCase): + + @patch.object(suid_sgid.utils, 'get_settings', lambda x: { + 'security': {'suid_sgid_enforce': False} + }) + def test_no_enforcement(self): + audits = suid_sgid.get_audits() + self.assertEqual(0, len(audits)) + + @patch.object(suid_sgid, 'subprocess') + @patch.object(suid_sgid.utils, 'get_settings', lambda x: { + 'security': {'suid_sgid_enforce': True, + 'suid_sgid_remove_from_unknown': True, + 'suid_sgid_blacklist': [], + 'suid_sgid_whitelist': [], + 'suid_sgid_dry_run_on_unknown': True}, + 'environment': {'root_path': '/'} + }) + def test_suid_guid_harden(self, mock_subprocess): + p = mock_subprocess.Popen.return_value + with tempfile.NamedTemporaryFile() as tmp: + p.communicate.return_value = (tmp.name, "stderr") + + audits = suid_sgid.get_audits() + self.assertEqual(2, len(audits)) + cmd = ['find', '/', '-perm', '-4000', '-o', '-perm', '-2000', '-type', + 'f', '!', '-path', '/proc/*', '-print'] + calls = [call(cmd, stderr=mock_subprocess.PIPE, + stdout=mock_subprocess.PIPE)] + mock_subprocess.Popen.assert_has_calls(calls) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/mysql/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/mysql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/mysql/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/mysql/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/mysql/checks/test_config.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/mysql/checks/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..af015f6aaa1f902fcc97c1e95d31a623246a1fa4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/mysql/checks/test_config.py @@ -0,0 +1,33 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import TestCase + +from mock import patch + +from charmhelpers.contrib.hardening.mysql.checks import config + + +class MySQLConfigTestCase(TestCase): + + @patch.object(config.subprocess, 'call', lambda *args, **kwargs: 0) + @patch.object(config.utils, 'get_settings', lambda x: { + 'hardening': { + 'mysql-conf': {}, + 'hardening-conf': {} + } + }) + def test_get_audits(self): + audits = config.get_audits() + self.assertEqual(4, len(audits)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/ssh/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/ssh/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/ssh/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/ssh/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/ssh/checks/test_config.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/ssh/checks/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..49cea4e3be6f6156d4549c265016fe68cd9c31ca --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/ssh/checks/test_config.py @@ -0,0 +1,27 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from testtools import TestCase + +from mock import patch + +from charmhelpers.contrib.hardening.ssh.checks import config + + +class SSHConfigTestCase(TestCase): + + @patch.object(config.utils, 'get_settings', lambda x: {}) + def test_dont_clean_packages(self): + audits = config.get_audits() + self.assertEqual(4, len(audits)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_defaults.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..35baa2bedda1329d37b5a4c08446a6e93ecbfbd8 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_defaults.py @@ -0,0 +1,58 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import glob +import yaml + +from unittest import TestCase + +TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'defaults') + + +class DefaultsTestCase(TestCase): + + def setUp(self): + super(DefaultsTestCase, self).setUp() + + def get_keys(self, dicto, keys=None): + if keys is None: + keys = [] + + if dicto: + if type(dicto) is not dict: + raise Exception("Unexpected entry: %s" % dicto) + + for key in dicto.keys(): + keys.append(key) + if type(dicto[key]) is dict: + self.get_keys(dicto[key], keys) + + return keys + + def test_defaults(self): + defaults_paths = glob.glob('%s/*.yaml' % TEMPLATES_DIR) + for defaults in defaults_paths: + schema = "%s.schema" % defaults + self.assertTrue(os.path.exists(schema)) + a = yaml.safe_load(open(schema)) + b = yaml.safe_load(open(defaults)) + if not a and not b: + continue + + # Test that all keys in default are present in their associated + # schema. + skeys = self.get_keys(a) + dkeys = self.get_keys(b) + self.assertEqual(set(dkeys).symmetric_difference(skeys), set([])) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_harden.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_harden.py new file mode 100644 index 0000000000000000000000000000000000000000..25364350ba9bca1546042c6246d21d4018de02e9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_harden.py @@ -0,0 +1,81 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
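A worked example of the defaults/schema symmetry that test_defaults asserts above, using hypothetical data and a simplified version of its get_keys() helper:

    def flat_keys(d, acc=None):
        # Collect every key at every nesting level, as get_keys() does.
        acc = [] if acc is None else acc
        for k, v in d.items():
            acc.append(k)
            if isinstance(v, dict):
                flat_keys(v, acc)
        return acc

    schema = {'hardening': {'mysql-conf': None}}
    defaults = {'hardening': {'mysql-conf': '/etc/mysql/my.cnf'}}
    assert set(flat_keys(defaults)).symmetric_difference(flat_keys(schema)) == set()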
+ +from mock import patch, call +from unittest import TestCase + +from charmhelpers.contrib.hardening import harden + + +class HardenTestCase(TestCase): + + def setUp(self): + super(HardenTestCase, self).setUp() + + @patch.object(harden, 'log', lambda *args, **kwargs: None) + @patch.object(harden, 'run_apache_checks') + @patch.object(harden, 'run_mysql_checks') + @patch.object(harden, 'run_ssh_checks') + @patch.object(harden, 'run_os_checks') + def test_harden(self, mock_host, mock_ssh, mock_mysql, mock_apache): + mock_host.__name__ = 'host' + mock_ssh.__name__ = 'ssh' + mock_mysql.__name__ = 'mysql' + mock_apache.__name__ = 'apache' + + @harden.harden(overrides=['ssh', 'mysql']) + def foo(arg1, kwarg1=None): + return "done." + + self.assertEqual(foo('anarg', kwarg1='akwarg'), "done.") + self.assertTrue(mock_ssh.called) + self.assertTrue(mock_mysql.called) + self.assertFalse(mock_apache.called) + self.assertFalse(mock_host.called) + + @patch.object(harden, 'log') + @patch.object(harden, 'run_apache_checks') + @patch.object(harden, 'run_mysql_checks') + @patch.object(harden, 'run_ssh_checks') + @patch.object(harden, 'run_os_checks') + def test_harden_logs_work(self, mock_host, mock_ssh, mock_mysql, + mock_apache, mock_log): + mock_host.__name__ = 'host' + mock_ssh.__name__ = 'ssh' + mock_mysql.__name__ = 'mysql' + mock_apache.__name__ = 'apache' + + @harden.harden(overrides=['ssh', 'mysql']) + def foo(arg1, kwarg1=None): + return arg1 + kwarg1 + + mock_log.assert_not_called() + self.assertEqual(foo('anarg', kwarg1='akwarg'), "anargakwarg") + mock_log.assert_any_call("Hardening function 'foo'", level="DEBUG") + + @harden.harden(overrides=['ssh', 'mysql']) + def bar(arg1, kwarg1=None): + return arg1 + kwarg1 + + mock_log.reset_mock() + self.assertEqual(bar("a", kwarg1="b"), "ab") + mock_log.assert_any_call("Hardening function 'bar'", level="DEBUG") + + # check it only logs the function name once + mock_log.reset_mock() + self.assertEqual(bar("a", kwarg1="b"), "ab") + self.assertEqual( + mock_log.call_args_list, + [call("Executing hardening module 'ssh'", level="DEBUG"), + call("Executing hardening module 'mysql'", level="DEBUG")]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_templating.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_templating.py new file mode 100644 index 0000000000000000000000000000000000000000..6a2f26d783fe712469e2d11f3f9afbf53b8d7bc0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_templating.py @@ -0,0 +1,310 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
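A minimal sketch of the decorator contract the HardenTestCase above exercises (an assumed shape of harden.harden; the module registry here is a stand-in): the selected hardening modules run first, then the wrapped function is called unchanged.

    import functools

    MODULES = {'os': lambda: None, 'ssh': lambda: None,
               'mysql': lambda: None, 'apache': lambda: None}  # stand-ins

    def harden_sketch(overrides=None):
        def wrapper(fn):
            @functools.wraps(fn)
            def _wrapped(*args, **kwargs):
                # Run only the selected modules, e.g. run_ssh_checks().
                for name in (overrides or MODULES):
                    MODULES[name]()
                return fn(*args, **kwargs)
            return _wrapped
        return wrapper

    @harden_sketch(overrides=['ssh', 'mysql'])
    def foo(arg1, kwarg1=None):
        return 'done.'

    assert foo('anarg', kwarg1='akwarg') == 'done.'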
+ +import tempfile +import os +import six + +from mock import call, patch +from unittest import TestCase + +from charmhelpers.contrib.hardening import templating +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + FileContentAudit, +) +from charmhelpers.contrib.hardening.ssh.checks import ( + config as ssh_config_check +) +from charmhelpers.contrib.hardening.host.checks import ( + sysctl, + securetty, +) +from charmhelpers.contrib.hardening.apache.checks import ( + config as apache_config_check +) +from charmhelpers.contrib.hardening.mysql.checks import ( + config as mysql_config_check +) + + +class TemplatingTestCase(TestCase): + + def setUp(self): + super(TemplatingTestCase, self).setUp() + + os.environ['JUJU_CHARM_DIR'] = '/tmp' + self.pathindex = {} + self.addCleanup(lambda: os.environ.pop('JUJU_CHARM_DIR')) + + def get_renderers(self, audits): + renderers = [] + for a in audits: + if issubclass(a.__class__, TemplatedFile): + renderers.append(a) + + return renderers + + def get_contentcheckers(self, audits): + contentcheckers = [] + for a in audits: + if issubclass(a.__class__, FileContentAudit): + contentcheckers.append(a) + + return contentcheckers + + def render(self, renderers): + for template in renderers: + with patch.object(template, 'pre_write', lambda: None): + with patch.object(template, 'post_write', lambda: None): + with patch.object(template, 'run_service_actions'): + with patch.object(template, 'save_checksum'): + for p in template.paths: + template.comply(p) + + def checkcontents(self, contentcheckers): + for check in contentcheckers: + if check.path not in self.pathindex: + continue + + self.assertTrue(check.is_compliant(self.pathindex[check.path])) + + @patch.object(ssh_config_check, 'lsb_release', + lambda: {'DISTRIB_CODENAME': 'precise'}) + @patch.object(utils, 'ensure_permissions') + @patch.object(templating, 'write') + @patch('charmhelpers.contrib.hardening.audits.file.log') + @patch.object(templating, 'log', lambda *args, **kwargs: None) + @patch.object(utils, 'log', lambda *args, **kwargs: None) + @patch.object(ssh_config_check, 'log', lambda *args, **kwargs: None) + def test_ssh_config_render_and_check_lt_trusty(self, mock_log, mock_write, + mock_ensure_permissions): + audits = ssh_config_check.get_audits() + contentcheckers = self.get_contentcheckers(audits) + renderers = self.get_renderers(audits) + configs = {} + + def write(path, data): + with tempfile.NamedTemporaryFile(delete=False) as ftmp: + if os.path.basename(path) == "ssh_config": + configs['ssh'] = ftmp.name + elif os.path.basename(path) == "sshd_config": + configs['sshd'] = ftmp.name + + if path in self.pathindex: + raise Exception("File already rendered '%s'" % path) + + self.pathindex[path] = ftmp.name + with open(ftmp.name, 'wb') as fd: + fd.write(data) + + mock_write.side_effect = write + self.render(renderers) + self.checkcontents(contentcheckers) + self.assertTrue(mock_write.called) + args_list = mock_write.call_args_list + self.assertEqual('/etc/ssh/ssh_config', args_list[0][0][0]) + self.assertEqual('/etc/ssh/sshd_config', args_list[1][0][0]) + self.assertEqual(mock_write.call_count, 2) + + calls = [call("Auditing contents of file '%s'" % configs['ssh'], + level='DEBUG'), + call('Checked 10 cases and 10 passed', level='DEBUG'), + call("Auditing contents of file '%s'" % configs['sshd'], + level='DEBUG'), + call('Checked 10 cases and 10 passed', level='DEBUG')] + mock_log.assert_has_calls(calls) + + 
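+    # Each test in this class substitutes ``templating.write`` with a side
+    # effect that diverts rendered output into a temporary file and records
+    # the path in ``self.pathindex`` so ``checkcontents`` can audit what was
+    # actually rendered; the shared pattern, in sketch form:
+    #
+    #     def write(path, data):
+    #         if path in self.pathindex:
+    #             raise Exception("File already rendered '%s'" % path)
+    #         with tempfile.NamedTemporaryFile(delete=False) as ftmp:
+    #             self.pathindex[path] = ftmp.name
+    #             with open(ftmp.name, 'wb') as fd:
+    #                 fd.write(data)
+    #     mock_write.side_effect = write
+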
@patch.object(ssh_config_check, 'lsb_release', + lambda: {'DISTRIB_CODENAME': 'trusty'}) + @patch.object(utils, 'ensure_permissions') + @patch.object(templating, 'write') + @patch('charmhelpers.contrib.hardening.audits.file.log') + @patch.object(templating, 'log', lambda *args, **kwargs: None) + @patch.object(utils, 'log', lambda *args, **kwargs: None) + @patch.object(ssh_config_check, 'log', lambda *args, **kwargs: None) + def test_ssh_config_render_and_check_gte_trusty(self, mock_log, mock_write, + mock_ensure_permissions): + audits = ssh_config_check.get_audits() + contentcheckers = self.get_contentcheckers(audits) + renderers = self.get_renderers(audits) + + def write(path, data): + with tempfile.NamedTemporaryFile(delete=False) as ftmp: + if path in self.pathindex: + raise Exception("File already rendered '%s'" % path) + + self.pathindex[path] = ftmp.name + with open(ftmp.name, 'wb') as fd: + fd.write(data) + + mock_write.side_effect = write + self.render(renderers) + self.checkcontents(contentcheckers) + self.assertTrue(mock_write.called) + args_list = mock_write.call_args_list + self.assertEqual('/etc/ssh/ssh_config', args_list[0][0][0]) + self.assertEqual('/etc/ssh/sshd_config', args_list[1][0][0]) + self.assertEqual(mock_write.call_count, 2) + + mock_log.assert_has_calls([call('Checked 9 cases and 9 passed', + level='DEBUG')]) + + @patch.object(utils, 'ensure_permissions') + @patch.object(templating, 'write') + @patch.object(sysctl, 'log', lambda *args, **kwargs: None) + @patch.object(templating, 'log', lambda *args, **kwargs: None) + @patch.object(utils, 'log', lambda *args, **kwargs: None) + def test_os_sysctl_and_check(self, mock_write, mock_ensure_permissions): + audits = sysctl.get_audits() + contentcheckers = self.get_contentcheckers(audits) + renderers = self.get_renderers(audits) + + def write(path, data): + if path in self.pathindex: + raise Exception("File already rendered '%s'" % path) + + with tempfile.NamedTemporaryFile(delete=False) as ftmp: + self.pathindex[path] = ftmp.name + with open(ftmp.name, 'wb') as fd: + fd.write(data) + + mock_write.side_effect = write + self.render(renderers) + self.checkcontents(contentcheckers) + self.assertTrue(mock_write.called) + args_list = mock_write.call_args_list + self.assertEqual('/etc/sysctl.d/99-juju-hardening.conf', + args_list[0][0][0]) + self.assertEqual(mock_write.call_count, 1) + + @patch.object(utils, 'ensure_permissions') + @patch.object(templating, 'write') + @patch.object(sysctl, 'log', lambda *args, **kwargs: None) + @patch.object(templating, 'log', lambda *args, **kwargs: None) + @patch.object(utils, 'log', lambda *args, **kwargs: None) + def test_os_securetty_and_check(self, mock_write, mock_ensure_permissions): + audits = securetty.get_audits() + contentcheckers = self.get_contentcheckers(audits) + renderers = self.get_renderers(audits) + + def write(path, data): + if path in self.pathindex: + raise Exception("File already rendered '%s'" % path) + + with tempfile.NamedTemporaryFile(delete=False) as ftmp: + self.pathindex[path] = ftmp.name + with open(ftmp.name, 'wb') as fd: + fd.write(data) + + mock_write.side_effect = write + self.render(renderers) + self.checkcontents(contentcheckers) + self.assertTrue(mock_write.called) + args_list = mock_write.call_args_list + self.assertEqual('/etc/securetty', args_list[0][0][0]) + self.assertEqual(mock_write.call_count, 1) + + @patch.object(apache_config_check.utils, 'get_settings', lambda x: { + 'common': {'apache_dir': '/tmp/foo'}, + 'hardening': { + 
'allowed_http_methods': {'GOGETEM'}, + 'modules_to_disable': {'modfoo'}, + 'traceenable': 'off', + 'servertokens': 'Prod', + 'honor_cipher_order': 'on', + 'cipher_suite': 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' + } + }) + @patch('charmhelpers.contrib.hardening.audits.file.os.path.exists', + lambda *a, **kwa: True) + @patch.object(apache_config_check, 'subprocess') + @patch.object(utils, 'ensure_permissions') + @patch.object(templating, 'write') + @patch.object(templating, 'log', lambda *args, **kwargs: None) + @patch.object(utils, 'log', lambda *args, **kwargs: None) + def test_apache_conf_and_check(self, mock_write, mock_ensure_permissions, + mock_subprocess): + mock_subprocess.call.return_value = 0 + apache_version = b"""Server version: Apache/2.4.7 (Ubuntu) + Server built: Jan 14 2016 17:45:23 + """ + mock_subprocess.check_output.return_value = apache_version + audits = apache_config_check.get_audits() + contentcheckers = self.get_contentcheckers(audits) + renderers = self.get_renderers(audits) + + def write(path, data): + if path in self.pathindex: + raise Exception("File already rendered '%s'" % path) + + with tempfile.NamedTemporaryFile(delete=False) as ftmp: + self.pathindex[path] = ftmp.name + with open(ftmp.name, 'wb') as fd: + fd.write(data) + + mock_write.side_effect = write + self.render(renderers) + self.checkcontents(contentcheckers) + self.assertTrue(mock_write.called) + args_list = mock_write.call_args_list + self.assertEqual('/tmp/foo/mods-available/alias.conf', + args_list[0][0][0]) + self.assertEqual(mock_write.call_count, 2) + + @patch.object(apache_config_check.utils, 'get_settings', lambda x: { + 'security': {}, + 'hardening': { + 'mysql-conf': '/tmp/foo/mysql.cnf', + 'hardening-conf': '/tmp/foo/conf.d/hardening.cnf' + } + }) + @patch('charmhelpers.contrib.hardening.audits.file.os.path.exists', + lambda *a, **kwa: True) + @patch.object(utils, 'ensure_permissions') + @patch.object(templating, 'write') + @patch.object(mysql_config_check.subprocess, 'call', + lambda *args, **kwargs: 0) + @patch.object(templating, 'log', lambda *args, **kwargs: None) + @patch.object(utils, 'log', lambda *args, **kwargs: None) + def test_mysql_conf_and_check(self, mock_write, mock_ensure_permissions): + audits = mysql_config_check.get_audits() + contentcheckers = self.get_contentcheckers(audits) + renderers = self.get_renderers(audits) + + def write(path, data): + if path in self.pathindex: + raise Exception("File already rendered '%s'" % path) + + with tempfile.NamedTemporaryFile(delete=False) as ftmp: + self.pathindex[path] = ftmp.name + with open(ftmp.name, 'wb') as fd: + fd.write(data) + + mock_write.side_effect = write + self.render(renderers) + self.checkcontents(contentcheckers) + self.assertTrue(mock_write.called) + args_list = mock_write.call_args_list + self.assertEqual('/tmp/foo/conf.d/hardening.cnf', + args_list[0][0][0]) + self.assertEqual(mock_write.call_count, 1) + + def tearDown(self): + # Cleanup + for path in six.itervalues(self.pathindex): + os.remove(path) + + super(TemplatingTestCase, self).tearDown() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2b83895d56e12fc91fec5d40254ce2289c8712ff --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/hardening/test_utils.py @@ -0,0 +1,63 @@ +# 
Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import tempfile + +from mock import ( + MagicMock, + call, + patch +) +from unittest import TestCase + +from charmhelpers.contrib.hardening import utils + + +class UtilsTestCase(TestCase): + + def setUp(self): + super(UtilsTestCase, self).setUp() + utils.__SETTINGS__ = {} + + @patch.object(utils.grp, 'getgrnam') + @patch.object(utils.pwd, 'getpwnam') + @patch.object(utils, 'os') + @patch.object(utils, 'log', lambda *args, **kwargs: None) + def test_ensure_permissions(self, mock_os, mock_getpwnam, mock_getgrnam): + user = MagicMock() + user.pw_uid = '12' + mock_getpwnam.return_value = user + group = MagicMock() + group.gr_gid = '23' + mock_getgrnam.return_value = group + + with tempfile.NamedTemporaryFile() as tmp: + utils.ensure_permissions(tmp.name, 'testuser', 'testgroup', 0o0440) + + mock_getpwnam.assert_has_calls([call('testuser')]) + mock_getgrnam.assert_has_calls([call('testgroup')]) + mock_os.chown.assert_has_calls([call(tmp.name, '12', '23')]) + mock_os.chmod.assert_has_calls([call(tmp.name, 0o0440)]) + + @patch.object(utils, '_get_user_provided_overrides') + def test_settings_cache(self, mock_get_user_provided_overrides): + mock_get_user_provided_overrides.return_value = {} + self.assertEqual(utils.__SETTINGS__, {}) + self.assertTrue('sysctl' in utils.get_settings('os')) + self.assertEqual(sorted(list(six.iterkeys(utils.__SETTINGS__))), + ['os']) + self.assertTrue('server' in utils.get_settings('ssh')) + self.assertEqual(sorted(list(six.iterkeys(utils.__SETTINGS__))), + ['os', 'ssh']) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/mellanox/test_infiniband.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/mellanox/test_infiniband.py new file mode 100644 index 0000000000000000000000000000000000000000..bc60be7a6306bd4d90b701a1ef9828751ae889a5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/mellanox/test_infiniband.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python + +from charmhelpers.contrib.mellanox import infiniband + +from mock import patch, call +import unittest + +TO_PATCH = [ + "log", + "INFO", + "apt_install", + "apt_update", + "modprobe", + "network_interfaces" +] + +NETWORK_INTERFACES = [ + 'lo', + 'eth0', + 'eth1', + 'eth2', + 'eth3', + 'eth4', + 'juju-br0', + 'ib0', + 'virbr0', + 'ovs-system', + 'br-int', + 'br-ex', + 'br-data', + 'phy-br-data', + 'int-br-data', + 'br-tun' +] + + +IBSTAT_OUTPUT = """ +CA 'mlx4_0' + CA type: MT4103 + Number of ports: 2 + Firmware version: 2.33.5000 + Hardware version: 0 + Node GUID: 0xe41d2d03000a1120 + System image GUID: 0xe41d2d03000a1123 +""" + + +class InfinibandTest(unittest.TestCase): + + def setUp(self): + for m in TO_PATCH: + setattr(self, m, self._patch(m)) + + def _patch(self, method): + _m = patch('charmhelpers.contrib.mellanox.infiniband.' 
+ method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def test_load_modules(self): + infiniband.load_modules() + + self.modprobe.assert_has_calls(map(lambda x: call(x, persist=True), + infiniband.REQUIRED_MODULES)) + + def test_install_packages(self): + infiniband.install_packages() + + self.apt_update.assert_is_called_once() + self.apt_install.assert_is_called_once() + + @patch("os.path.exists") + def test_is_enabled(self, exists): + exists.return_value = True + self.assertTrue(infiniband.is_enabled()) + + @patch("subprocess.check_output") + def test_stat(self, check_output): + infiniband.stat() + + check_output.assert_called_with(["ibstat"]) + + @patch("subprocess.check_output") + def test_devices(self, check_output): + infiniband.devices() + + check_output.assert_called_with(["ibstat", "-l"]) + + @patch("subprocess.check_output") + def test_device_info(self, check_output): + check_output.return_value = IBSTAT_OUTPUT + + info = infiniband.device_info("mlx4_0") + + self.assertEquals(info.num_ports, "2") + self.assertEquals(info.device_type, "MT4103") + self.assertEquals(info.fw_ver, "2.33.5000") + self.assertEquals(info.hw_ver, "0") + self.assertEquals(info.node_guid, "0xe41d2d03000a1120") + self.assertEquals(info.sys_guid, "0xe41d2d03000a1123") + + @patch("subprocess.check_output") + def test_ipoib_interfaces(self, check_output): + self.network_interfaces.return_value = NETWORK_INTERFACES + + ipoib_nic = "ib0" + + def c(*args, **kwargs): + if ipoib_nic in args[0]: + return "driver: ib_ipoib" + else: + return "driver: mock" + + check_output.side_effect = c + self.assertEquals(infiniband.ipoib_interfaces(), [ipoib_nic]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_ovn.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_ovn.py new file mode 100644 index 0000000000000000000000000000000000000000..c039693a4cb93806f4753f61c177cc4600ce2b91 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_ovn.py @@ -0,0 +1,122 @@ +import textwrap +import uuid + +import charmhelpers.contrib.network.ovs.ovn as ovn + +import tests.utils as test_utils + + +CLUSTER_STATUS = textwrap.dedent(""" + 0ea6 + Name: OVN_Northbound + Cluster ID: f6a3 (f6a36e77-97bf-4740-b46a-705cbe4fef45) + Server ID: 0ea6 (0ea6e785-c2bb-4640-b7a2-85104c11a2c1) + Address: ssl:10.219.3.174:6643 + Status: cluster member + Role: follower + Term: 3 + Leader: 22dd + Vote: unknown + + Election timer: 1000 + Log: [2, 10] + Entries not yet committed: 0 + Entries not yet applied: 0 + Connections: ->f6cf ->22dd <-22dd <-f6cf + Servers: + 0ea6 (0ea6 at ssl:10.219.3.174:6643) (self) + f6cf (f6cf at ssl:10.219.3.64:6643) + 22dd (22dd at ssl:10.219.3.137:6643) + """) + +NORTHD_STATUS_ACTIVE = textwrap.dedent(""" + Status: active + """) + +NORTHD_STATUS_STANDBY 
= textwrap.dedent("""
+    Status: standby
+    """)
+
+
+class TestOVN(test_utils.BaseTestCase):
+
+    def test_ovn_appctl(self):
+        self.patch_object(ovn.utils, '_run')
+        ovn.ovn_appctl('ovn-northd', ('is-paused',))
+        self._run.assert_called_once_with('ovn-appctl', '-t', 'ovn-northd',
+                                          'is-paused')
+        self._run.reset_mock()
+        ovn.ovn_appctl('ovnnb_db', ('cluster/status',))
+        self._run.assert_called_once_with('ovn-appctl', '-t',
+                                          '/var/run/ovn/ovnnb_db.ctl',
+                                          'cluster/status')
+        self._run.reset_mock()
+        ovn.ovn_appctl('ovnnb_db', ('cluster/status',), use_ovs_appctl=True)
+        self._run.assert_called_once_with('ovs-appctl', '-t',
+                                          '/var/run/ovn/ovnnb_db.ctl',
+                                          'cluster/status')
+        self._run.reset_mock()
+        ovn.ovn_appctl('ovnsb_db', ('cluster/status',),
+                       rundir='/var/run/openvswitch')
+        self._run.assert_called_once_with('ovn-appctl', '-t',
+                                          '/var/run/openvswitch/ovnsb_db.ctl',
+                                          'cluster/status')
+
+    def test_cluster_status(self):
+        self.patch_object(ovn, 'ovn_appctl')
+        self.ovn_appctl.return_value = CLUSTER_STATUS
+        expect = ovn.OVNClusterStatus(
+            'OVN_Northbound',
+            uuid.UUID('f6a36e77-97bf-4740-b46a-705cbe4fef45'),
+            uuid.UUID('0ea6e785-c2bb-4640-b7a2-85104c11a2c1'),
+            'ssl:10.219.3.174:6643',
+            'cluster member',
+            'follower',
+            3,
+            '22dd',
+            'unknown',
+            1000,
+            '[2, 10]',
+            0,
+            0,
+            '->f6cf ->22dd <-22dd <-f6cf',
+            [
+                ('0ea6', 'ssl:10.219.3.174:6643'),
+                ('f6cf', 'ssl:10.219.3.64:6643'),
+                ('22dd', 'ssl:10.219.3.137:6643'),
+            ])
+        self.assertEquals(ovn.cluster_status('ovnnb_db'), expect)
+        self.ovn_appctl.assert_called_once_with('ovnnb_db', ('cluster/status',
+                                                             'OVN_Northbound'),
+                                                rundir=None,
+                                                use_ovs_appctl=False)
+        self.assertFalse(expect.is_cluster_leader)
+        expect = ovn.OVNClusterStatus(
+            'OVN_Northbound',
+            uuid.UUID('f6a36e77-97bf-4740-b46a-705cbe4fef45'),
+            uuid.UUID('0ea6e785-c2bb-4640-b7a2-85104c11a2c1'),
+            'ssl:10.219.3.174:6643',
+            'cluster member',
+            'leader',
+            3,
+            'self',
+            'unknown',
+            1000,
+            '[2, 10]',
+            0,
+            0,
+            '->f6cf ->22dd <-22dd <-f6cf',
+            [
+                ('0ea6', 'ssl:10.219.3.174:6643'),
+                ('f6cf', 'ssl:10.219.3.64:6643'),
+                ('22dd', 'ssl:10.219.3.137:6643'),
+            ])
+        self.assertTrue(expect.is_cluster_leader)
+
+    def test_is_northd_active(self):
+        self.patch_object(ovn, 'ovn_appctl')
+        self.ovn_appctl.return_value = NORTHD_STATUS_ACTIVE
+        self.assertTrue(ovn.is_northd_active())
+        self.ovn_appctl.assert_called_once_with('ovn-northd', ('status',))
+        self.ovn_appctl.return_value = NORTHD_STATUS_STANDBY
+        self.assertFalse(ovn.is_northd_active())
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_ovs.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_ovs.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f66cbb0262fc93f348f6f51b8b28f361f564926
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_ovs.py
@@ -0,0 +1,174 @@
+import mock
+
+import charmhelpers.contrib.network.ovs as ovs
+
+import tests.utils as test_utils
+
+
+# NOTE(fnordahl): some functions directly under the ``contrib.network.ovs``
+# module have their unit tests in the ``test_ovs.py`` module in the
+# ``tests.contrib.network`` package.
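+
+# A rough usage sketch of the add_bridge/add_bridge_port helpers exercised
+# below; the bridge and port names are illustrative only:
+#
+#     ovs.add_bridge('br-ex', datapath_type='netdev')
+#     ovs.add_bridge_port('br-ex', 'eth1', promisc=True)
+#
+# which, per the assertions in this class, shells out to approximately:
+#
+#     ovs-vsctl -- --may-exist add-br br-ex
+#         -- set bridge br-ex datapath_type=netdev
+#     ovs-vsctl -- --may-exist add-port br-ex eth1
+#     ip link set eth1 up && ip link set eth1 promisc on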
+ + +class TestOVS(test_utils.BaseTestCase): + + def test__dict_to_vsctl_set(self): + indata = { + 'key': 'value', + 'otherkey': { + 'nestedkey': 'nestedvalue', + }, + } + # due to varying Dict ordering depending on Python version we need + # to be a bit elaborate rather than comparing result directly + result1 = ('--', 'set', 'aTable', 'anItem', 'key=value') + result2 = ('--', 'set', 'aTable', 'anItem', + 'otherkey:nestedkey=nestedvalue') + for setcmd in ovs._dict_to_vsctl_set(indata, 'aTable', 'anItem'): + self.assertTrue(setcmd == result1 or setcmd == result2) + + def test_add_bridge(self): + self.patch_object(ovs.subprocess, 'check_call') + self.patch_object(ovs, 'log') + ovs.add_bridge('test') + self.check_call.assert_called_once_with([ + "ovs-vsctl", "--", "--may-exist", + "add-br", 'test']) + self.assertTrue(self.log.call_count == 1) + + self.check_call.reset_mock() + self.log.reset_mock() + ovs.add_bridge('test', datapath_type='netdev') + self.check_call.assert_called_with([ + "ovs-vsctl", "--", "--may-exist", + "add-br", 'test', "--", "set", + "bridge", "test", "datapath_type=netdev", + ]) + self.assertTrue(self.log.call_count == 2) + + self.check_call.reset_mock() + ovs.add_bridge('test', exclusive=True) + self.check_call.assert_called_once_with([ + "ovs-vsctl", "--", "add-br", 'test']) + + self.check_call.reset_mock() + self.patch_object(ovs, '_dict_to_vsctl_set') + self._dict_to_vsctl_set.return_value = [['--', 'fakeextradata']] + ovs.add_bridge('test', brdata={'fakeinput': None}) + self._dict_to_vsctl_set.assert_called_once_with( + {'fakeinput': None}, 'bridge', 'test') + self.check_call.assert_called_once_with([ + 'ovs-vsctl', '--', '--may-exist', 'add-br', 'test', + '--', 'fakeextradata']) + + def test_add_bridge_port(self): + self.patch_object(ovs.subprocess, 'check_call') + self.patch_object(ovs, 'log') + ovs.add_bridge_port('test', 'eth1') + self.check_call.assert_has_calls([ + mock.call(['ovs-vsctl', '--', '--may-exist', 'add-port', + 'test', 'eth1']), + mock.call(['ip', 'link', 'set', 'eth1', 'up']), + mock.call(['ip', 'link', 'set', 'eth1', 'promisc', 'off']) + ]) + self.assertTrue(self.log.call_count == 1) + + self.check_call.reset_mock() + self.log.reset_mock() + ovs.add_bridge_port('test', 'eth1', promisc=True) + self.check_call.assert_has_calls([ + mock.call(['ovs-vsctl', '--', '--may-exist', 'add-port', + 'test', 'eth1']), + mock.call(['ip', 'link', 'set', 'eth1', 'up']), + mock.call(['ip', 'link', 'set', 'eth1', 'promisc', 'on']) + ]) + self.assertTrue(self.log.call_count == 1) + + self.check_call.reset_mock() + self.log.reset_mock() + ovs.add_bridge_port('test', 'eth1', promisc=None) + self.check_call.assert_has_calls([ + mock.call(['ovs-vsctl', '--', '--may-exist', 'add-port', + 'test', 'eth1']), + mock.call(['ip', 'link', 'set', 'eth1', 'up']), + ]) + self.assertTrue(self.log.call_count == 1) + + self.check_call.reset_mock() + ovs.add_bridge_port('test', 'eth1', exclusive=True, linkup=False) + self.check_call.assert_has_calls([ + mock.call(['ovs-vsctl', '--', 'add-port', 'test', 'eth1']), + mock.call(['ip', 'link', 'set', 'eth1', 'promisc', 'off']) + ]) + + self.check_call.reset_mock() + self.patch_object(ovs, '_dict_to_vsctl_set') + self._dict_to_vsctl_set.return_value = [['--', 'fakeextradata']] + ovs.add_bridge_port('test', 'eth1', ifdata={'fakeinput': None}) + self._dict_to_vsctl_set.assert_called_once_with( + {'fakeinput': None}, 'Interface', 'eth1') + self.check_call.assert_has_calls([ + mock.call(['ovs-vsctl', '--', '--may-exist', 'add-port', + 'test', 
'eth1', '--', 'fakeextradata']), + mock.call(['ip', 'link', 'set', 'eth1', 'up']), + mock.call(['ip', 'link', 'set', 'eth1', 'promisc', 'off']) + ]) + self._dict_to_vsctl_set.reset_mock() + self.check_call.reset_mock() + ovs.add_bridge_port('test', 'eth1', portdata={'fakeportinput': None}) + self._dict_to_vsctl_set.assert_called_once_with( + {'fakeportinput': None}, 'Port', 'eth1') + self.check_call.assert_has_calls([ + mock.call(['ovs-vsctl', '--', '--may-exist', 'add-port', + 'test', 'eth1', '--', 'fakeextradata']), + mock.call(['ip', 'link', 'set', 'eth1', 'up']), + mock.call(['ip', 'link', 'set', 'eth1', 'promisc', 'off']) + ]) + + def test_ovs_appctl(self): + self.patch_object(ovs.subprocess, 'check_output') + ovs.ovs_appctl('ovs-vswitchd', ('ofproto/list',)) + self.check_output.assert_called_once_with( + ['ovs-appctl', '-t', 'ovs-vswitchd', 'ofproto/list'], + universal_newlines=True) + + def test_add_bridge_bond(self): + self.patch_object(ovs.subprocess, 'check_call') + self.patch_object(ovs, '_dict_to_vsctl_set') + self._dict_to_vsctl_set.return_value = [['--', 'fakekey=fakevalue']] + portdata = { + 'bond-mode': 'balance-tcp', + 'lacp': 'active', + 'other-config': { + 'lacp-time': 'fast', + }, + } + ifdatamap = { + 'eth0': { + 'type': 'dpdk', + 'mtu-request': '9000', + 'options': { + 'dpdk-devargs': '0000:01:00.0', + }, + }, + 'eth1': { + 'type': 'dpdk', + 'mtu-request': '9000', + 'options': { + 'dpdk-devargs': '0000:02:00.0', + }, + }, + } + ovs.add_bridge_bond('br-ex', 'bond42', ['eth0', 'eth1'], + portdata, ifdatamap) + self._dict_to_vsctl_set.assert_has_calls([ + mock.call(portdata, 'port', 'bond42'), + mock.call(ifdatamap['eth0'], 'Interface', 'eth0'), + mock.call(ifdatamap['eth1'], 'Interface', 'eth1'), + ], any_order=True) + self.check_call.assert_called_once_with([ + 'ovs-vsctl', + '--', '--may-exist', 'add-bond', 'br-ex', 'bond42', 'eth0', 'eth1', + '--', 'fakekey=fakevalue', + '--', 'fakekey=fakevalue', + '--', 'fakekey=fakevalue']) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_ovsdb.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_ovsdb.py new file mode 100644 index 0000000000000000000000000000000000000000..6ecc7085ac47a4e6f8ede01e77f9cf91c53fa100 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_ovsdb.py @@ -0,0 +1,123 @@ +import mock +import textwrap +import uuid + +import charmhelpers.contrib.network.ovs.ovsdb as ovsdb + +import tests.utils as test_utils + + +VSCTL_BRIDGE_TBL = textwrap.dedent(""" + {"data":[[["uuid","1e21ba48-61ff-4b32-b35e-cb80411da351"], + ["set",[]],["set",[]],"0000a0369fdd3890","","", + ["map",[["charm-ovn-chassis","managed"],["other","value"]]], + ["set",[]],["set",[]],["map",[]],["set",[]],false,["set",[]], + "br-test",["set",[]],["map",[]],["set", + [["uuid","617f9359-77e2-41be-8af6-4c44e7a6bcc3"], + ["uuid","da840476-8809-4107-8733-591f4696f056"]]], + ["set",[]],false,["map",[]],["set",[]],["map",[]],false], + [["uuid","bb685b0f-a383-40a1-b7a5-b5c2066bfa42"], + ["set",[]],["set",[]],"00000e5b68bba140","","", + ["map",[]],"secure",["set",[]],["map",[]],["set",[]],false, + ["set",[]],"br-int",["set",[]],["map",[["disable-in-band","true"]]], + ["set",[["uuid","07f4c231-9fd2-49b0-a558-5b69d657fdb0"], + ["uuid","8bbd2441-866f-4317-a284-09491702776c"], + ["uuid","d9e9c081-6482-4006-b7d6-239182b56c2e"]]], + ["set",[]],false,["map",[]],["set",[]],["map",[]],false]], + 
"headings":["_uuid","auto_attach","controller","datapath_id", + "datapath_type","datapath_version","external_ids","fail_mode", + "flood_vlans","flow_tables","ipfix","mcast_snooping_enable", + "mirrors","name","netflow","other_config","ports","protocols", + "rstp_enable","rstp_status","sflow","status","stp_enable"]} + """) + + +class TestSimpleOVSDB(test_utils.BaseTestCase): + + def patch_target(self, attr, return_value=None): + mocked = mock.patch.object(self.target, attr) + self._patches[attr] = mocked + started = mocked.start() + started.return_value = return_value + self._patches_start[attr] = started + setattr(self, attr, started) + + def test___init__(self): + with self.assertRaises(RuntimeError): + self.target = ovsdb.SimpleOVSDB('atool') + with self.assertRaises(AttributeError): + self.target = ovsdb.SimpleOVSDB('ovs-vsctl') + self.target.unknown_table.find() + + def test__find_tbl(self): + self.target = ovsdb.SimpleOVSDB('ovs-vsctl') + self.patch_object(ovsdb.utils, '_run') + self._run.return_value = VSCTL_BRIDGE_TBL + self.maxDiff = None + expect = { + '_uuid': uuid.UUID('1e21ba48-61ff-4b32-b35e-cb80411da351'), + 'auto_attach': [], + 'controller': [], + 'datapath_id': '0000a0369fdd3890', + 'datapath_type': '', + 'datapath_version': '', + 'external_ids': { + 'charm-ovn-chassis': 'managed', + 'other': 'value', + }, + 'fail_mode': [], + 'flood_vlans': [], + 'flow_tables': {}, + 'ipfix': [], + 'mcast_snooping_enable': False, + 'mirrors': [], + 'name': 'br-test', + 'netflow': [], + 'other_config': {}, + 'ports': [['uuid', '617f9359-77e2-41be-8af6-4c44e7a6bcc3'], + ['uuid', 'da840476-8809-4107-8733-591f4696f056']], + 'protocols': [], + 'rstp_enable': False, + 'rstp_status': {}, + 'sflow': [], + 'status': {}, + 'stp_enable': False} + # this in effect also tests the __iter__ front end method + for el in self.target.bridge: + self.assertDictEqual(el, expect) + break + self._run.assert_called_once_with( + 'ovs-vsctl', '-f', 'json', 'find', 'bridge') + self._run.reset_mock() + # this in effect also tests the find front end method + for el in self.target.bridge.find(condition='name=br-test'): + break + self._run.assert_called_once_with( + 'ovs-vsctl', '-f', 'json', 'find', 'bridge', 'name=br-test') + + def test_clear(self): + self.target = ovsdb.SimpleOVSDB('ovs-vsctl') + self.patch_object(ovsdb.utils, '_run') + self.target.interface.clear('1e21ba48-61ff-4b32-b35e-cb80411da351', + 'external_ids') + self._run.assert_called_once_with( + 'ovs-vsctl', 'clear', 'interface', + '1e21ba48-61ff-4b32-b35e-cb80411da351', 'external_ids') + + def test_remove(self): + self.target = ovsdb.SimpleOVSDB('ovs-vsctl') + self.patch_object(ovsdb.utils, '_run') + self.target.interface.remove('1e21ba48-61ff-4b32-b35e-cb80411da351', + 'external_ids', 'other') + self._run.assert_called_once_with( + 'ovs-vsctl', 'remove', 'interface', + '1e21ba48-61ff-4b32-b35e-cb80411da351', 'external_ids', 'other') + + def test_set(self): + self.target = ovsdb.SimpleOVSDB('ovs-vsctl') + self.patch_object(ovsdb.utils, '_run') + self.target.interface.set('1e21ba48-61ff-4b32-b35e-cb80411da351', + 'external_ids:other', 'value') + self._run.assert_called_once_with( + 'ovs-vsctl', 'set', 'interface', + '1e21ba48-61ff-4b32-b35e-cb80411da351', 'external_ids:other=value') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..8b7e4b1af51df750a0dfab73b88ec8902d113a8f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/ovs/test_utils.py @@ -0,0 +1,13 @@ +import charmhelpers.contrib.network.ovs.utils as utils + +import tests.utils as test_utils + + +class TestUtils(test_utils.BaseTestCase): + + def test__run(self): + self.patch_object(utils.subprocess, 'check_output') + self.check_output.return_value = 'aReturn' + self.assertEquals(utils._run('aArg'), 'aReturn') + self.check_output.assert_called_once_with( + ('aArg',), universal_newlines=True) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/test_ip.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/test_ip.py new file mode 100644 index 0000000000000000000000000000000000000000..d638abc275440e48a8b5eaafdd323ff7d94d9f74 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/test_ip.py @@ -0,0 +1,848 @@ +import subprocess +import unittest + +import mock +import netifaces + +import charmhelpers.contrib.network.ip as net_ip +from mock import patch, MagicMock + +import nose.tools +import six + +if not six.PY3: + builtin_open = '__builtin__.open' + builtin_import = '__builtin__.__import__' +else: + builtin_open = 'builtins.open' + builtin_import = 'builtins.__import__' + +DUMMY_ADDRESSES = { + 'lo': { + 17: [{'peer': '00:00:00:00:00:00', + 'addr': '00:00:00:00:00:00'}], + 2: [{'peer': '127.0.0.1', 'netmask': + '255.0.0.0', 'addr': '127.0.0.1'}], + 10: [{'netmask': 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', + 'addr': '::1'}] + }, + 'eth0': { + 2: [{'addr': '192.168.1.55', + 'broadcast': '192.168.1.255', + 'netmask': '255.255.255.0'}], + 10: [{'addr': '2a01:348:2f4:0:685e:5748:ae62:209f', + 'netmask': 'ffff:ffff:ffff:ffff::'}, + {'addr': 'fe80::3e97:eff:fe8b:1cf7%eth0', + 'netmask': 'ffff:ffff:ffff:ffff::'}, + {'netmask': 'ffff:ffff:ffff:ffff::/64', + 'addr': 'fd2d:dec4:cf59:3c16::1'}, + {'addr': '2001:db8:1::', + 'netmask': 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'}], + 17: [{'addr': '3c:97:0e:8b:1c:f7', + 'broadcast': 'ff:ff:ff:ff:ff:ff'}] + }, + 'eth0:1': { + 2: [{'addr': '192.168.1.56', + 'broadcast': '192.168.1.255', + 'netmask': '255.255.255.0'}], + }, + 'eth1': { + 2: [{'addr': '10.5.0.1', + 'broadcast': '10.5.255.255', + 'netmask': '255.255.0.0'}, + {'addr': '10.6.0.2', + 'broadcast': '10.6.0.255', + 'netmask': '255.255.255.0'}], + 3: [{'addr': 'fe80::3e97:eff:fe8b:1cf7%eth1', + 'netmask': 'ffff:ffff:ffff:ffff::'}], + 17: [{'addr': '3c:97:0e:8b:1c:f7', + 'broadcast': 'ff:ff:ff:ff:ff:ff'}] + }, + 'eth2': { + 10: [{'addr': '3a01:348:2f4:0:685e:5748:ae62:209f', + 'netmask': 'ffff:ffff:ffff:ffff::'}, + {'addr': 'fe80::3e97:edd:fe8b:1cf7%eth0', + 'netmask': 'ffff:ffff:ffff:ffff::'}], + 17: [{'addr': '3c:97:0e:8b:1c:f7', + 'broadcast': 'ff:ff:ff:ff:ff:ff'}] + }, + 'eth2:1': { + 2: [{'addr': '192.168.10.58', + 'broadcast': '192.168.1.255', + 'netmask': '255.255.255.0'}], + }, +} + +IP_OUTPUT = b"""link/ether fa:16:3e:2a:cc:ce brd ff:ff:ff:ff:ff:ff + inet 10.5.16.93/16 brd 10.5.255.255 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:1:0:d0cf:528c:23eb:6000/64 scope global + valid_lft forever preferred_lft forever + inet6 2001:db8:1:0:2918:3444:852:5b8a/64 scope global temporary dynamic + valid_lft 86400sec preferred_lft 14400sec + inet6 2001:db8:1:0:f816:3eff:fe2a:ccce/64 scope global dynamic + valid_lft 86400sec preferred_lft 
14400sec
+ inet6 fe80::f816:3eff:fe2a:ccce/64 scope link
+ valid_lft forever preferred_lft forever
+"""
+
+IP2_OUTPUT = b"""link/ether fa:16:3e:2a:cc:ce brd ff:ff:ff:ff:ff:ff
+ inet 10.5.16.93/16 brd 10.5.255.255 scope global eth0
+ valid_lft forever preferred_lft forever
+ inet6 2001:db8:1:0:d0cf:528c:23eb:6000/64 scope global
+ valid_lft forever preferred_lft forever
+ inet6 2001:db8:1:0:2918:3444:852:5b8a/64 scope global temporary dynamic
+ valid_lft 86400sec preferred_lft 14400sec
+ inet6 2001:db8:1:0:f816:3eff:fe2a:ccce/64 scope global mngtmpaddr dynamic
+ valid_lft 86400sec preferred_lft 14400sec
+ inet6 fe80::f816:3eff:fe2a:ccce/64 scope link
+ valid_lft forever preferred_lft forever
+"""
+
+IP_OUTPUT_NO_VALID = b"""link/ether fa:16:3e:2a:cc:ce brd ff:ff:ff:ff:ff:ff
+ inet 10.5.16.93/16 brd 10.5.255.255 scope global eth0
+ valid_lft forever preferred_lft forever
+ inet6 2001:db8:1:0:2918:3444:852:5b8a/64 scope global temporary dynamic
+ valid_lft 86400sec preferred_lft 14400sec
+ inet6 fe80::f816:3eff:fe2a:ccce/64 scope link
+ valid_lft forever preferred_lft forever
+"""
+
+
+class FakeAnswer(object):
+    def __init__(self, ip):
+        self.ip = ip
+
+    def __str__(self):
+        return self.ip
+
+
+class FakeResolver(object):
+    def __init__(self, ip):
+        self.ip = ip
+
+    def query(self, hostname, query_type):
+        if self.ip == '':
+            return []
+        else:
+            return [FakeAnswer(self.ip)]
+
+
+class FakeReverse(object):
+    def from_address(self, address):
+        return '156.94.189.91.in-addr.arpa'
+
+
+class FakeDNSName(object):
+    def __init__(self, dnsname):
+        pass
+
+
+class FakeDNS(object):
+    def __init__(self, ip):
+        self.resolver = FakeResolver(ip)
+        self.reversename = FakeReverse()
+        self.name = MagicMock()
+        self.name.Name = FakeDNSName
+
+
+class IPTest(unittest.TestCase):
+
+    def mock_ifaddresses(self, iface):
+        return DUMMY_ADDRESSES[iface]
+
+    def test_get_address_in_network_with_invalid_net(self):
+        for net in ['192.168.300/22', '192.168.1.0/2a', '2.a']:
+            self.assertRaises(ValueError,
+                              net_ip.get_address_in_network,
+                              net)
+
+    def _test_get_address_in_network(self, expect_ip_addr,
+                                     network, fallback=None, fatal=False):
+
+        def side_effect(iface):
+            return DUMMY_ADDRESSES[iface]
+
+        with mock.patch.object(netifaces, 'interfaces') as interfaces:
+            interfaces.return_value = sorted(DUMMY_ADDRESSES.keys())
+            with mock.patch.object(netifaces, 'ifaddresses') as ifaddresses:
+                ifaddresses.side_effect = side_effect
+                if not fatal:
+                    self.assertEqual(expect_ip_addr,
+                                     net_ip.get_address_in_network(network,
+                                                                   fallback,
+                                                                   fatal))
+                else:
+                    net_ip.get_address_in_network(network, fallback, fatal)
+
+    @mock.patch.object(subprocess, 'call')
+    def test_get_address_in_network_with_none(self, popen):
+        fallback = '10.10.10.10'
+        self.assertEqual(fallback,
+                         net_ip.get_address_in_network(None, fallback))
+        self.assertEqual(None,
+                         net_ip.get_address_in_network(None))
+
+        self.assertRaises(ValueError, self._test_get_address_in_network,
+                          None, None, fatal=True)
+
+    def test_get_address_in_network_ipv4(self):
+        self._test_get_address_in_network('192.168.1.55', '192.168.1.0/24')
+
+    def test_get_address_in_network_ipv4_multi(self):
+        # Assumes that there is an address configured on both but the first
+        # one is picked.
+        self._test_get_address_in_network('192.168.1.55',
+                                          '192.168.1.0/24 192.168.10.0/24')
+
+    def test_get_address_in_network_ipv4_multi2(self):
+        # Assumes that there is nothing configured on 192.168.11.0/24
+        self._test_get_address_in_network('192.168.10.58',
+                                          '192.168.11.0/24 192.168.10.0/24')
+
+    def 
test_get_address_in_network_ipv4_secondary(self): + self._test_get_address_in_network('10.6.0.2', + '10.6.0.0/24') + + def test_get_address_in_network_ipv6(self): + self._test_get_address_in_network('2a01:348:2f4:0:685e:5748:ae62:209f', + '2a01:348:2f4::/64') + + def test_get_address_in_network_with_non_existent_net(self): + self._test_get_address_in_network(None, '172.16.0.0/16') + + def test_get_address_in_network_fallback_works(self): + fallback = '10.10.0.0' + self._test_get_address_in_network(fallback, '172.16.0.0/16', fallback) + + @mock.patch.object(subprocess, 'call') + def test_get_address_in_network_not_found_fatal(self, popen): + self.assertRaises(ValueError, self._test_get_address_in_network, + None, '172.16.0.0/16', fatal=True) + + def test_get_address_in_network_not_found_not_fatal(self): + self._test_get_address_in_network(None, '172.16.0.0/16', fatal=False) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_address_in_network_netmask(self, _interfaces, _ifaddresses): + """ + Validates that get_address_in_network works with a netmask + that uses the format 'ffff:ffff:ffff::/prefixlen' + """ + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + self._test_get_address_in_network('fd2d:dec4:cf59:3c16::1', + 'fd2d:dec4:cf59:3c16::/64', + fatal=False) + + def test_is_address_in_network(self): + self.assertTrue( + net_ip.is_address_in_network( + '192.168.1.0/24', + '192.168.1.1')) + self.assertFalse( + net_ip.is_address_in_network( + '192.168.1.0/24', + '10.5.1.1')) + self.assertRaises(ValueError, net_ip.is_address_in_network, + 'broken', '192.168.1.1') + self.assertRaises(ValueError, net_ip.is_address_in_network, + '192.168.1.0/24', 'hostname') + self.assertTrue( + net_ip.is_address_in_network( + '2a01:348:2f4::/64', + '2a01:348:2f4:0:685e:5748:ae62:209f') + ) + self.assertFalse( + net_ip.is_address_in_network( + '2a01:348:2f4::/64', + 'fdfc:3bd5:210b:cc8d:8c80:9e10:3f07:371') + ) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_iface_for_address(self, _interfaces, _ifaddresses): + def mock_ifaddresses(iface): + return DUMMY_ADDRESSES[iface] + _interfaces.return_value = ['eth0', 'eth1'] + _ifaddresses.side_effect = mock_ifaddresses + self.assertEquals( + net_ip.get_iface_for_address('192.168.1.220'), + 'eth0') + self.assertEquals(net_ip.get_iface_for_address('10.5.20.4'), 'eth1') + self.assertEquals( + net_ip.get_iface_for_address('2a01:348:2f4:0:685e:5748:ae62:210f'), + 'eth0' + ) + self.assertEquals(net_ip.get_iface_for_address('172.4.5.5'), None) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_netmask_for_address(self, _interfaces, _ifaddresses): + def mock_ifaddresses(iface): + return DUMMY_ADDRESSES[iface] + _interfaces.return_value = ['eth0', 'eth1'] + _ifaddresses.side_effect = mock_ifaddresses + self.assertEquals( + net_ip.get_netmask_for_address('192.168.1.220'), + '255.255.255.0') + self.assertEquals( + net_ip.get_netmask_for_address('10.5.20.4'), + '255.255.0.0') + self.assertEquals(net_ip.get_netmask_for_address('172.4.5.5'), None) + self.assertEquals( + net_ip.get_netmask_for_address( + '2a01:348:2f4:0:685e:5748:ae62:210f'), + '64' + ) + self.assertEquals( + net_ip.get_netmask_for_address('2001:db8:1::'), + '128' + ) + + def test_is_ipv6(self): + self.assertFalse(net_ip.is_ipv6('myhost')) + self.assertFalse(net_ip.is_ipv6('172.4.5.5')) + 
self.assertTrue(net_ip.is_ipv6('2a01:348:2f4:0:685e:5748:ae62:209f')) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_ipv6_addr_no_ipv6(self, _interfaces, _ifaddresses): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + with nose.tools.assert_raises(Exception): + net_ip.get_ipv6_addr('eth0:1') + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_ipv6_addr_no_global_ipv6(self, _interfaces, + _ifaddresses): + DUMMY_ADDRESSES = { + 'eth0': { + 10: [{'addr': 'fe80::3e97:eff:fe8b:1cf7%eth0', + 'netmask': 'ffff:ffff:ffff:ffff::'}], + } + } + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + self.assertRaises(Exception, net_ip.get_ipv6_addr) + + @patch('charmhelpers.contrib.network.ip.get_iface_from_addr') + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_ipv6_addr_exc_list(self, _interfaces, _ifaddresses, + mock_get_iface_from_addr): + def mock_ifaddresses(iface): + return DUMMY_ADDRESSES[iface] + + _interfaces.return_value = ['eth0', 'eth1'] + _ifaddresses.side_effect = mock_ifaddresses + + result = net_ip.get_ipv6_addr( + exc_list='2a01:348:2f4:0:685e:5748:ae62:209f', + inc_aliases=True, + fatal=False + ) + self.assertEqual([], result) + + @patch('charmhelpers.contrib.network.ip.get_iface_from_addr') + @patch('charmhelpers.contrib.network.ip.subprocess.check_output') + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_ipv6_addr(self, _interfaces, _ifaddresses, mock_check_out, + mock_get_iface_from_addr): + mock_get_iface_from_addr.return_value = 'eth0' + mock_check_out.return_value = \ + b"inet6 2a01:348:2f4:0:685e:5748:ae62:209f/64 scope global dynamic" + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + result = net_ip.get_ipv6_addr(dynamic_only=False) + self.assertEqual(['2a01:348:2f4:0:685e:5748:ae62:209f'], result) + + @patch('charmhelpers.contrib.network.ip.get_iface_from_addr') + @patch('charmhelpers.contrib.network.ip.subprocess.check_output') + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_ipv6_addr_global_dynamic(self, _interfaces, _ifaddresses, + mock_check_out, + mock_get_iface_from_addr): + mock_get_iface_from_addr.return_value = 'eth0' + mock_check_out.return_value = \ + b"inet6 2a01:348:2f4:0:685e:5748:ae62:209f/64 scope global dynamic" + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + result = net_ip.get_ipv6_addr(dynamic_only=False) + self.assertEqual(['2a01:348:2f4:0:685e:5748:ae62:209f'], result) + + @patch.object(netifaces, 'interfaces') + def test_get_ipv6_addr_invalid_nic(self, _interfaces): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + self.assertRaises(Exception, net_ip.get_ipv6_addr, 'eth1') + + @patch('charmhelpers.contrib.network.ip.subprocess.check_output') + def test_is_ipv6_disabled(self, mock_check_output): + # verify that the function does look for the right thing + mock_check_output.return_value = """ + Some lines before + net.ipv6.conf.all.disable_ipv6 = 1 + Some lines afterward + """ + self.assertTrue(net_ip.is_ipv6_disabled()) + mock_check_output.assert_called_once_with( + ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], + stderr=subprocess.STDOUT, universal_newlines=True) + # if 
it isn't there, it must return false + mock_check_output.return_value = "" + self.assertFalse(net_ip.is_ipv6_disabled()) + # If the syscall returns an error, then return True + + def fake_check_call(*args, **kwargs): + raise subprocess.CalledProcessError(['called'], 1) + mock_check_output.side_effect = fake_check_call + self.assertTrue(net_ip.is_ipv6_disabled()) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr(self, _interfaces, _ifaddresses): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + result = net_ip.get_iface_addr("eth0") + self.assertEqual(["192.168.1.55"], result) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr_excaliases(self, _interfaces, _ifaddresses): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + result = net_ip.get_iface_addr("eth0") + self.assertEqual(['192.168.1.55'], result) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr_incaliases(self, _interfaces, _ifaddresses): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + result = net_ip.get_iface_addr("eth0", inc_aliases=True) + self.assertEqual(['192.168.1.55', '192.168.1.56'], result) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr_exclist(self, _interfaces, _ifaddresses): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + result = net_ip.get_iface_addr("eth0", inc_aliases=True, + exc_list=['192.168.1.55']) + self.assertEqual(['192.168.1.56'], result) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr_mixedaddr(self, _interfaces, _ifaddresses): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + result = net_ip.get_iface_addr("eth2", inc_aliases=True) + self.assertEqual(["192.168.10.58"], result) + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr_full_interface_path(self, _interfaces, + _ifaddresses): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__ + result = net_ip.get_iface_addr("/dev/eth0") + self.assertEqual(["192.168.1.55"], result) + + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr_invalid_type(self, _interfaces): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + with nose.tools.assert_raises(Exception): + net_ip.get_iface_addr(iface='eth0', inet_type='AF_BOB') + + @patch.object(netifaces, 'ifaddresses') + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr_invalid_interface(self, _interfaces, _ifaddresses): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + result = net_ip.get_ipv4_addr("eth3", fatal=False) + self.assertEqual([], result) + + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr_invalid_interface_fatal(self, _interfaces): + _interfaces.return_value = DUMMY_ADDRESSES.keys() + with nose.tools.assert_raises(Exception): + net_ip.get_ipv4_addr("eth3", fatal=True) + + @patch.object(netifaces, 'interfaces') + def test_get_iface_addr_invalid_interface_fatal_incaliases(self, + _interfaces): + _interfaces.return_value = 
DUMMY_ADDRESSES.keys()
+        with nose.tools.assert_raises(Exception):
+            net_ip.get_ipv4_addr("eth3", fatal=True, inc_aliases=True)
+
+    @patch.object(netifaces, 'ifaddresses')
+    @patch.object(netifaces, 'interfaces')
+    def test_get_get_iface_addr_interface_has_no_ipv4(self, _interfaces,
+                                                      _ifaddresses):
+
+        # This will raise a KeyError since we are looking for "2"
+        # (actually, netifaces.AF_INET).
+        DUMMY_ADDRESSES = {
+            'eth0': {
+                10: [{'addr': 'fe80::3e97:eff:fe8b:1cf7%eth0',
+                      'netmask': 'ffff:ffff:ffff:ffff::'}],
+            }
+        }
+
+        _interfaces.return_value = DUMMY_ADDRESSES.keys()
+        _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__
+
+        result = net_ip.get_ipv4_addr("eth0", fatal=False)
+        self.assertEqual([], result)
+
+    @patch('glob.glob')
+    def test_get_bridges(self, _glob):
+        _glob.return_value = ['/sys/devices/virtual/net/br0/bridge']
+        self.assertEqual(['br0'], net_ip.get_bridges())
+
+    @patch.object(net_ip, 'get_bridges')
+    @patch('glob.glob')
+    def test_get_bridge_nics(self, _glob, _get_bridges):
+        _glob.return_value = ['/sys/devices/virtual/net/br0/brif/eth4',
+                              '/sys/devices/virtual/net/br0/brif/eth5']
+        self.assertEqual(['eth4', 'eth5'], net_ip.get_bridge_nics('br0'))
+
+    @patch.object(net_ip, 'get_bridges')
+    @patch('glob.glob')
+    def test_get_bridge_nics_invalid_br(self, _glob, _get_bridges):
+        _glob.return_value = []
+        self.assertEqual([], net_ip.get_bridge_nics('br1'))
+
+    @patch.object(net_ip, 'get_bridges')
+    @patch.object(net_ip, 'get_bridge_nics')
+    def test_is_bridge_member(self, _get_bridge_nics, _get_bridges):
+        _get_bridges.return_value = ['br0']
+        _get_bridge_nics.return_value = ['eth4', 'eth5']
+        self.assertTrue(net_ip.is_bridge_member('eth4'))
+        self.assertFalse(net_ip.is_bridge_member('eth6'))
+
+    def test_format_ipv6_addr(self):
+        DUMMY_ADDRESS = '2001:db8:1:0:f131:fc84:ea37:7d4'
+        self.assertEquals(net_ip.format_ipv6_addr(DUMMY_ADDRESS),
+                          '[2001:db8:1:0:f131:fc84:ea37:7d4]')
+
+    def test_format_invalid_ipv6_addr(self):
+        INVALID_IPV6_ADDR = 'myhost'
+        self.assertEquals(net_ip.format_ipv6_addr(INVALID_IPV6_ADDR),
+                          None)
+
+    @patch('charmhelpers.contrib.network.ip.get_iface_from_addr')
+    @patch('charmhelpers.contrib.network.ip.subprocess.check_output')
+    @patch('charmhelpers.contrib.network.ip.get_iface_addr')
+    def test_get_ipv6_global_address(self, mock_get_iface_addr, mock_check_out,
+                                     mock_get_iface_from_addr):
+        mock_get_iface_from_addr.return_value = 'eth0'
+        mock_check_out.return_value = IP_OUTPUT
+        scope_global_addr = '2001:db8:1:0:d0cf:528c:23eb:6000'
+        scope_global_dyn_addr = '2001:db8:1:0:f816:3eff:fe2a:ccce'
+        mock_get_iface_addr.return_value = [scope_global_addr,
+                                            scope_global_dyn_addr,
+                                            '2001:db8:1:0:2918:3444:852:5b8a',
+                                            'fe80::f816:3eff:fe2a:ccce%eth0']
+        self.assertEqual([scope_global_addr, scope_global_dyn_addr],
+                         net_ip.get_ipv6_addr(dynamic_only=False))
+
+    @patch('charmhelpers.contrib.network.ip.get_iface_from_addr')
+    @patch('charmhelpers.contrib.network.ip.subprocess.check_output')
+    @patch('charmhelpers.contrib.network.ip.get_iface_addr')
+    def test_get_ipv6_global_dynamic_address(self, mock_get_iface_addr,
+                                             mock_check_out,
+                                             mock_get_iface_from_addr):
+        mock_get_iface_from_addr.return_value = 'eth0'
+        mock_check_out.return_value = IP_OUTPUT
+        scope_global_addr = '2001:db8:1:0:d0cf:528c:23eb:6000'
+        scope_global_dyn_addr = '2001:db8:1:0:f816:3eff:fe2a:ccce'
+        mock_get_iface_addr.return_value = [scope_global_addr,
+                                            scope_global_dyn_addr,
+                                            '2001:db8:1:0:2918:3444:852:5b8a',
+                                            'fe80::f816:3eff:fe2a:ccce%eth0']
+        
self.assertEqual([scope_global_dyn_addr], net_ip.get_ipv6_addr()) + + @patch('charmhelpers.contrib.network.ip.get_iface_from_addr') + @patch('charmhelpers.contrib.network.ip.subprocess.check_output') + @patch('charmhelpers.contrib.network.ip.get_iface_addr') + def test_get_ipv6_global_dynamic_address_ip2(self, mock_get_iface_addr, + mock_check_out, + mock_get_iface_from_addr): + mock_get_iface_from_addr.return_value = 'eth0' + mock_check_out.return_value = IP2_OUTPUT + scope_global_addr = '2001:db8:1:0:d0cf:528c:23eb:6000' + scope_global_dyn_addr = '2001:db8:1:0:f816:3eff:fe2a:ccce' + mock_get_iface_addr.return_value = [scope_global_addr, + scope_global_dyn_addr, + '2001:db8:1:0:2918:3444:852:5b8a', + 'fe80::f816:3eff:fe2a:ccce%eth0'] + self.assertEqual([scope_global_dyn_addr], net_ip.get_ipv6_addr()) + + @patch('charmhelpers.contrib.network.ip.subprocess.check_output') + @patch('charmhelpers.contrib.network.ip.get_iface_addr') + def test_get_ipv6_global_dynamic_address_invalid_address( + self, mock_get_iface_addr, mock_check_out): + mock_get_iface_addr.return_value = [] + with nose.tools.assert_raises(Exception): + net_ip.get_ipv6_addr() + + mock_get_iface_addr.return_value = ['2001:db8:1:0:2918:3444:852:5b8a'] + mock_check_out.return_value = IP_OUTPUT_NO_VALID + with nose.tools.assert_raises(Exception): + net_ip.get_ipv6_addr() + + @patch('charmhelpers.contrib.network.ip.get_iface_addr') + def test_get_ipv6_addr_w_iface(self, mock_get_iface_addr): + mock_get_iface_addr.return_value = [] + net_ip.get_ipv6_addr(iface='testif', fatal=False) + mock_get_iface_addr.assert_called_once_with(iface='testif', + inet_type='AF_INET6', + inc_aliases=False, + fatal=False, exc_list=None) + + @patch('charmhelpers.contrib.network.ip.unit_get') + @patch('charmhelpers.contrib.network.ip.get_iface_from_addr') + @patch('charmhelpers.contrib.network.ip.get_iface_addr') + def test_get_ipv6_addr_no_iface(self, mock_get_iface_addr, + mock_get_iface_from_addr, mock_unit_get): + mock_unit_get.return_value = '1.2.3.4' + mock_get_iface_addr.return_value = [] + mock_get_iface_from_addr.return_value = "testif" + net_ip.get_ipv6_addr(fatal=False) + mock_get_iface_from_addr.assert_called_once_with('1.2.3.4') + mock_get_iface_addr.assert_called_once_with(iface='testif', + inet_type='AF_INET6', + inc_aliases=False, + fatal=False, exc_list=None) + + @patch('netifaces.interfaces') + @patch('netifaces.ifaddresses') + @patch('charmhelpers.contrib.network.ip.log') + def test_get_iface_from_addr(self, mock_log, mock_ifaddresses, + mock_interfaces): + mock_ifaddresses.side_effect = lambda iface: DUMMY_ADDRESSES[iface] + mock_interfaces.return_value = sorted(DUMMY_ADDRESSES.keys()) + addr = 'fe80::3e97:eff:fe8b:1cf7' + self.assertEqual(net_ip.get_iface_from_addr(addr), 'eth0') + + with nose.tools.assert_raises(Exception): + net_ip.get_iface_from_addr('1.2.3.4') + + def test_is_ip(self): + self.assertTrue(net_ip.is_ip('10.0.0.1')) + self.assertTrue(net_ip.is_ip('2001:db8:1:0:2918:3444:852:5b8a')) + self.assertFalse(net_ip.is_ip('www.ubuntu.com')) + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_host_ip_with_hostname(self, apt_install): + fake_dns = FakeDNS('10.0.0.1') + with patch(builtin_import, side_effect=[fake_dns]): + ip = net_ip.get_host_ip('www.ubuntu.com') + self.assertEquals(ip, '10.0.0.1') + + @patch('charmhelpers.contrib.network.ip.ns_query') + @patch('charmhelpers.contrib.network.ip.socket.gethostbyname') + @patch('charmhelpers.contrib.network.ip.apt_install') + def 
test_get_host_ip_with_hostname_no_dns(self, apt_install, socket, + ns_query): + ns_query.return_value = [] + fake_dns = FakeDNS(None) + socket.return_value = '10.0.0.1' + with patch(builtin_import, side_effect=[fake_dns]): + ip = net_ip.get_host_ip('www.ubuntu.com') + self.assertEquals(ip, '10.0.0.1') + + @patch('charmhelpers.contrib.network.ip.log') + @patch('charmhelpers.contrib.network.ip.ns_query') + @patch('charmhelpers.contrib.network.ip.socket.gethostbyname') + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_host_ip_with_hostname_fallback(self, apt_install, socket, + ns_query, *args): + ns_query.return_value = [] + fake_dns = FakeDNS(None) + + def r(): + raise Exception() + + socket.side_effect = r + with patch(builtin_import, side_effect=[fake_dns]): + ip = net_ip.get_host_ip('www.ubuntu.com', fallback='127.0.0.1') + self.assertEquals(ip, '127.0.0.1') + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_host_ip_with_ip(self, apt_install): + fake_dns = FakeDNS('5.5.5.5') + with patch(builtin_import, side_effect=[fake_dns]): + ip = net_ip.get_host_ip('4.2.2.1') + self.assertEquals(ip, '4.2.2.1') + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_ns_query_trigger_apt_install(self, apt_install): + fake_dns = FakeDNS('5.5.5.5') + with patch(builtin_import, side_effect=[ImportError, fake_dns]): + nsq = net_ip.ns_query('5.5.5.5') + if six.PY2: + apt_install.assert_called_with('python-dnspython', fatal=True) + else: + apt_install.assert_called_with('python3-dnspython', fatal=True) + self.assertEquals(nsq, '5.5.5.5') + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_ns_query_ptr_record(self, apt_install): + fake_dns = FakeDNS('127.0.0.1') + with patch(builtin_import, side_effect=[fake_dns]): + nsq = net_ip.ns_query('127.0.0.1') + self.assertEquals(nsq, '127.0.0.1') + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_ns_query_a_record(self, apt_install): + fake_dns = FakeDNS('127.0.0.1') + fake_dns_name = FakeDNSName('www.somedomain.tld') + with patch(builtin_import, side_effect=[fake_dns]): + nsq = net_ip.ns_query(fake_dns_name) + self.assertEquals(nsq, '127.0.0.1') + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_ns_query_blank_record(self, apt_install): + fake_dns = FakeDNS(None) + with patch(builtin_import, side_effect=[fake_dns, fake_dns]): + nsq = net_ip.ns_query(None) + self.assertEquals(nsq, None) + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_ns_query_lookup_fail(self, apt_install): + fake_dns = FakeDNS('') + with patch(builtin_import, side_effect=[fake_dns, fake_dns]): + nsq = net_ip.ns_query('nonexistant') + self.assertEquals(nsq, None) + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_hostname_with_ip(self, apt_install): + fake_dns = FakeDNS('www.ubuntu.com') + with patch(builtin_import, side_effect=[fake_dns, fake_dns]): + hn = net_ip.get_hostname('4.2.2.1') + self.assertEquals(hn, 'www.ubuntu.com') + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_hostname_with_ip_not_fqdn(self, apt_install): + fake_dns = FakeDNS('packages.ubuntu.com') + with patch(builtin_import, side_effect=[fake_dns, fake_dns]): + hn = net_ip.get_hostname('4.2.2.1', fqdn=False) + self.assertEquals(hn, 'packages') + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_hostname_with_hostname(self, apt_install): + hn = net_ip.get_hostname('www.ubuntu.com') + self.assertEquals(hn, 'www.ubuntu.com') + + 
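+    # ``get_hostname`` accepts either an IP address (resolved via a reverse
+    # DNS query, falling back to ``socket.gethostbyaddr``) or a hostname,
+    # which is returned as-is modulo any trailing dot; in sketch form:
+    #
+    #     net_ip.get_hostname('4.2.2.1')          # reverse lookup
+    #     net_ip.get_hostname('www.ubuntu.com.')  # -> 'www.ubuntu.com'
+    #     net_ip.get_hostname('packages.ubuntu.com', fqdn=False)  # -> 'packages'
+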
@patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_hostname_with_hostname_trailingdot(self, apt_install): + hn = net_ip.get_hostname('www.ubuntu.com.') + self.assertEquals(hn, 'www.ubuntu.com') + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_hostname_with_hostname_not_fqdn(self, apt_install): + hn = net_ip.get_hostname('packages.ubuntu.com', fqdn=False) + self.assertEquals(hn, 'packages') + + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_hostname_trigger_apt_install(self, apt_install): + fake_dns = FakeDNS('www.ubuntu.com') + with patch(builtin_import, side_effect=[ImportError, fake_dns, + fake_dns]): + hn = net_ip.get_hostname('4.2.2.1') + if six.PY2: + apt_install.assert_called_with('python-dnspython', fatal=True) + else: + apt_install.assert_called_with('python3-dnspython', fatal=True) + + self.assertEquals(hn, 'www.ubuntu.com') + + @patch('charmhelpers.contrib.network.ip.socket.gethostbyaddr') + @patch('charmhelpers.contrib.network.ip.ns_query') + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_hostname_lookup_fail(self, apt_install, ns_query, socket): + fake_dns = FakeDNS('www.ubuntu.com') + ns_query.return_value = [] + socket.return_value = () + with patch(builtin_import, side_effect=[fake_dns, fake_dns]): + hn = net_ip.get_hostname('4.2.2.1') + self.assertEquals(hn, None) + + @patch('charmhelpers.contrib.network.ip.socket.gethostbyaddr') + @patch('charmhelpers.contrib.network.ip.ns_query') + @patch('charmhelpers.contrib.network.ip.apt_install') + def test_get_hostname_lookup_fail_gethostbyaddr_fallback( + self, apt_install, ns_query, socket): + fake_dns = FakeDNS('www.ubuntu.com') + ns_query.return_value = [] + socket.return_value = ("www.ubuntu.com", "", "") + with patch(builtin_import, side_effect=[fake_dns]): + hn = net_ip.get_hostname('4.2.2.1') + self.assertEquals(hn, "www.ubuntu.com") + + @patch('charmhelpers.contrib.network.ip.subprocess.call') + def test_port_has_listener(self, subprocess_call): + subprocess_call.return_value = 1 + self.assertEqual(net_ip.port_has_listener('ip-address', 50), False) + subprocess_call.assert_called_with(['nc', '-z', 'ip-address', '50']) + subprocess_call.return_value = 0 + self.assertEqual(net_ip.port_has_listener('ip-address', 70), True) + subprocess_call.assert_called_with(['nc', '-z', 'ip-address', '70']) + + @patch.object(net_ip, 'log', lambda *args, **kwargs: None) + @patch.object(net_ip, 'config') + @patch.object(net_ip, 'network_get_primary_address') + @patch.object(net_ip, 'get_address_in_network') + @patch.object(net_ip, 'unit_get') + @patch.object(net_ip, 'get_ipv6_addr') + @patch.object(net_ip, 'assert_charm_supports_ipv6') + def test_get_relation_ip(self, assert_charm_supports_ipv6, get_ipv6_addr, + unit_get, get_address_in_network, + network_get_primary_address, config): + ACCESS_IP = '10.50.1.1' + ACCESS_NETWORK = '10.50.1.0/24' + AMQP_IP = '10.200.1.1' + IPV6_IP = '2001:DB8::1' + DEFAULT_IP = '172.16.1.1' + assert_charm_supports_ipv6.return_value = True + get_ipv6_addr.return_value = [IPV6_IP] + unit_get.return_value = DEFAULT_IP + get_address_in_network.return_value = DEFAULT_IP + network_get_primary_address.return_value = AMQP_IP + + # Network-get calls + _config = {'prefer-ipv6': False} + config.side_effect = lambda key: _config.get(key) + + network_get_primary_address.side_effect = NotImplementedError + self.assertEqual(DEFAULT_IP, net_ip.get_relation_ip('amqp')) + + network_get_primary_address.side_effect = net_ip.NoNetworkBinding + 
self.assertEqual(DEFAULT_IP, net_ip.get_relation_ip('doesnotexist')) + + network_get_primary_address.side_effect = None + self.assertEqual(AMQP_IP, net_ip.get_relation_ip('amqp')) + + self.assertFalse(get_address_in_network.called) + + # Specific CIDR network + get_address_in_network.return_value = ACCESS_IP + network_get_primary_address.return_value = DEFAULT_IP + self.assertEqual( + ACCESS_IP, + net_ip.get_relation_ip('shared-db', + cidr_network=ACCESS_NETWORK)) + get_address_in_network.assert_called_with(ACCESS_NETWORK, DEFAULT_IP) + + self.assertFalse(assert_charm_supports_ipv6.called) + + # IPv6 + _config = {'prefer-ipv6': True} + config.side_effect = lambda key: _config.get(key) + self.assertEqual(IPV6_IP, net_ip.get_relation_ip('amqp')) + assert_charm_supports_ipv6.assert_called_with() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/test_ovs.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/test_ovs.py new file mode 100644 index 0000000000000000000000000000000000000000..9cf8e615123f78420c29e4a545d22805d2ae5f99 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/test_ovs.py @@ -0,0 +1,333 @@ +import subprocess +import unittest + +from mock import patch, call, MagicMock + +import charmhelpers.contrib.network.ovs as ovs + +from tests.helpers import patch_open + + +# NOTE(fnordahl): some functions drectly under the ``contrib.network.ovs`` +# module have their unit tests in the ``test_ovs.py`` module in the +# ``tests.contrib.network.ovs`` package. + + +GOOD_CERT = '''Certificate: + Data: + Version: 1 (0x0) + Serial Number: 13798680962510501282 (0xbf7ec33a136235a2) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=Palo Alto, O=Open vSwitch, OU=Open vSwitch + Validity + Not Before: Jun 28 17:02:19 2013 GMT + Not After : Jun 28 17:02:19 2019 GMT + Subject: C=US, ST=CA, L=Palo Alto, O=Open vSwitch, OU=Open vSwitch + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e8:a7:db:0a:6d:c0:16:4a:14:96:1d:74:91:15: + 64:3f:ae:2a:54:be:2a:fe:10:14:9a:73:39:d8:58: + 74:7f:ab:d5:f2:39:aa:9a:27:7c:31:82:f8:74:42: + 46:8d:c5:3b:42:55:52:be:75:7f:a5:b1:ec:d5:29: + 9f:62:0e:de:31:27:2b:95:1f:24:0d:ca:8c:48:30: + 96:9f:ba:b7:9d:eb:c1:bd:93:05:e3:d8:ca:66:5a: + e9:cb:a5:7a:3a:8d:27:e2:05:9d:88:fc:a9:ef:af: + 47:4c:66:ce:c6:43:73:1a:85:f4:5f:b9:53:5b:29: + f3:c3:23:1f:0c:20:95:11:50:71:b2:f6:01:23:3f: + 66:0f:5c:43:c2:90:fb:e5:98:73:98:e9:38:bb:1f: + 1b:89:97:1e:dc:d7:98:07:68:32:ec:da:1d:69:0b: + e2:df:40:fb:64:52:e5:e9:40:27:b0:ca:73:21:51: + f6:8f:00:20:c0:2b:1a:d4:01:c2:32:38:9d:d1:8d: + 88:71:46:a9:42:0d:ee:3b:1c:88:db:27:69:49:f9: + 60:34:70:61:3d:60:df:7e:e4:e1:1d:c6:16:89:05: + ba:31:06:eb:88:b5:78:94:5d:8c:9d:88:fe:f2:c2: + 80:a1:04:15:d3:84:85:d3:aa:5a:1d:53:5c:f8:57: + ae:61 + Exponent: 65537 (0x10001) + Signature Algorithm: sha1WithRSAEncryption + 14:7e:ca:c3:fc:93:60:9f:80:e0:65:2e:ef:41:2d:f9:af:77: + da:6d:e2:e0:11:70:17:fb:e5:67:4c:f0:ad:39:ec:96:ef:fe: + d5:95:94:70:e5:52:31:68:63:8c:ea:b3:a1:8e:02:e2:91:4b: + a8:8c:07:86:fd:80:98:a2:b1:90:2b:9c:2e:ab:f4:73:9d:8f: + fd:31:b9:8f:fe:6c:af:d6:bf:72:44:89:08:93:19:ef:2b:c3: + 7c:ab:ba:bc:57:ca:f1:17:e4:e8:81:40:ca:65:df:84:be:10: + 2c:42:46:af:d2:e0:0d:df:5d:56:53:65:13:e0:20:55:b4:ee: + cd:5e:b5:c4:97:1d:3e:a6:c1:9c:7e:b8:87:ee:64:78:a5:59: + e5:b2:79:47:9a:8e:59:fa:c4:18:ea:27:fd:a2:d5:76:d0:ae: + 
d9:05:f6:0e:23:ca:7d:66:a1:ba:18:67:f5:6d:bb:51:5a:f5: + 52:e9:17:bb:63:15:24:b4:61:25:9f:d9:9c:89:58:93:9a:c3: + 74:55:72:3e:f9:ff:ef:54:7d:e8:28:78:ba:3c:c7:15:ba:b9: + c6:e3:8c:61:cb:a9:ed:8d:07:16:0d:8d:f6:1c:36:11:69:08: + b8:45:7d:fc:fd:d1:ab:2d:9b:4e:9c:dd:11:78:50:c7:87:9f: + 4a:24:9c:a0 +-----BEGIN CERTIFICATE----- +MIIDwjCCAqoCCQC/fsM6E2I1ojANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMC +VVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlQYWxvIEFsdG8xFTATBgNVBAoTDE9w +ZW4gdlN3aXRjaDEfMB0GA1UECxMWT3BlbiB2U3dpdGNoIGNlcnRpZmllcjE6MDgG +A1UEAxMxb3ZzY2xpZW50IGlkOjU4MTQ5N2E1LWJjMDAtNGVjYy1iNzkwLTU3NTZj +ZWUxNmE0ODAeFw0xMzA2MjgxNzAyMTlaFw0xOTA2MjgxNzAyMTlaMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVBhbG8gQWx0bzEVMBMGA1UE +ChMMT3BlbiB2U3dpdGNoMR8wHQYDVQQLExZPcGVuIHZTd2l0Y2ggY2VydGlmaWVy +MTowOAYDVQQDEzFvdnNjbGllbnQgaWQ6NTgxNDk3YTUtYmMwMC00ZWNjLWI3OTAt +NTc1NmNlZTE2YTQ4MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6Kfb +Cm3AFkoUlh10kRVkP64qVL4q/hAUmnM52Fh0f6vV8jmqmid8MYL4dEJGjcU7QlVS +vnV/pbHs1SmfYg7eMScrlR8kDcqMSDCWn7q3nevBvZMF49jKZlrpy6V6Oo0n4gWd +iPyp769HTGbOxkNzGoX0X7lTWynzwyMfDCCVEVBxsvYBIz9mD1xDwpD75ZhzmOk4 +ux8biZce3NeYB2gy7NodaQvi30D7ZFLl6UAnsMpzIVH2jwAgwCsa1AHCMjid0Y2I +cUapQg3uOxyI2ydpSflgNHBhPWDffuThHcYWiQW6MQbriLV4lF2MnYj+8sKAoQQV +04SF06paHVNc+FeuYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAUfsrD/JNgn4Dg +ZS7vQS35r3fabeLgEXAX++VnTPCtOeyW7/7VlZRw5VIxaGOM6rOhjgLikUuojAeG +/YCYorGQK5wuq/RznY/9MbmP/myv1r9yRIkIkxnvK8N8q7q8V8rxF+TogUDKZd+E +vhAsQkav0uAN311WU2UT4CBVtO7NXrXElx0+psGcfriH7mR4pVnlsnlHmo5Z+sQY +6if9otV20K7ZBfYOI8p9ZqG6GGf1bbtRWvVS6Re7YxUktGEln9mciViTmsN0VXI+ ++f/vVH3oKHi6PMcVurnG44xhy6ntjQcWDY32HDYRaQi4RX38/dGrLZtOnN0ReFDH +h59KJJyg +-----END CERTIFICATE----- +''' + +PEM_ENCODED = '''-----BEGIN CERTIFICATE----- +MIIDwjCCAqoCCQC/fsM6E2I1ojANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMC +VVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlQYWxvIEFsdG8xFTATBgNVBAoTDE9w +ZW4gdlN3aXRjaDEfMB0GA1UECxMWT3BlbiB2U3dpdGNoIGNlcnRpZmllcjE6MDgG +A1UEAxMxb3ZzY2xpZW50IGlkOjU4MTQ5N2E1LWJjMDAtNGVjYy1iNzkwLTU3NTZj +ZWUxNmE0ODAeFw0xMzA2MjgxNzAyMTlaFw0xOTA2MjgxNzAyMTlaMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVBhbG8gQWx0bzEVMBMGA1UE +ChMMT3BlbiB2U3dpdGNoMR8wHQYDVQQLExZPcGVuIHZTd2l0Y2ggY2VydGlmaWVy +MTowOAYDVQQDEzFvdnNjbGllbnQgaWQ6NTgxNDk3YTUtYmMwMC00ZWNjLWI3OTAt +NTc1NmNlZTE2YTQ4MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6Kfb +Cm3AFkoUlh10kRVkP64qVL4q/hAUmnM52Fh0f6vV8jmqmid8MYL4dEJGjcU7QlVS +vnV/pbHs1SmfYg7eMScrlR8kDcqMSDCWn7q3nevBvZMF49jKZlrpy6V6Oo0n4gWd +iPyp769HTGbOxkNzGoX0X7lTWynzwyMfDCCVEVBxsvYBIz9mD1xDwpD75ZhzmOk4 +ux8biZce3NeYB2gy7NodaQvi30D7ZFLl6UAnsMpzIVH2jwAgwCsa1AHCMjid0Y2I +cUapQg3uOxyI2ydpSflgNHBhPWDffuThHcYWiQW6MQbriLV4lF2MnYj+8sKAoQQV +04SF06paHVNc+FeuYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAUfsrD/JNgn4Dg +ZS7vQS35r3fabeLgEXAX++VnTPCtOeyW7/7VlZRw5VIxaGOM6rOhjgLikUuojAeG +/YCYorGQK5wuq/RznY/9MbmP/myv1r9yRIkIkxnvK8N8q7q8V8rxF+TogUDKZd+E +vhAsQkav0uAN311WU2UT4CBVtO7NXrXElx0+psGcfriH7mR4pVnlsnlHmo5Z+sQY +6if9otV20K7ZBfYOI8p9ZqG6GGf1bbtRWvVS6Re7YxUktGEln9mciViTmsN0VXI+ ++f/vVH3oKHi6PMcVurnG44xhy6ntjQcWDY32HDYRaQi4RX38/dGrLZtOnN0ReFDH +h59KJJyg +-----END CERTIFICATE-----''' + +BAD_CERT = ''' NO MARKERS ''' +TO_PATCH = [ + "apt_install", + "log", + "hashlib", +] + + +class OVSHelpersTest(unittest.TestCase): + + def setUp(self): + for m in TO_PATCH: + setattr(self, m, self._patch(m)) + + def _patch(self, method): + _m = patch('charmhelpers.contrib.network.ovs.' 
+ method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + @patch('subprocess.check_output') + def test_get_bridges(self, check_output): + check_output.return_value = b"br1\n br2 " + self.assertEqual(ovs.get_bridges(), ['br1', 'br2']) + check_output.assert_called_once_with(['ovs-vsctl', 'list-br']) + + @patch('subprocess.check_output') + def test_get_bridge_ports(self, check_output): + check_output.return_value = b"p1\n p2 \np3" + self.assertEqual(ovs.get_bridge_ports('br1'), ['p1', 'p2', 'p3']) + check_output.assert_called_once_with( + ['ovs-vsctl', '--', 'list-ports', 'br1']) + + @patch.object(ovs, 'get_bridges') + @patch.object(ovs, 'get_bridge_ports') + def test_get_bridges_and_ports_map(self, get_bridge_ports, get_bridges): + get_bridges.return_value = ['br1', 'br2'] + get_bridge_ports.side_effect = [ + ['p1', 'p2'], + ['p3']] + self.assertEqual(ovs.get_bridges_and_ports_map(), { + 'br1': ['p1', 'p2'], + 'br2': ['p3'], + }) + + @patch('subprocess.check_call') + def test_del_bridge(self, check_call): + ovs.del_bridge('test') + check_call.assert_called_with(["ovs-vsctl", "--", "--if-exists", + "del-br", 'test']) + self.assertTrue(self.log.call_count == 1) + + @patch('subprocess.check_call') + def test_del_bridge_port(self, check_call): + ovs.del_bridge_port('test', 'eth1') + check_call.assert_has_calls([ + call(["ovs-vsctl", "--", "--if-exists", "del-port", + 'test', 'eth1']), + call(['ip', 'link', 'set', 'eth1', 'down']), + call(['ip', 'link', 'set', 'eth1', 'promisc', 'off']) + ]) + self.assertTrue(self.log.call_count == 1) + + @patch.object(ovs, 'port_to_br') + @patch.object(ovs, 'add_bridge_port') + @patch('subprocess.check_call') + def test_add_ovsbridge_linuxbridge(self, check_call, + add_bridge_port, + port_to_br): + port_to_br.return_value = None + with patch_open() as (mock_open, mock_file): + ovs.add_ovsbridge_linuxbridge('br-ex', 'br-eno1', ifdata={ + 'external-ids': {'mycharm': 'br-ex'} + }) + + check_call.assert_called_with(['ifup', 'veth-br-eno1']) + add_bridge_port.assert_called_with( + 'br-ex', 'veth-br-eno1', ifdata={ + 'external-ids': {'mycharm': 'br-ex'} + } + ) + + @patch.object(ovs, 'port_to_br') + @patch.object(ovs, 'add_bridge_port') + @patch('subprocess.check_call') + def test_add_ovsbridge_linuxbridge_already_direct_wired(self, + check_call, + add_bridge_port, + port_to_br): + port_to_br.return_value = 'br-ex' + ovs.add_ovsbridge_linuxbridge('br-ex', 'br-eno1') + check_call.assert_not_called() + add_bridge_port.assert_not_called() + + @patch.object(ovs, 'port_to_br') + @patch.object(ovs, 'add_bridge_port') + @patch('subprocess.check_call') + def test_add_ovsbridge_linuxbridge_longname(self, check_call, + add_bridge_port, + port_to_br): + port_to_br.return_value = None + mock_hasher = MagicMock() + mock_hasher.hexdigest.return_value = '12345678901234578910' + self.hashlib.sha256.return_value = mock_hasher + with patch_open() as (mock_open, mock_file): + ovs.add_ovsbridge_linuxbridge('br-ex', 'br-reallylongname') + + check_call.assert_called_with(['ifup', 'cvb12345678-10']) + add_bridge_port.assert_called_with( + 'br-ex', 'cvb12345678-10', ifdata=None + ) + + @patch('os.path.exists') + def test_is_linuxbridge_interface_false(self, exists): + exists.return_value = False + result = ovs.is_linuxbridge_interface('eno1') + self.assertFalse(result) + + @patch('os.path.exists') + def test_is_linuxbridge_interface_true(self, exists): + exists.return_value = True + result = ovs.is_linuxbridge_interface('eno1') + self.assertTrue(result) + + 
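+
+    # NOTE (editorial, illustrative sketch -- not from charm-helpers):
+    # setUp() above patches every name in TO_PATCH inside
+    # charmhelpers.contrib.network.ovs and exposes each mock as an
+    # attribute (self.apt_install, self.log, self.hashlib), while
+    # addCleanup(_m.stop) guarantees the patch is undone even when a test
+    # fails, so mocks never leak between tests. Hand-rolled equivalent for
+    # a single name:
+    #
+    #     _m = patch('charmhelpers.contrib.network.ovs.log')
+    #     self.log = _m.start()
+    #     self.addCleanup(_m.stop)
+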
@patch('subprocess.check_call') + def test_set_manager(self, check_call): + ovs.set_manager('manager') + check_call.assert_called_with(['ovs-vsctl', 'set-manager', + 'ssl:manager']) + self.assertTrue(self.log.call_count == 1) + + @patch('subprocess.check_call') + def test_set_Open_vSwitch_column_value(self, check_call): + ovs.set_Open_vSwitch_column_value('other_config:foo=bar') + check_call.assert_called_with(['ovs-vsctl', 'set', + 'Open_vSwitch', '.', 'other_config:foo=bar']) + self.assertTrue(self.log.call_count == 1) + + @patch('os.path.exists') + def test_get_certificate_good_cert(self, exists): + exists.return_value = True + with patch_open() as (mock_open, mock_file): + mock_file.read.return_value = GOOD_CERT + self.assertEqual(ovs.get_certificate(), PEM_ENCODED) + self.assertTrue(self.log.call_count == 1) + + @patch('os.path.exists') + def test_get_certificate_bad_cert(self, exists): + exists.return_value = True + with patch_open() as (mock_open, mock_file): + mock_file.read.return_value = BAD_CERT + self.assertRaises(RuntimeError, ovs.get_certificate) + self.assertTrue(self.log.call_count == 1) + + @patch('os.path.exists') + def test_get_certificate_missing(self, exists): + exists.return_value = False + self.assertIsNone(ovs.get_certificate()) + self.assertTrue(self.log.call_count == 1) + + @patch('os.path.exists') + @patch.object(ovs, 'service') + def test_full_restart(self, service, exists): + exists.return_value = False + ovs.full_restart() + service.assert_called_with('force-reload-kmod', 'openvswitch-switch') + + @patch('os.path.exists') + @patch.object(ovs, 'service') + def test_full_restart_upstart(self, service, exists): + exists.return_value = True + ovs.full_restart() + service.assert_called_with('start', 'openvswitch-force-reload-kmod') + + @patch('subprocess.check_output') + def test_port_to_br(self, check_output): + check_output.return_value = b'br-ex' + self.assertEqual(ovs.port_to_br('br-lb'), + 'br-ex') + + @patch('subprocess.check_output') + def test_port_to_br_not_found(self, check_output): + check_output.side_effect = subprocess.CalledProcessError(1, 'not found') + self.assertEqual(ovs.port_to_br('br-lb'), None) + + @patch('subprocess.check_call') + def test_enable_ipfix_defaults(self, check_call): + ovs.enable_ipfix('br-int', + '10.5.0.10:4739') + check_call.assert_called_once_with([ + 'ovs-vsctl', 'set', 'Bridge', 'br-int', 'ipfix=@i', '--', + '--id=@i', 'create', 'IPFIX', + 'targets="10.5.0.10:4739"', + 'sampling=64', + 'cache_active_timeout=60', + 'cache_max_flows=128', + ]) + + @patch('subprocess.check_call') + def test_enable_ipfix_values(self, check_call): + ovs.enable_ipfix('br-int', + '10.5.0.10:4739', + sampling=120, + cache_max_flows=24, + cache_active_timeout=120) + check_call.assert_called_once_with([ + 'ovs-vsctl', 'set', 'Bridge', 'br-int', 'ipfix=@i', '--', + '--id=@i', 'create', 'IPFIX', + 'targets="10.5.0.10:4739"', + 'sampling=120', + 'cache_active_timeout=120', + 'cache_max_flows=24', + ]) + + @patch('subprocess.check_call') + def test_disable_ipfix(self, check_call): + ovs.disable_ipfix('br-int') + check_call.assert_called_once_with( + ['ovs-vsctl', 'clear', 'Bridge', 'br-int', 'ipfix'] + ) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/test_ufw.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/test_ufw.py new file mode 100644 index 0000000000000000000000000000000000000000..6eddc021cf0208dd4775c4c27d0ef048295890b0 --- /dev/null +++ 
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/network/test_ufw.py @@ -0,0 +1,550 @@ +from __future__ import print_function + +import mock +import os +import subprocess +import unittest + +from charmhelpers.contrib.network import ufw + +__author__ = 'Felipe Reyes ' + + +LSMOD_NO_IP6 = """Module Size Used by +raid1 39533 1 +psmouse 106548 0 +raid0 17842 0 +ahci 34062 5 +multipath 13145 0 +r8169 71471 0 +libahci 32424 1 ahci +mii 13934 1 r8169 +linear 12894 0 +""" +LSMOD_IP6 = """Module Size Used by +xt_hl 12521 0 +ip6_tables 27026 0 +ip6t_rt 13537 0 +nf_conntrack_ipv6 18894 0 +nf_defrag_ipv6 34769 1 nf_conntrack_ipv6 +xt_recent 18457 0 +xt_LOG 17702 0 +xt_limit 12711 0 +""" +DEFAULT_POLICY_OUTPUT = """Default incoming policy changed to 'deny' +(be sure to update your rules accordingly) +""" +DEFAULT_POLICY_OUTPUT_OUTGOING = """Default outgoing policy changed to 'allow' +(be sure to update your rules accordingly) +""" + +UFW_STATUS_NUMBERED = """Status: active + + To Action From + -- ------ ---- +[ 1] 6641/tcp ALLOW IN 10.219.3.86 # charm-ovn-central +[12] 6641/tcp REJECT IN Anywhere +[19] 6644/tcp (v6) REJECT IN Anywhere (v6) # charm-ovn-central + +""" + + +class TestUFW(unittest.TestCase): + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + @mock.patch('charmhelpers.contrib.network.ufw.modprobe') + def test_enable_ok(self, modprobe, check_output, log): + msg = 'Firewall is active and enabled on system startup\n' + check_output.return_value = msg + self.assertTrue(ufw.enable()) + + check_output.assert_any_call(['ufw', 'enable'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + log.assert_any_call(msg, level='DEBUG') + log.assert_any_call('ufw enabled', level='INFO') + + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + @mock.patch('charmhelpers.contrib.network.ufw.modprobe') + def test_enable_fail(self, modprobe, check_output, log): + msg = 'neneene\n' + check_output.return_value = msg + self.assertFalse(ufw.enable()) + + check_output.assert_any_call(['ufw', 'enable'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + log.assert_any_call(msg, level='DEBUG') + log.assert_any_call("ufw couldn't be enabled", level='WARN') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + def test_disable_ok(self, check_output, log, is_enabled): + is_enabled.return_value = True + msg = 'Firewall stopped and disabled on system startup\n' + check_output.return_value = msg + self.assertTrue(ufw.disable()) + + check_output.assert_any_call(['ufw', 'disable'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + log.assert_any_call(msg, level='DEBUG') + log.assert_any_call('ufw disabled', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + def test_disable_fail(self, check_output, log, is_enabled): + is_enabled.return_value = True + msg = 'neneene\n' + check_output.return_value = msg + self.assertFalse(ufw.disable()) + + check_output.assert_any_call(['ufw', 'disable'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + log.assert_any_call(msg, level='DEBUG') + log.assert_any_call("ufw couldn't be disabled", level='WARN') + + 
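+
+    # NOTE (editorial, illustrative note -- not from charm-helpers):
+    # enable()/disable() are verified by faking check_output and asserting
+    # on the exact argv *and* environment. The env assertion matters:
+    # success is detected by parsing ufw's textual reply, so the module
+    # pins LANG=en_US to keep that output locale-stable, e.g.:
+    #
+    #     check_output.assert_any_call(['ufw', 'enable'],
+    #                                  universal_newlines=True,
+    #                                  env={'LANG': 'en_US',
+    #                                       'PATH': os.environ['PATH']})
+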
@mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + def test_modify_access_ufw_is_disabled(self, check_output, log, + is_enabled): + is_enabled.return_value = False + ufw.modify_access('127.0.0.1') + log.assert_any_call('ufw is disabled, skipping modify_access()', + level='WARN') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_modify_access_allow(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + + ufw.modify_access('127.0.0.1') + popen.assert_any_call(['ufw', 'allow', 'from', '127.0.0.1', 'to', + 'any'], stdout=subprocess.PIPE) + log.assert_any_call('ufw allow: ufw allow from 127.0.0.1 to any', + level='DEBUG') + log.assert_any_call('stdout', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_modify_access_allow_set_proto(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + + ufw.modify_access('127.0.0.1', proto='udp') + popen.assert_any_call(['ufw', 'allow', 'from', '127.0.0.1', 'to', + 'any', 'proto', 'udp'], stdout=subprocess.PIPE) + log.assert_any_call(('ufw allow: ufw allow from 127.0.0.1 ' + 'to any proto udp'), level='DEBUG') + log.assert_any_call('stdout', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_modify_access_allow_set_port(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + + ufw.modify_access('127.0.0.1', port='80') + popen.assert_any_call(['ufw', 'allow', 'from', '127.0.0.1', 'to', + 'any', 'port', '80'], stdout=subprocess.PIPE) + log.assert_any_call(('ufw allow: ufw allow from 127.0.0.1 ' + 'to any port 80'), level='DEBUG') + log.assert_any_call('stdout', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_modify_access_allow_set_dst(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + + ufw.modify_access('127.0.0.1', dst='127.0.0.1', port='80') + popen.assert_any_call(['ufw', 'allow', 'from', '127.0.0.1', 'to', + '127.0.0.1', 'port', '80'], + stdout=subprocess.PIPE) + log.assert_any_call(('ufw allow: ufw allow from 127.0.0.1 ' + 'to 127.0.0.1 port 80'), level='DEBUG') + log.assert_any_call('stdout', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_modify_access_allow_ipv6(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + + ufw.modify_access('::1', dst='::1', port='80') 
+ popen.assert_any_call(['ufw', 'allow', 'from', '::1', 'to', + '::1', 'port', '80'], + stdout=subprocess.PIPE) + log.assert_any_call(('ufw allow: ufw allow from ::1 ' + 'to ::1 port 80'), level='DEBUG') + log.assert_any_call('stdout', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_modify_access_with_index(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + + ufw.modify_access('127.0.0.1', dst='127.0.0.1', port='80', index=1) + popen.assert_any_call(['ufw', 'insert', '1', 'allow', 'from', + '127.0.0.1', 'to', '127.0.0.1', 'port', '80'], + stdout=subprocess.PIPE) + log.assert_any_call(('ufw allow: ufw insert 1 allow from 127.0.0.1 ' + 'to 127.0.0.1 port 80'), level='DEBUG') + log.assert_any_call('stdout', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_modify_access_prepend(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + ufw.modify_access('127.0.0.1', dst='127.0.0.1', port='80', + prepend=True) + popen.assert_any_call(['ufw', 'prepend', 'allow', 'from', '127.0.0.1', + 'to', '127.0.0.1', 'port', '80'], + stdout=subprocess.PIPE) + log.assert_any_call(('ufw allow: ufw prepend allow from 127.0.0.1 ' + 'to 127.0.0.1 port 80'), level='DEBUG') + log.assert_any_call('stdout', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_modify_access_comment(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + ufw.modify_access('127.0.0.1', dst='127.0.0.1', port='80', + comment='No comment') + popen.assert_any_call(['ufw', 'allow', 'from', '127.0.0.1', + 'to', '127.0.0.1', 'port', '80', + 'comment', 'No comment'], + stdout=subprocess.PIPE) + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_modify_access_delete_index(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + ufw.modify_access(None, dst=None, action='delete', index=42) + popen.assert_any_call(['ufw', '--force', 'delete', '42'], + stdout=subprocess.PIPE) + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_grant_access(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + + ufw.grant_access('127.0.0.1', dst='127.0.0.1', port='80') + popen.assert_any_call(['ufw', 'allow', 'from', '127.0.0.1', 'to', + '127.0.0.1', 'port', '80'], + stdout=subprocess.PIPE) + log.assert_any_call(('ufw allow: ufw allow from 127.0.0.1 ' + 'to 127.0.0.1 port 80'), level='DEBUG') + 
log.assert_any_call('stdout', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_grant_access_with_index(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + + ufw.grant_access('127.0.0.1', dst='127.0.0.1', port='80', index=1) + popen.assert_any_call(['ufw', 'insert', '1', 'allow', 'from', + '127.0.0.1', 'to', '127.0.0.1', 'port', '80'], + stdout=subprocess.PIPE) + log.assert_any_call(('ufw allow: ufw insert 1 allow from 127.0.0.1 ' + 'to 127.0.0.1 port 80'), level='DEBUG') + log.assert_any_call('stdout', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.Popen') + def test_revoke_access(self, popen, log, is_enabled): + is_enabled.return_value = True + p = mock.Mock() + p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), + 'returncode': 0}) + popen.return_value = p + + ufw.revoke_access('127.0.0.1', dst='127.0.0.1', port='80') + popen.assert_any_call(['ufw', 'delete', 'allow', 'from', '127.0.0.1', + 'to', '127.0.0.1', 'port', '80'], + stdout=subprocess.PIPE) + log.assert_any_call(('ufw delete: ufw delete allow from 127.0.0.1 ' + 'to 127.0.0.1 port 80'), level='DEBUG') + log.assert_any_call('stdout', level='INFO') + + @mock.patch('subprocess.check_output') + def test_service_open(self, check_output): + ufw.service('ssh', 'open') + check_output.assert_any_call(['ufw', 'allow', 'ssh'], + universal_newlines=True) + + @mock.patch('subprocess.check_output') + def test_service_close(self, check_output): + ufw.service('ssh', 'close') + check_output.assert_any_call(['ufw', 'delete', 'allow', 'ssh'], + universal_newlines=True) + + @mock.patch('subprocess.check_output') + def test_service_unsupport_action(self, check_output): + self.assertRaises(ufw.UFWError, ufw.service, 'ssh', 'nenene') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('os.path.isdir') + @mock.patch('subprocess.call') + @mock.patch('subprocess.check_output') + def test_no_ipv6(self, check_output, call, isdir, log, is_enabled): + check_output.return_value = ('Firewall is active and enabled ' + 'on system startup\n') + isdir.return_value = False + call.return_value = 0 + is_enabled.return_value = False + ufw.enable() + + call.assert_called_with(['sed', '-i', 's/IPV6=.*/IPV6=no/g', + '/etc/default/ufw']) + log.assert_any_call('IPv6 support in ufw disabled', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('os.path.isdir') + @mock.patch('subprocess.call') + @mock.patch('subprocess.check_output') + @mock.patch('charmhelpers.contrib.network.ufw.modprobe') + def test_no_ip6_tables(self, modprobe, check_output, call, isdir, log, + is_enabled): + def c(*args, **kwargs): + if args[0] == ['lsmod']: + return LSMOD_NO_IP6 + elif args[0] == ['modprobe', 'ip6_tables']: + return "" + else: + return 'Firewall is active and enabled on system startup\n' + + check_output.side_effect = c + isdir.return_value = True + call.return_value = 0 + + is_enabled.return_value = False + self.assertTrue(ufw.enable()) + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + 
@mock.patch('os.path.isdir') + @mock.patch('charmhelpers.contrib.network.ufw.modprobe') + @mock.patch('charmhelpers.contrib.network.ufw.is_module_loaded') + def test_no_ip6_tables_fail_to_load(self, is_module_loaded, + modprobe, isdir, log, is_enabled): + is_module_loaded.return_value = False + + def c(m): + raise subprocess.CalledProcessError(1, ['modprobe', + 'ip6_tables'], + "fail to load ip6_tables") + + modprobe.side_effect = c + isdir.return_value = True + is_enabled.return_value = False + + self.assertRaises(ufw.UFWIPv6Error, ufw.enable) + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('os.path.isdir') + @mock.patch('charmhelpers.contrib.network.ufw.modprobe') + @mock.patch('charmhelpers.contrib.network.ufw.is_module_loaded') + @mock.patch('subprocess.call') + @mock.patch('subprocess.check_output') + def test_no_ip6_tables_fail_to_load_soft_fail(self, check_output, + call, is_module_loaded, + modprobe, + isdir, log, is_enabled): + is_module_loaded.return_value = False + + def c(m): + raise subprocess.CalledProcessError(1, ['modprobe', + 'ip6_tables'], + "fail to load ip6_tables") + + modprobe.side_effect = c + isdir.return_value = True + call.return_value = 0 + check_output.return_value = ("Firewall is active and enabled on " + "system startup\n") + is_enabled.return_value = False + self.assertTrue(ufw.enable(soft_fail=True)) + call.assert_called_with(['sed', '-i', 's/IPV6=.*/IPV6=no/g', + '/etc/default/ufw']) + log.assert_any_call('IPv6 support in ufw disabled', level='INFO') + + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('os.path.isdir') + @mock.patch('subprocess.call') + @mock.patch('subprocess.check_output') + def test_no_ipv6_failed_disabling_ufw(self, check_output, call, isdir, + log, is_enabled): + check_output.return_value = ('Firewall is active and enabled ' + 'on system startup\n') + isdir.return_value = False + call.return_value = 1 + is_enabled.return_value = False + self.assertRaises(ufw.UFWError, ufw.enable) + + call.assert_called_with(['sed', '-i', 's/IPV6=.*/IPV6=no/g', + '/etc/default/ufw']) + log.assert_any_call("Couldn't disable IPv6 support in ufw", + level="ERROR") + + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') + @mock.patch('os.path.isdir') + @mock.patch('subprocess.check_output') + @mock.patch('charmhelpers.contrib.network.ufw.modprobe') + def test_with_ipv6(self, modprobe, check_output, isdir, is_enabled, log): + def c(*args, **kwargs): + if args[0] == ['lsmod']: + return LSMOD_IP6 + else: + return 'Firewall is active and enabled on system startup\n' + + check_output.side_effect = c + is_enabled.return_value = False + isdir.return_value = True + ufw.enable() + + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + def test_change_default_policy(self, check_output, log): + check_output.return_value = DEFAULT_POLICY_OUTPUT + self.assertTrue(ufw.default_policy()) + check_output.asser_any_call(['ufw', 'default', 'deny', 'incoming']) + + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + def test_change_default_policy_allow_outgoing(self, check_output, log): + check_output.return_value = DEFAULT_POLICY_OUTPUT_OUTGOING + self.assertTrue(ufw.default_policy('allow', 'outgoing')) + check_output.asser_any_call(['ufw', 'default', 'allow', 'outgoing']) + + 
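+
+    # NOTE (editorial -- not from charm-helpers): the two default-policy
+    # tests above call ``check_output.asser_any_call(...)``. On a Mock, a
+    # misspelled assert method is silently absorbed as a new child mock,
+    # so no assertion actually runs; the spelling that really asserts is:
+    #
+    #     check_output.assert_any_call(['ufw', 'default', 'deny', 'incoming'])
+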
@mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + def test_change_default_policy_unexpected_output(self, check_output, log): + check_output.return_value = "asdf" + self.assertFalse(ufw.default_policy()) + + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + def test_change_default_policy_wrong_policy(self, check_output, log): + self.assertRaises(ufw.UFWError, ufw.default_policy, 'asdf') + + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + def test_change_default_policy_wrong_direction(self, check_output, log): + self.assertRaises(ufw.UFWError, ufw.default_policy, 'allow', 'asdf') + + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + @mock.patch('charmhelpers.contrib.network.ufw.modprobe') + def test_reload_ok(self, modprobe, check_output, log): + msg = 'Firewall reloaded\n' + check_output.return_value = msg + self.assertTrue(ufw.reload()) + + check_output.assert_any_call(['ufw', 'reload'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + log.assert_any_call(msg, level='DEBUG') + log.assert_any_call('ufw reloaded', level='INFO') + + @mock.patch('charmhelpers.core.hookenv.log') + @mock.patch('subprocess.check_output') + @mock.patch('charmhelpers.contrib.network.ufw.modprobe') + def test_reload_fail(self, modprobe, check_output, log): + msg = 'This did not work\n' + check_output.return_value = msg + self.assertFalse(ufw.reload()) + + check_output.assert_any_call(['ufw', 'reload'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + log.assert_any_call(msg, level='DEBUG') + log.assert_any_call("ufw couldn't be reloaded", level='WARN') + + def test_status(self): + with mock.patch('subprocess.check_output') as check_output: + check_output.return_value = UFW_STATUS_NUMBERED + expect = { + 1: {'to': '6641/tcp', 'action': 'allow in', + 'from': '10.219.3.86', 'ipv6': False, + 'comment': 'charm-ovn-central'}, + 12: {'to': '6641/tcp', 'action': 'reject in', + 'from': 'any', 'ipv6': False, + 'comment': ''}, + 19: {'to': '6644/tcp', 'action': 'reject in', + 'from': 'any', 'ipv6': True, + 'comment': 'charm-ovn-central'}, + } + n_rules = 0 + for n, r in ufw.status(): + self.assertDictEqual(r, expect[n]) + n_rules += 1 + self.assertEquals(n_rules, 3) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/ha/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/ha/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/ha/test_ha_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/ha/test_ha_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a83963211d1079e0237492a1fb162046fbf52b49 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/ha/test_ha_utils.py @@ -0,0 +1,445 @@ +from mock import patch +import unittest +import json + 
+from charmhelpers.contrib.openstack.ha import utils as ha + +IFACE_LOOKUPS = { + '10.5.100.1': 'eth1', + 'ffff::1': 'eth1', + 'ffaa::1': 'eth2', +} + +NETMASK_LOOKUPS = { + '10.5.100.1': '255.255.255.0', + 'ffff::1': '64', + 'ffaa::1': '32', +} + + +class HATests(unittest.TestCase): + def setUp(self): + super(HATests, self).setUp() + [self._patch(m) for m in [ + 'charm_name', + 'config', + 'relation_set', + 'resolve_address', + 'status_set', + 'get_hacluster_config', + 'get_iface_for_address', + 'get_netmask_for_address', + ]] + self.resources = {'res_test_haproxy': 'lsb:haproxy'} + self.resource_params = {'res_test_haproxy': 'op monitor interval="5s"'} + self.conf = {} + self.config.side_effect = lambda key: self.conf.get(key) + self.maxDiff = None + self.get_iface_for_address.side_effect = \ + lambda x: IFACE_LOOKUPS.get(x) + self.get_netmask_for_address.side_effect = \ + lambda x: NETMASK_LOOKUPS.get(x) + + def _patch(self, method): + _m = patch.object(ha, method) + mock = _m.start() + self.addCleanup(_m.stop) + setattr(self, method, mock) + + @patch.object(ha, 'log', lambda *args, **kwargs: None) + @patch.object(ha, 'assert_charm_supports_dns_ha') + def test_update_dns_ha_resource_params_none(self, + assert_charm_supports_dns_ha): + self.conf = { + 'os-admin-hostname': None, + 'os-internal-hostname': None, + 'os-public-hostname': None, + } + + with self.assertRaises(ha.DNSHAException): + ha.update_dns_ha_resource_params( + relation_id='ha:1', + resources=self.resources, + resource_params=self.resource_params) + + @patch.object(ha, 'log', lambda *args, **kwargs: None) + @patch.object(ha, 'assert_charm_supports_dns_ha') + def test_update_dns_ha_resource_params_one(self, + assert_charm_supports_dns_ha): + EXPECTED_RESOURCES = {'res_test_public_hostname': 'ocf:maas:dns', + 'res_test_haproxy': 'lsb:haproxy'} + EXPECTED_RESOURCE_PARAMS = { + 'res_test_public_hostname': ('params fqdn="test.maas" ' + 'ip_address="10.0.0.1"'), + 'res_test_haproxy': 'op monitor interval="5s"'} + + self.conf = { + 'os-admin-hostname': None, + 'os-internal-hostname': None, + 'os-public-hostname': 'test.maas', + } + + self.charm_name.return_value = 'test' + self.resolve_address.return_value = '10.0.0.1' + ha.update_dns_ha_resource_params(relation_id='ha:1', + resources=self.resources, + resource_params=self.resource_params) + self.assertEqual(self.resources, EXPECTED_RESOURCES) + self.assertEqual(self.resource_params, EXPECTED_RESOURCE_PARAMS) + self.relation_set.assert_called_with( + groups={'grp_test_hostnames': 'res_test_public_hostname'}, + relation_id='ha:1') + + @patch.object(ha, 'log', lambda *args, **kwargs: None) + @patch.object(ha, 'assert_charm_supports_dns_ha') + def test_update_dns_ha_resource_params_all(self, + assert_charm_supports_dns_ha): + EXPECTED_RESOURCES = {'res_test_admin_hostname': 'ocf:maas:dns', + 'res_test_int_hostname': 'ocf:maas:dns', + 'res_test_public_hostname': 'ocf:maas:dns', + 'res_test_haproxy': 'lsb:haproxy'} + EXPECTED_RESOURCE_PARAMS = { + 'res_test_admin_hostname': ('params fqdn="test.admin.maas" ' + 'ip_address="10.0.0.1"'), + 'res_test_int_hostname': ('params fqdn="test.internal.maas" ' + 'ip_address="10.0.0.1"'), + 'res_test_public_hostname': ('params fqdn="test.public.maas" ' + 'ip_address="10.0.0.1"'), + 'res_test_haproxy': 'op monitor interval="5s"'} + + self.conf = { + 'os-admin-hostname': 'test.admin.maas', + 'os-internal-hostname': 'test.internal.maas', + 'os-public-hostname': 'test.public.maas', + } + + self.charm_name.return_value = 'test' + 
self.resolve_address.return_value = '10.0.0.1' + ha.update_dns_ha_resource_params(relation_id='ha:1', + resources=self.resources, + resource_params=self.resource_params) + self.assertEqual(self.resources, EXPECTED_RESOURCES) + self.assertEqual(self.resource_params, EXPECTED_RESOURCE_PARAMS) + self.relation_set.assert_called_with( + groups={'grp_test_hostnames': + ('res_test_admin_hostname ' + 'res_test_int_hostname ' + 'res_test_public_hostname')}, + relation_id='ha:1') + + @patch.object(ha, 'lsb_release') + def test_assert_charm_supports_dns_ha(self, lsb_release): + lsb_release.return_value = {'DISTRIB_RELEASE': '16.04'} + self.assertTrue(ha.assert_charm_supports_dns_ha()) + + @patch.object(ha, 'lsb_release') + def test_assert_charm_supports_dns_ha_exception(self, lsb_release): + lsb_release.return_value = {'DISTRIB_RELEASE': '12.04'} + self.assertRaises(ha.DNSHAException, + lambda: ha.assert_charm_supports_dns_ha()) + + @patch.object(ha, 'expected_related_units') + def tests_expect_ha(self, expected_related_units): + expected_related_units.return_value = (x for x in []) + self.conf = {'vip': None, + 'dns-ha': None} + self.assertFalse(ha.expect_ha()) + + expected_related_units.return_value = (x for x in ['hacluster-unit/0', + 'hacluster-unit/1', + 'hacluster-unit/2']) + self.conf = {'vip': None, + 'dns-ha': None} + self.assertTrue(ha.expect_ha()) + + expected_related_units.side_effect = NotImplementedError + self.conf = {'vip': '10.0.0.1', + 'dns-ha': None} + self.assertTrue(ha.expect_ha()) + + self.conf = {'vip': None, + 'dns-ha': True} + self.assertTrue(ha.expect_ha()) + + def test_get_vip_settings(self): + self.assertEqual( + ha.get_vip_settings('10.5.100.1'), + ('eth1', '255.255.255.0', False)) + + def test_get_vip_settings_fallback(self): + self.conf = {'vip_iface': 'eth3', + 'vip_cidr': '255.255.0.0'} + self.assertEqual( + ha.get_vip_settings('192.168.100.1'), + ('eth3', '255.255.0.0', True)) + + def test_update_hacluster_vip_single_vip(self): + self.get_hacluster_config.return_value = { + 'vip': '10.5.100.1' + } + test_data = {'resources': {}, 'resource_params': {}} + expected = { + 'delete_resources': ['res_testservice_eth1_vip'], + 'groups': { + 'grp_testservice_vips': 'res_testservice_242d562_vip' + }, + 'resource_params': { + 'res_testservice_242d562_vip': + ('params ip="10.5.100.1" op monitor ' + 'timeout="20s" interval="10s" depth="0"') + }, + 'resources': { + 'res_testservice_242d562_vip': 'ocf:heartbeat:IPaddr2' + } + } + ha.update_hacluster_vip('testservice', test_data) + self.assertEqual(test_data, expected) + + def test_update_hacluster_vip_single_vip_fallback(self): + self.get_hacluster_config.return_value = { + 'vip': '10.5.100.1' + } + test_data = {'resources': {}, 'resource_params': {}} + expected = { + 'delete_resources': ['res_testservice_eth1_vip'], + 'groups': { + 'grp_testservice_vips': 'res_testservice_242d562_vip' + }, + 'resource_params': { + 'res_testservice_242d562_vip': + ('params ip="10.5.100.1" op monitor ' + 'timeout="20s" interval="10s" depth="0"') + }, + 'resources': { + 'res_testservice_242d562_vip': 'ocf:heartbeat:IPaddr2' + } + } + ha.update_hacluster_vip('testservice', test_data) + self.assertEqual(test_data, expected) + + def test_update_hacluster_config_vip(self): + self.get_iface_for_address.side_effect = lambda x: None + self.get_netmask_for_address.side_effect = lambda x: None + self.conf = {'vip_iface': 'eth1', + 'vip_cidr': '255.255.255.0'} + self.get_hacluster_config.return_value = { + 'vip': '10.5.100.1' + } + test_data = {'resources': 
{}, 'resource_params': {}} + expected = { + 'delete_resources': ['res_testservice_eth1_vip'], + 'groups': { + 'grp_testservice_vips': 'res_testservice_242d562_vip' + }, + 'resource_params': { + 'res_testservice_242d562_vip': ( + 'params ip="10.5.100.1" cidr_netmask="255.255.255.0" ' + 'nic="eth1" op monitor timeout="20s" ' + 'interval="10s" depth="0"') + + }, + 'resources': { + 'res_testservice_242d562_vip': 'ocf:heartbeat:IPaddr2' + } + } + ha.update_hacluster_vip('testservice', test_data) + self.assertEqual(test_data, expected) + + def test_update_hacluster_vip_multiple_vip(self): + self.get_hacluster_config.return_value = { + 'vip': '10.5.100.1 ffff::1 ffaa::1' + } + test_data = {'resources': {}, 'resource_params': {}} + expected = { + 'groups': { + 'grp_testservice_vips': ('res_testservice_242d562_vip ' + 'res_testservice_856d56f_vip ' + 'res_testservice_f563c5d_vip') + }, + 'delete_resources': ['res_testservice_eth1_vip', + 'res_testservice_eth1_vip_ipv6addr', + 'res_testservice_eth2_vip'], + 'resource_params': { + 'res_testservice_242d562_vip': + ('params ip="10.5.100.1" op monitor ' + 'timeout="20s" interval="10s" depth="0"'), + 'res_testservice_856d56f_vip': + ('params ipv6addr="ffff::1" op monitor ' + 'timeout="20s" interval="10s" depth="0"'), + 'res_testservice_f563c5d_vip': + ('params ipv6addr="ffaa::1" op monitor ' + 'timeout="20s" interval="10s" depth="0"'), + }, + 'resources': { + 'res_testservice_242d562_vip': 'ocf:heartbeat:IPaddr2', + 'res_testservice_856d56f_vip': 'ocf:heartbeat:IPv6addr', + 'res_testservice_f563c5d_vip': 'ocf:heartbeat:IPv6addr', + } + } + ha.update_hacluster_vip('testservice', test_data) + self.assertEqual(test_data, expected) + + def test_generate_ha_relation_data_haproxy_disabled(self): + self.get_hacluster_config.return_value = { + 'vip': '10.5.100.1 ffff::1 ffaa::1' + } + extra_settings = { + 'colocations': {'vip_cauth': 'inf: res_nova_cauth grp_nova_vips'}, + 'init_services': {'res_nova_cauth': 'nova-cauth'}, + 'delete_resources': ['res_ceilometer_polling'], + 'groups': {'grp_testservice_wombles': 'res_testservice_orinoco'}, + } + expected = { + 'colocations': {'vip_cauth': 'inf: res_nova_cauth grp_nova_vips'}, + 'groups': { + 'grp_testservice_vips': ('res_testservice_242d562_vip ' + 'res_testservice_856d56f_vip ' + 'res_testservice_f563c5d_vip'), + 'grp_testservice_wombles': 'res_testservice_orinoco' + }, + 'resource_params': { + 'res_testservice_242d562_vip': + ('params ip="10.5.100.1" op monitor ' + 'timeout="20s" interval="10s" depth="0"'), + 'res_testservice_856d56f_vip': + ('params ipv6addr="ffff::1" op monitor ' + 'timeout="20s" interval="10s" depth="0"'), + 'res_testservice_f563c5d_vip': + ('params ipv6addr="ffaa::1" op monitor ' + 'timeout="20s" interval="10s" depth="0"'), + }, + 'resources': { + 'res_testservice_242d562_vip': 'ocf:heartbeat:IPaddr2', + 'res_testservice_856d56f_vip': 'ocf:heartbeat:IPv6addr', + 'res_testservice_f563c5d_vip': 'ocf:heartbeat:IPv6addr', + }, + 'clones': {}, + 'init_services': { + 'res_nova_cauth': 'nova-cauth' + }, + 'delete_resources': ["res_ceilometer_polling", + "res_testservice_eth1_vip", + "res_testservice_eth1_vip_ipv6addr", + "res_testservice_eth2_vip"], + } + expected = { + 'json_{}'.format(k): json.dumps(v, **ha.JSON_ENCODE_OPTIONS) + for k, v in expected.items() if v + } + self.assertEqual( + ha.generate_ha_relation_data('testservice', + haproxy_enabled=False, + extra_settings=extra_settings), + expected) + + def test_generate_ha_relation_data(self): + self.get_hacluster_config.return_value = { + 
'vip': '10.5.100.1 ffff::1 ffaa::1' + } + extra_settings = { + 'colocations': {'vip_cauth': 'inf: res_nova_cauth grp_nova_vips'}, + 'init_services': {'res_nova_cauth': 'nova-cauth'}, + 'delete_resources': ['res_ceilometer_polling'], + 'groups': {'grp_testservice_wombles': 'res_testservice_orinoco'}, + } + expected = { + 'colocations': {'vip_cauth': 'inf: res_nova_cauth grp_nova_vips'}, + 'groups': { + 'grp_testservice_vips': ('res_testservice_242d562_vip ' + 'res_testservice_856d56f_vip ' + 'res_testservice_f563c5d_vip'), + 'grp_testservice_wombles': 'res_testservice_orinoco' + }, + 'resource_params': { + 'res_testservice_242d562_vip': + ('params ip="10.5.100.1" op monitor ' + 'timeout="20s" interval="10s" depth="0"'), + 'res_testservice_856d56f_vip': + ('params ipv6addr="ffff::1" op monitor ' + 'timeout="20s" interval="10s" depth="0"'), + 'res_testservice_f563c5d_vip': + ('params ipv6addr="ffaa::1" op monitor ' + 'timeout="20s" interval="10s" depth="0"'), + 'res_testservice_haproxy': + ('meta migration-threshold="INFINITY" failure-timeout="5s" ' + 'op monitor interval="5s"'), + }, + 'resources': { + 'res_testservice_242d562_vip': 'ocf:heartbeat:IPaddr2', + 'res_testservice_856d56f_vip': 'ocf:heartbeat:IPv6addr', + 'res_testservice_f563c5d_vip': 'ocf:heartbeat:IPv6addr', + 'res_testservice_haproxy': 'lsb:haproxy', + }, + 'clones': { + 'cl_testservice_haproxy': 'res_testservice_haproxy', + }, + 'init_services': { + 'res_testservice_haproxy': 'haproxy', + 'res_nova_cauth': 'nova-cauth' + }, + 'delete_resources': ["res_ceilometer_polling", + "res_testservice_eth1_vip", + "res_testservice_eth1_vip_ipv6addr", + "res_testservice_eth2_vip"], + } + expected = { + 'json_{}'.format(k): json.dumps(v, **ha.JSON_ENCODE_OPTIONS) + for k, v in expected.items() if v + } + self.assertEqual( + ha.generate_ha_relation_data('testservice', + extra_settings=extra_settings), + expected) + + @patch.object(ha, 'log') + @patch.object(ha, 'assert_charm_supports_dns_ha') + def test_generate_ha_relation_data_dns_ha(self, + assert_charm_supports_dns_ha, + log): + self.get_hacluster_config.return_value = { + 'vip': '10.5.100.1 ffff::1 ffaa::1' + } + self.conf = { + 'os-admin-hostname': 'test.admin.maas', + 'os-internal-hostname': 'test.internal.maas', + 'os-public-hostname': 'test.public.maas', + 'dns-ha': True, + } + self.resolve_address.return_value = '10.0.0.1' + assert_charm_supports_dns_ha.return_value = True + expected = { + 'groups': { + 'grp_testservice_hostnames': ('res_testservice_admin_hostname' + ' res_testservice_int_hostname' + ' res_testservice_public_hostname') + }, + 'resource_params': { + 'res_testservice_admin_hostname': + 'params fqdn="test.admin.maas" ip_address="10.0.0.1"', + 'res_testservice_int_hostname': + 'params fqdn="test.internal.maas" ip_address="10.0.0.1"', + 'res_testservice_public_hostname': + 'params fqdn="test.public.maas" ip_address="10.0.0.1"', + 'res_testservice_haproxy': + ('meta migration-threshold="INFINITY" failure-timeout="5s" ' + 'op monitor interval="5s"'), + }, + 'resources': { + 'res_testservice_admin_hostname': 'ocf:maas:dns', + 'res_testservice_int_hostname': 'ocf:maas:dns', + 'res_testservice_public_hostname': 'ocf:maas:dns', + 'res_testservice_haproxy': 'lsb:haproxy', + }, + 'clones': { + 'cl_testservice_haproxy': 'res_testservice_haproxy', + }, + 'init_services': { + 'res_testservice_haproxy': 'haproxy' + }, + } + expected = { + 'json_{}'.format(k): json.dumps(v, **ha.JSON_ENCODE_OPTIONS) + for k, v in expected.items() if v + } + 
self.assertEqual(ha.generate_ha_relation_data('testservice'), + expected) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_alternatives.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_alternatives.py new file mode 100644 index 0000000000000000000000000000000000000000..402f0d42088c7e66afd043028216dd5b76186cdc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_alternatives.py @@ -0,0 +1,76 @@ +from testtools import TestCase +from mock import patch + +import charmhelpers.contrib.openstack.alternatives as alternatives + + +NAME = 'test' +SOURCE = '/var/lib/charm/test/test.conf' +TARGET = '/etc/test/test,conf' + + +class AlternativesTestCase(TestCase): + + @patch('subprocess.os.path') + @patch('subprocess.check_call') + def test_new_alternative(self, _check, _path): + _path.exists.return_value = False + alternatives.install_alternative(NAME, + TARGET, + SOURCE) + _check.assert_called_with( + ['update-alternatives', '--force', '--install', + TARGET, NAME, SOURCE, '50'] + ) + + @patch('subprocess.os.path') + @patch('subprocess.check_call') + def test_priority(self, _check, _path): + _path.exists.return_value = False + alternatives.install_alternative(NAME, + TARGET, + SOURCE, 100) + _check.assert_called_with( + ['update-alternatives', '--force', '--install', + TARGET, NAME, SOURCE, '100'] + ) + + @patch('shutil.move') + @patch('subprocess.os.path') + @patch('subprocess.check_call') + def test_new_alternative_existing_file(self, _check, + _path, _move): + _path.exists.return_value = True + _path.islink.return_value = False + alternatives.install_alternative(NAME, + TARGET, + SOURCE) + _check.assert_called_with( + ['update-alternatives', '--force', '--install', + TARGET, NAME, SOURCE, '50'] + ) + _move.assert_called_with(TARGET, '{}.bak'.format(TARGET)) + + @patch('shutil.move') + @patch('subprocess.os.path') + @patch('subprocess.check_call') + def test_new_alternative_existing_link(self, _check, + _path, _move): + _path.exists.return_value = True + _path.islink.return_value = True + alternatives.install_alternative(NAME, + TARGET, + SOURCE) + _check.assert_called_with( + ['update-alternatives', '--force', '--install', + TARGET, NAME, SOURCE, '50'] + ) + _move.assert_not_called() + + @patch('subprocess.check_call') + def test_remove_alternative(self, _check): + alternatives.remove_alternative(NAME, SOURCE) + _check.assert_called_with( + ['update-alternatives', '--remove', + NAME, SOURCE] + ) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_audits.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_audits.py new file mode 100644 index 0000000000000000000000000000000000000000..930292dc1bcc4a7973e49d6cedfce3aae3d0f5d7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_audits.py @@ -0,0 +1,329 @@ +from testtools import TestCase, skipIf +from mock import patch, MagicMock, call +import six + +import charmhelpers.contrib.openstack.audits as audits +import charmhelpers.contrib.openstack.audits.openstack_security_guide as guide + + +@skipIf(six.PY2, 'Audits only support Python3') +class AuditTestCase(TestCase): + + @patch('charmhelpers.contrib.openstack.audits._audits', {}) + def test_wrapper(self): + variables = { + 'guard_called': False, + 'test_run': False, + } + + def should_run(audit_options): + 
variables['guard_called'] = True + return True + + @audits.audit(should_run) + def test(options): + variables['test_run'] = True + + audits.run({}) + self.assertTrue(variables['guard_called']) + self.assertTrue(variables['test_run']) + self.assertEqual(audits._audits['test'], audits.Audit(test, (should_run,))) + + @patch('charmhelpers.contrib.openstack.audits._audits', {}) + def test_wrapper_not_run(self): + variables = { + 'guard_called': False, + 'test_run': False, + } + + def should_run(audit_options): + variables['guard_called'] = True + return False + + @audits.audit(should_run) + def test(options): + variables['test_run'] = True + + audits.run({}) + self.assertTrue(variables['guard_called']) + self.assertFalse(variables['test_run']) + self.assertEqual(audits._audits['test'], audits.Audit(test, (should_run,))) + + @patch('charmhelpers.contrib.openstack.audits._audits', {}) + def test_duplicate_audit(self): + def should_run(audit_options): + return True + + @audits.audit(should_run) + def test(options): + pass + + try: + # Again! + # + # Both of the following '#noqa's are to prevent flake8 from + # noticing the duplicate function `test` The intent in this test + # is for the audits.audit to pick up on the duplicate function. + @audits.audit(should_run) # noqa + def test(options): # noqa + pass + except RuntimeError as e: + self.assertEqual("Test name 'test' used more than once", e.args[0]) + return + self.assertTrue(False, "Duplicate audit should raise an exception") + + @patch('charmhelpers.contrib.openstack.audits._audits', {}) + def test_non_callable_filter(self): + try: + # Again! + @audits.audit(3) + def test(options): + pass + except RuntimeError as e: + self.assertEqual("Configuration includes non-callable filters: [3]", e.args[0]) + return + self.assertTrue(False, "Duplicate audit should raise an exception") + + @patch('charmhelpers.contrib.openstack.audits._audits', {}) + def test_exclude_config(self): + variables = { + 'test_run': False, + } + + @audits.audit() + def test(options): + variables['test_run'] = True + + audits.run({'excludes': ['test']}) + self.assertFalse(variables['test_run']) + + +class AuditsTestCase(TestCase): + + @patch('charmhelpers.contrib.openstack.audits.cmp_pkgrevno') + def test_since_package_less(self, _cmp_pkgrevno): + _cmp_pkgrevno.return_value = 1 + + verifier = audits.since_package('test', '12.0.0') + self.assertEqual(verifier(), True) + + @patch('charmhelpers.contrib.openstack.audits.cmp_pkgrevno') + def test_since_package_greater(self, _cmp_pkgrevno): + _cmp_pkgrevno.return_value = -1 + + verifier = audits.since_package('test', '14.0.0') + self.assertEqual(verifier(), False) + + @patch('charmhelpers.contrib.openstack.audits.cmp_pkgrevno') + def test_since_package_equal(self, _cmp_pkgrevno): + _cmp_pkgrevno.return_value = 0 + + verifier = audits.since_package('test', '13.0.0') + self.assertEqual(verifier(), True) + + @patch('charmhelpers.contrib.openstack.utils.get_os_codename_package') + def test_since_openstack_less(self, _get_os_codename_package): + _get_os_codename_package.return_value = "icehouse" + + verifier = audits.since_openstack_release('test', 'mitaka') + self.assertEqual(verifier(), False) + + @patch('charmhelpers.contrib.openstack.utils.get_os_codename_package') + def test_since_openstack_greater(self, _get_os_codename_package): + _get_os_codename_package.return_value = "rocky" + + verifier = audits.since_openstack_release('test', 'queens') + self.assertEqual(verifier(), True) + + 
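+
+    # NOTE (editorial, illustrative sketch -- not from charm-helpers): the
+    # guard factories under test return closures, so audits can be declared
+    # with predicates that are only evaluated at run time. cmp_pkgrevno
+    # follows cmp() semantics (-1/0/1 for installed <, ==, > required),
+    # hence since_package() holds for 0 and 1, and since_openstack_release()
+    # compares release codenames the same way. Usage (hypothetical audit
+    # name) mirrors the decorated functions in AuditTestCase:
+    #
+    #     @audits.audit(audits.since_package('test', '12.0.0'))
+    #     def my_audit(audit_options):   # runs only when pkg >= 12.0.0
+    #         ...
+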
@patch('charmhelpers.contrib.openstack.utils.get_os_codename_package') + def test_since_openstack_equal(self, _get_os_codename_package): + _get_os_codename_package.return_value = "mitaka" + + verifier = audits.since_openstack_release('test', 'mitaka') + self.assertEqual(verifier(), True) + + @patch('charmhelpers.contrib.openstack.utils.get_os_codename_package') + def test_before_openstack_less(self, _get_os_codename_package): + _get_os_codename_package.return_value = "icehouse" + + verifier = audits.before_openstack_release('test', 'mitaka') + self.assertEqual(verifier(), True) + + @patch('charmhelpers.contrib.openstack.utils.get_os_codename_package') + def test_before_openstack_greater(self, _get_os_codename_package): + _get_os_codename_package.return_value = "rocky" + + verifier = audits.before_openstack_release('test', 'queens') + self.assertEqual(verifier(), False) + + @patch('charmhelpers.contrib.openstack.utils.get_os_codename_package') + def test_before_openstack_equal(self, _get_os_codename_package): + _get_os_codename_package.return_value = "mitaka" + + verifier = audits.before_openstack_release('test', 'mitaka') + self.assertEqual(verifier(), False) + + @patch('charmhelpers.contrib.openstack.audits.cmp_pkgrevno') + def test_before_package_less(self, _cmp_pkgrevno): + _cmp_pkgrevno.return_value = 1 + + verifier = audits.before_package('test', '12.0.0') + self.assertEqual(verifier(), False) + + @patch('charmhelpers.contrib.openstack.audits.cmp_pkgrevno') + def test_before_package_greater(self, _cmp_pkgrevno): + _cmp_pkgrevno.return_value = -1 + + verifier = audits.before_package('test', '14.0.0') + self.assertEqual(verifier(), True) + + @patch('charmhelpers.contrib.openstack.audits.cmp_pkgrevno') + def test_before_package_equal(self, _cmp_pkgrevno): + _cmp_pkgrevno.return_value = 0 + + verifier = audits.before_package('test', '13.0.0') + self.assertEqual(verifier(), False) + + def test_is_audit_type_empty(self): + verifier = audits.is_audit_type(audits.AuditType.OpenStackSecurityGuide) + self.assertEqual(verifier({}), False) + + def test_is_audit_type(self): + verifier = audits.is_audit_type(audits.AuditType.OpenStackSecurityGuide) + self.assertEqual(verifier({'audit_type': audits.AuditType.OpenStackSecurityGuide}), True) + + +@skipIf(six.PY2, 'Audits only support Python3') +class OpenstackSecurityGuideTestCase(TestCase): + + @patch('configparser.ConfigParser') + def test_internal_config_parser_is_not_strict(self, _config_parser): + parser = MagicMock() + _config_parser.return_value = parser + guide._config_ini('test') + _config_parser.assert_called_with(strict=False) + parser.read.assert_called_with('test') + + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._stat') + def test_internal_validate_file_ownership(self, _stat): + _stat.return_value = guide.Ownership('test_user', 'test_group', '600') + guide._validate_file_ownership('test_user', 'test_group', 'test-file-name') + _stat.assert_called_with('test-file-name') + pass + + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._stat') + def test_internal_validate_file_mode(self, _stat): + _stat.return_value = guide.Ownership('test_user', 'test_group', '600') + guide._validate_file_mode('600', 'test-file-name') + _stat.assert_called_with('test-file-name') + pass + + @patch('os.path.isfile') + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._validate_file_mode') + def test_validate_file_permissions_defaults(self, _validate_mode, _is_file): + _is_file.return_value = 
True + config = { + 'files': { + 'test': {} + } + } + guide.validate_file_permissions(config) + _validate_mode.assert_called_once_with('600', 'test', False) + + @patch('os.path.isfile') + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._validate_file_mode') + def test_validate_file_permissions(self, _validate_mode, _is_file): + _is_file.return_value = True + config = { + 'files': { + 'test': { + 'mode': '777' + } + } + } + guide.validate_file_permissions(config) + _validate_mode.assert_called_once_with('777', 'test', False) + + @patch('glob.glob') + @patch('os.path.isfile') + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._validate_file_mode') + def test_validate_file_permissions_glob(self, _validate_mode, _is_file, _glob): + _glob.return_value = ['test'] + _is_file.return_value = True + config = { + 'files': { + '*': { + 'mode': '777' + } + } + } + guide.validate_file_permissions(config) + _validate_mode.assert_called_once_with('777', 'test', False) + + @patch('os.path.isfile') + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._validate_file_ownership') + def test_validate_file_ownership_defaults(self, _validate_owner, _is_file): + _is_file.return_value = True + config = { + 'files': { + 'test': {} + } + } + guide.validate_file_ownership(config) + _validate_owner.assert_called_once_with('root', 'root', 'test', False) + + @patch('os.path.isfile') + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._validate_file_ownership') + def test_validate_file_ownership(self, _validate_owner, _is_file): + _is_file.return_value = True + config = { + 'files': { + 'test': { + 'owner': 'test-user', + 'group': 'test-group', + } + } + } + guide.validate_file_ownership(config) + _validate_owner.assert_called_once_with('test-user', 'test-group', 'test', False) + + @patch('glob.glob') + @patch('os.path.isfile') + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._validate_file_ownership') + def test_validate_file_ownership_glob(self, _validate_owner, _is_file, _glob): + _glob.return_value = ['test'] + _is_file.return_value = True + config = { + 'files': { + '*': { + 'owner': 'test-user', + 'group': 'test-group', + } + } + } + guide.validate_file_ownership(config) + _validate_owner.assert_called_once_with('test-user', 'test-group', 'test', False) + + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._config_section') + def test_validate_uses_keystone(self, _config_section): + _config_section.side_effect = [None, { + 'auth_strategy': 'keystone', + }] + guide.validate_uses_keystone({}) + _config_section.assert_has_calls([call({}, 'api'), call({}, 'DEFAULT')]) + + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._config_section') + def test_validate_uses_tls_for_keystone(self, _config_section): + _config_section.return_value = { + 'auth_uri': 'https://10.10.10.10', + } + guide.validate_uses_tls_for_keystone({}) + _config_section.assert_called_with({}, 'keystone_authtoken') + + @patch('charmhelpers.contrib.openstack.audits.openstack_security_guide._config_section') + def test_validate_uses_tls_for_glance(self, _config_section): + _config_section.return_value = { + 'api_servers': 'https://10.10.10.10', + } + guide.validate_uses_tls_for_glance({}) + _config_section.assert_called_with({}, 'glance') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_cert_utils.py 
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_cert_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ce4117d3942253c9eb34eca69f8762c5aa768d33 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_cert_utils.py @@ -0,0 +1,308 @@ +import json +import mock +import unittest + +import charmhelpers.contrib.openstack.cert_utils as cert_utils + + +class CertUtilsTests(unittest.TestCase): + + def test_CertRequest(self): + cr = cert_utils.CertRequest() + self.assertEqual(cr.entries, []) + self.assertIsNone(cr.hostname_entry) + + @mock.patch.object(cert_utils, 'local_unit', return_value='unit/2') + def test_CertRequest_add_entry(self, local_unit): + cr = cert_utils.CertRequest() + cr.add_entry('admin', 'admin.openstack.local', ['10.10.10.10']) + self.assertEqual( + cr.get_request(), + {'cert_requests': + '{"admin.openstack.local": {"sans": ["10.10.10.10"]}}', + 'unit_name': 'unit_2'}) + + @mock.patch.object(cert_utils, 'local_unit', return_value='unit/2') + @mock.patch.object(cert_utils, 'resolve_network_cidr') + @mock.patch.object(cert_utils, 'get_vip_in_network') + @mock.patch.object(cert_utils, 'get_hostname') + @mock.patch.object(cert_utils, 'unit_get') + def test_CertRequest_add_hostname_cn(self, unit_get, get_hostname, + get_vip_in_network, + resolve_network_cidr, local_unit): + resolve_network_cidr.side_effect = lambda x: x + get_vip_in_network.return_value = '10.1.2.100' + unit_get.return_value = '10.1.2.3' + get_hostname.return_value = 'juju-unit-2' + cr = cert_utils.CertRequest() + cr.add_hostname_cn() + self.assertEqual( + cr.get_request(), + {'cert_requests': + '{"juju-unit-2": {"sans": ["10.1.2.100", "10.1.2.3"]}}', + 'unit_name': 'unit_2'}) + + @mock.patch.object(cert_utils, 'local_unit', return_value='unit/2') + @mock.patch.object(cert_utils, 'resolve_network_cidr') + @mock.patch.object(cert_utils, 'get_vip_in_network') + @mock.patch.object(cert_utils, 'get_hostname') + @mock.patch.object(cert_utils, 'unit_get') + def test_CertRequest_add_hostname_cn_ip(self, unit_get, get_hostname, + get_vip_in_network, + resolve_network_cidr, local_unit): + resolve_network_cidr.side_effect = lambda x: x + get_vip_in_network.return_value = '10.1.2.100' + unit_get.return_value = '10.1.2.3' + get_hostname.return_value = 'juju-unit-2' + cr = cert_utils.CertRequest() + cr.add_hostname_cn() + cr.add_hostname_cn_ip(['10.1.2.4']) + self.assertEqual( + cr.get_request(), + {'cert_requests': + ('{"juju-unit-2": {"sans": ["10.1.2.100", "10.1.2.3", ' + '"10.1.2.4"]}}'), + 'unit_name': 'unit_2'}) + + @mock.patch.object(cert_utils, 'local_unit', return_value='unit/2') + @mock.patch.object(cert_utils, 'resolve_network_cidr') + @mock.patch.object(cert_utils, 'get_vip_in_network') + @mock.patch.object(cert_utils, 'network_get_primary_address') + @mock.patch.object(cert_utils, 'resolve_address') + @mock.patch.object(cert_utils, 'config') + @mock.patch.object(cert_utils, 'get_hostname') + @mock.patch.object(cert_utils, 'unit_get') + def test_get_certificate_request(self, unit_get, get_hostname, + config, resolve_address, + network_get_primary_address, + get_vip_in_network, resolve_network_cidr, + local_unit): + unit_get.return_value = '10.1.2.3' + get_hostname.return_value = 'juju-unit-2' + _config = { + 'os-internal-hostname': 'internal.openstack.local', + 'os-admin-hostname': 'admin.openstack.local', + 'os-public-hostname': 'public.openstack.local', + } + _resolve_address = { + 'int': 
'10.0.0.2', + 'admin': '10.10.0.2', + 'public': '10.20.0.2', + } + _npa = { + 'internal': '10.0.0.3', + 'admin': '10.10.0.3', + 'public': '10.20.0.3', + } + _vips = { + '10.0.0.0/16': '10.0.0.100', + '10.10.0.0/16': '10.10.0.100', + '10.20.0.0/16': '10.20.0.100', + } + _resolve_nets = { + '10.0.0.3': '10.0.0.0/16', + '10.10.0.3': '10.10.0.0/16', + '10.20.0.3': '10.20.0.0/16', + } + expect = { + 'admin.openstack.local': { + 'sans': ['10.10.0.100', '10.10.0.2', '10.10.0.3']}, + 'internal.openstack.local': { + 'sans': ['10.0.0.100', '10.0.0.2', '10.0.0.3']}, + 'juju-unit-2': {'sans': ['10.1.2.3']}, + 'public.openstack.local': { + 'sans': ['10.20.0.100', '10.20.0.2', '10.20.0.3']}} + self.maxDiff = None + config.side_effect = lambda x: _config.get(x) + get_vip_in_network.side_effect = lambda x: _vips.get(x) + resolve_network_cidr.side_effect = lambda x: _resolve_nets.get(x) + network_get_primary_address.side_effect = lambda x: _npa.get(x) + resolve_address.side_effect = \ + lambda endpoint_type: _resolve_address[endpoint_type] + output = json.loads( + cert_utils.get_certificate_request()['cert_requests']) + self.assertEqual( + output, + expect) + + @mock.patch.object(cert_utils, 'unit_get') + @mock.patch.object(cert_utils.os, 'symlink') + @mock.patch.object(cert_utils.os.path, 'isfile') + @mock.patch.object(cert_utils, 'resolve_address') + @mock.patch.object(cert_utils, 'get_hostname') + def test_create_ip_cert_links(self, get_hostname, resolve_address, isfile, + symlink, unit_get): + unit_get.return_value = '10.1.2.3' + get_hostname.return_value = 'juju-unit-2' + _resolve_address = { + 'int': '10.0.0.2', + 'admin': '10.10.0.2', + 'public': '10.20.0.2', + } + resolve_address.side_effect = \ + lambda endpoint_type: _resolve_address[endpoint_type] + _files = { + '/etc/ssl/cert_juju-unit-2': True, + '/etc/ssl/cert_10.0.0.2': False, + '/etc/ssl/cert_10.10.0.2': True, + '/etc/ssl/cert_10.20.0.2': False, + '/etc/ssl/cert_funky-name': False, + } + isfile.side_effect = lambda x: _files[x] + expected = [ + mock.call('/etc/ssl/cert_juju-unit-2', '/etc/ssl/cert_10.0.0.2'), + mock.call('/etc/ssl/key_juju-unit-2', '/etc/ssl/key_10.0.0.2'), + mock.call('/etc/ssl/cert_juju-unit-2', '/etc/ssl/cert_10.20.0.2'), + mock.call('/etc/ssl/key_juju-unit-2', '/etc/ssl/key_10.20.0.2'), + ] + cert_utils.create_ip_cert_links('/etc/ssl') + symlink.assert_has_calls(expected) + symlink.reset_mock() + cert_utils.create_ip_cert_links( + '/etc/ssl', + custom_hostname_link='funky-name') + expected.extend([ + mock.call('/etc/ssl/cert_juju-unit-2', '/etc/ssl/cert_funky-name'), + mock.call('/etc/ssl/key_juju-unit-2', '/etc/ssl/key_funky-name'), + ]) + symlink.assert_has_calls(expected) + + @mock.patch.object(cert_utils, 'write_file') + def test_install_certs(self, write_file): + certs = { + 'admin.openstack.local': { + 'cert': 'ADMINCERT', + 'key': 'ADMINKEY'}} + cert_utils.install_certs('/etc/ssl', certs, chain='CHAIN') + expected = [ + mock.call( + path='/etc/ssl/cert_admin.openstack.local', + content='ADMINCERT\nCHAIN', + owner='root', group='root', + perms=0o640), + mock.call( + path='/etc/ssl/key_admin.openstack.local', + content='ADMINKEY', + owner='root', group='root', + perms=0o640), + ] + write_file.assert_has_calls(expected) + + @mock.patch.object(cert_utils, 'write_file') + def test_install_certs_ca(self, write_file): + certs = { + 'admin.openstack.local': { + 'cert': 'ADMINCERT', + 'key': 'ADMINKEY'}} + ca = 'MYCA' + cert_utils.install_certs('/etc/ssl', certs, ca) + expected = [ + mock.call( + 
path='/etc/ssl/cert_admin.openstack.local', + content='ADMINCERT\nMYCA', + owner='root', group='root', + perms=0o640), + mock.call( + path='/etc/ssl/key_admin.openstack.local', + content='ADMINKEY', + owner='root', group='root', + perms=0o640), + ] + write_file.assert_has_calls(expected) + + @mock.patch.object(cert_utils, 'local_unit') + @mock.patch.object(cert_utils, 'create_ip_cert_links') + @mock.patch.object(cert_utils, 'install_certs') + @mock.patch.object(cert_utils, 'install_ca_cert') + @mock.patch.object(cert_utils, 'mkdir') + @mock.patch.object(cert_utils, 'relation_get') + def test_process_certificates(self, relation_get, mkdir, install_ca_cert, + install_certs, create_ip_cert_links, + local_unit): + local_unit.return_value = 'devnull/2' + certs = { + 'admin.openstack.local': { + 'cert': 'ADMINCERT', + 'key': 'ADMINKEY'}} + _relation_info = { + 'keystone_2.processed_requests': json.dumps(certs), + 'chain': 'MYCHAIN', + 'ca': 'ROOTCA', + } + relation_get.return_value = _relation_info + self.assertFalse(cert_utils.process_certificates( + 'myservice', + 'certificates:2', + 'vault/0', + custom_hostname_link='funky-name')) + local_unit.return_value = 'keystone/2' + self.assertTrue(cert_utils.process_certificates( + 'myservice', + 'certificates:2', + 'vault/0', + custom_hostname_link='funky-name')) + install_ca_cert.assert_called_once_with(b'ROOTCA') + install_certs.assert_called_once_with( + '/etc/apache2/ssl/myservice', + {'admin.openstack.local': { + 'key': 'ADMINKEY', 'cert': 'ADMINCERT'}}, + 'MYCHAIN', user='root', group='root') + create_ip_cert_links.assert_called_once_with( + '/etc/apache2/ssl/myservice', + custom_hostname_link='funky-name') + + @mock.patch.object(cert_utils, 'local_unit') + @mock.patch.object(cert_utils, 'related_units') + @mock.patch.object(cert_utils, 'relation_ids') + @mock.patch.object(cert_utils, 'relation_get') + def test_get_requests_for_local_unit(self, relation_get, relation_ids, + related_units, local_unit): + local_unit.return_value = 'rabbitmq-server/2' + relation_ids.return_value = ['certificates:12'] + related_units.return_value = ['vault/0'] + certs = { + 'juju-cd4bb3-5.lxd': { + 'cert': 'BASECERT', + 'key': 'BASEKEY'}, + 'juju-cd4bb3-5.internal': { + 'cert': 'INTERNALCERT', + 'key': 'INTERNALKEY'}} + _relation_info = { + 'rabbitmq-server_2.processed_requests': json.dumps(certs), + 'chain': 'MYCHAIN', + 'ca': 'ROOTCA', + } + relation_get.return_value = _relation_info + self.assertEqual( + cert_utils.get_requests_for_local_unit(), + [{ + 'ca': 'ROOTCA', + 'certs': { + 'juju-cd4bb3-5.lxd': { + 'cert': 'BASECERT', + 'key': 'BASEKEY'}, + 'juju-cd4bb3-5.internal': { + 'cert': 'INTERNALCERT', + 'key': 'INTERNALKEY'}}, + 'chain': 'MYCHAIN'}] + ) + + @mock.patch.object(cert_utils, 'get_requests_for_local_unit') + def test_get_bundle_for_cn(self, get_requests_for_local_unit): + get_requests_for_local_unit.return_value = [{ + 'ca': 'ROOTCA', + 'certs': { + 'juju-cd4bb3-5.lxd': { + 'cert': 'BASECERT', + 'key': 'BASEKEY'}, + 'juju-cd4bb3-5.internal': { + 'cert': 'INTERNALCERT', + 'key': 'INTERNALKEY'}}, + 'chain': 'MYCHAIN'}] + self.assertEqual( + cert_utils.get_bundle_for_cn('juju-cd4bb3-5.internal'), + { + 'ca': 'ROOTCA', + 'cert': 'INTERNALCERT', + 'chain': 'MYCHAIN', + 'key': 'INTERNALKEY'}) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_ip.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_ip.py new file mode 100644 index 
0000000000000000000000000000000000000000..e062e7b89e9f4f7e9a1566886dbd5f794a4b5e90 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_ip.py @@ -0,0 +1,174 @@ +from testtools import TestCase +from mock import patch, call, MagicMock + +import charmhelpers.core as ch_core +import charmhelpers.contrib.openstack.ip as ip + +TO_PATCH = [ + 'config', + 'unit_get', + 'get_address_in_network', + 'is_clustered', + 'service_name', + 'network_get_primary_address', + 'resolve_network_cidr', +] + + +class TestConfig(): + + def __init__(self): + self.config = {} + + def set(self, key, value): + self.config[key] = value + + def get(self, key): + return self.config.get(key) + + +class IPTestCase(TestCase): + + def setUp(self): + super(IPTestCase, self).setUp() + for m in TO_PATCH: + setattr(self, m, self._patch(m)) + self.test_config = TestConfig() + self.config.side_effect = self.test_config.get + self.network_get_primary_address.side_effect = [ + NotImplementedError, + ch_core.hookenv.NoNetworkBinding, + ] + + def _patch(self, method): + _m = patch('charmhelpers.contrib.openstack.ip.' + method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def test_resolve_address_default(self): + self.is_clustered.return_value = False + self.unit_get.return_value = 'unit1' + self.get_address_in_network.return_value = 'unit1' + self.assertEquals(ip.resolve_address(), 'unit1') + self.unit_get.assert_called_with('public-address') + calls = [call('os-public-network'), + call('prefer-ipv6')] + self.config.assert_has_calls(calls) + + def test_resolve_address_default_internal(self): + self.is_clustered.return_value = False + self.unit_get.return_value = 'unit1' + self.get_address_in_network.return_value = 'unit1' + self.assertEquals(ip.resolve_address(ip.INTERNAL), 'unit1') + self.unit_get.assert_called_with('private-address') + calls = [call('os-internal-network'), + call('prefer-ipv6')] + self.config.assert_has_calls(calls) + + def test_resolve_address_public_not_clustered(self): + self.is_clustered.return_value = False + self.test_config.set('os-public-network', '192.168.20.0/24') + self.unit_get.return_value = 'unit1' + self.get_address_in_network.return_value = '192.168.20.1' + self.assertEquals(ip.resolve_address(), '192.168.20.1') + self.unit_get.assert_called_with('public-address') + calls = [call('os-public-network'), + call('prefer-ipv6')] + self.config.assert_has_calls(calls) + self.get_address_in_network.assert_called_with( + '192.168.20.0/24', + 'unit1') + + def test_resolve_address_public_clustered(self): + self.is_clustered.return_value = True + self.test_config.set('os-public-network', '192.168.20.0/24') + self.test_config.set('vip', '192.168.20.100 10.5.3.1') + self.assertEquals(ip.resolve_address(), '192.168.20.100') + + def test_resolve_address_default_clustered(self): + self.is_clustered.return_value = True + self.test_config.set('vip', '10.5.3.1') + self.assertEquals(ip.resolve_address(), '10.5.3.1') + self.config.assert_has_calls( + [call('vip'), + call('os-public-network')]) + + def test_resolve_address_public_clustered_inresolvable(self): + self.is_clustered.return_value = True + self.test_config.set('os-public-network', '192.168.20.0/24') + self.test_config.set('vip', '10.5.3.1') + self.assertRaises(ValueError, ip.resolve_address) + + def test_resolve_address_override(self): + self.test_config.set('os-public-hostname', 'public.example.com') + addr = ip.resolve_address() + self.assertEqual('public.example.com', addr) + + 
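# resolve_address() honours the os-public-hostname override by + # default; the next test passes override=False to check that the + # raw unit address is returned instead. + + 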
@patch.object(ip, '_get_address_override') + def test_resolve_address_no_override(self, _get_address_override): + self.test_config.set('os-public-hostname', 'public.example.com') + self.unit_get.return_value = '10.0.0.1' + addr = ip.resolve_address(override=False) + self.assertFalse(_get_address_override.called) + self.assertEqual('10.0.0.1', addr) + + def test_resolve_address_override_template(self): + self.test_config.set('os-public-hostname', + '{service_name}.example.com') + self.service_name.return_value = 'foo' + addr = ip.resolve_address() + self.assertEqual('foo.example.com', addr) + + @patch.object(ip, 'get_ipv6_addr', lambda *args, **kwargs: ['::1']) + def test_resolve_address_ipv6_fallback(self): + self.test_config.set('prefer-ipv6', True) + self.is_clustered.return_value = False + self.assertEqual(ip.resolve_address(), '::1') + + @patch.object(ip, 'resolve_address') + def test_canonical_url_http(self, resolve_address): + resolve_address.return_value = 'unit1' + configs = MagicMock() + configs.complete_contexts.return_value = [] + self.assertTrue(ip.canonical_url(configs), + 'http://unit1') + + @patch.object(ip, 'resolve_address') + def test_canonical_url_https(self, resolve_address): + resolve_address.return_value = 'unit1' + configs = MagicMock() + configs.complete_contexts.return_value = ['https'] + self.assertTrue(ip.canonical_url(configs), + 'https://unit1') + + @patch.object(ip, 'is_ipv6', lambda *args: True) + @patch.object(ip, 'resolve_address') + def test_canonical_url_ipv6(self, resolve_address): + resolve_address.return_value = 'unit1' + self.assertTrue(ip.canonical_url(None), 'http://[unit1]') + + def test_resolve_address_network_get(self): + self.is_clustered.return_value = False + self.unit_get.return_value = 'unit1' + self.network_get_primary_address.side_effect = None + self.network_get_primary_address.return_value = '10.5.60.1' + self.assertEqual(ip.resolve_address(), '10.5.60.1') + self.unit_get.assert_called_with('public-address') + calls = [call('os-public-network'), + call('prefer-ipv6')] + self.config.assert_has_calls(calls) + self.network_get_primary_address.assert_called_with('public') + + def test_resolve_address_network_get_clustered(self): + self.is_clustered.return_value = True + self.test_config.set('vip', '10.5.60.20 192.168.1.20') + self.network_get_primary_address.side_effect = None + self.network_get_primary_address.return_value = '10.5.60.1' + self.resolve_network_cidr.return_value = '10.5.60.1/24' + self.assertEqual(ip.resolve_address(), '10.5.60.20') + calls = [call('os-public-hostname'), + call('vip'), + call('os-public-network')] + self.config.assert_has_calls(calls) + self.network_get_primary_address.assert_called_with('public') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_keystone_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_keystone_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c7bba25d691b736a781d53c6269986435f114745 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_keystone_utils.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python + +import unittest + +from mock import patch, PropertyMock + +import charmhelpers.contrib.openstack.keystone as keystone + +TO_PATCH = [ + 'apt_install', + "log", + "ERROR", + "IdentityServiceContext", +] + + +class KeystoneTests(unittest.TestCase): + def setUp(self): + for m in TO_PATCH: + setattr(self, m, 
self._patch(m)) + + def _patch(self, method): + _m = patch('charmhelpers.contrib.openstack.keystone.' + method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def test_get_keystone_manager(self): + manager = keystone.get_keystone_manager( + 'test-endpoint', 2, token="12345" + ) + self.assertTrue(isinstance(manager, keystone.KeystoneManager2)) + + manager = keystone.get_keystone_manager( + 'test-endpoint', 3, token="12345") + + self.assertTrue(isinstance(manager, keystone.KeystoneManager3)) + self.assertRaises(ValueError, keystone.get_keystone_manager, + 'test-endpoint', 4, token="12345") + + def test_resolve_service_id_v2(self): + class ServiceList(list): + def __iter__(self): + class Service(object): + _info = { + 'type': 'metering', + 'name': "ceilometer", + 'id': "uuid-uuid", + } + yield Service() + + manager = keystone.get_keystone_manager('test-endpoint', 2, + token="1234") + manager.api.services.list = PropertyMock(return_value=ServiceList()) + self.assertTrue(manager.service_exists(service_name="ceilometer", + service_type="metering")) + self.assertFalse(manager.service_exists(service_name="barbican")) + self.assertFalse(manager.service_exists(service_name="barbican", + service_type="openstack")) + + def test_resolve_service_id_v3(self): + class ServiceList(list): + def __iter__(self): + class Service(object): + _info = { + 'type': 'metering', + 'name': "ceilometer", + 'id': "uuid-uuid", + } + yield Service() + + manager = keystone.get_keystone_manager('test-endpoint', 3, + token="12345") + manager.api.services.list = PropertyMock(return_value=ServiceList()) + self.assertTrue(manager.service_exists(service_name="ceilometer", + service_type="metering")) + self.assertFalse(manager.service_exists(service_name="barbican")) + self.assertFalse(manager.service_exists(service_name="barbican", + service_type="openstack")) + + def test_get_api_suffix(self): + self.assertEquals(keystone.get_api_suffix(2), "v2.0") + self.assertEquals(keystone.get_api_suffix(3), "v3") + + def test_format_endpoint(self): + self.assertEquals(keystone.format_endpoint( + "http", "10.0.0.5", "5000", 2), "http://10.0.0.5:5000/v2.0/") + + def test_get_keystone_manager_from_identity_service_context(self): + class FakeIdentityServiceV2(object): + def __call__(self, *args, **kwargs): + return { + "service_protocol": "https", + "service_host": "10.5.0.5", + "service_port": "5000", + "api_version": "2.0", + "admin_user": "admin", + "admin_password": "admin", + "admin_tenant_name": "admin_tenant" + } + + self.IdentityServiceContext.return_value = FakeIdentityServiceV2() + + manager = keystone.get_keystone_manager_from_identity_service_context() + self.assertIsInstance(manager, keystone.KeystoneManager2) + + class FakeIdentityServiceV3(object): + def __call__(self, *args, **kwargs): + return { + "service_protocol": "https", + "service_host": "10.5.0.5", + "service_port": "5000", + "api_version": "3", + "admin_user": "admin", + "admin_password": "admin", + "admin_tenant_name": "admin_tenant" + } + + self.IdentityServiceContext.return_value = FakeIdentityServiceV3() + + manager = keystone.get_keystone_manager_from_identity_service_context() + self.assertIsInstance(manager, keystone.KeystoneManager3) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_neutron_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_neutron_utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..a571f171c5afcb3bbd6589e9c04c3d2a2db5af6c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_neutron_utils.py @@ -0,0 +1,239 @@ +import unittest +from mock import patch +from nose.tools import raises +import charmhelpers.contrib.openstack.neutron as neutron + +TO_PATCH = [ + 'log', + 'config', + 'os_release', + 'check_output', +] + + +class NeutronTests(unittest.TestCase): + def setUp(self): + for m in TO_PATCH: + setattr(self, m, self._patch(m)) + + def _patch(self, method): + _m = patch('charmhelpers.contrib.openstack.neutron.' + method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def test_headers_package(self): + self.check_output.return_value = b'3.13.0-19-generic' + kname = neutron.headers_package() + self.assertEquals(kname, 'linux-headers-3.13.0-19-generic') + + def test_kernel_version(self): + self.check_output.return_value = b'3.13.0-19-generic' + kver_maj, kver_min = neutron.kernel_version() + self.assertEquals((kver_maj, kver_min), (3, 13)) + + @patch.object(neutron, 'kernel_version') + def test_determine_dkms_package_old_kernel(self, _kernel_version): + self.check_output.return_value = b'3.4.0-19-generic' + _kernel_version.return_value = (3, 10) + dkms_package = neutron.determine_dkms_package() + self.assertEquals(dkms_package, ['linux-headers-3.4.0-19-generic', + 'openvswitch-datapath-dkms']) + + @patch.object(neutron, 'kernel_version') + def test_determine_dkms_package_new_kernel(self, _kernel_version): + _kernel_version.return_value = (3, 13) + dkms_package = neutron.determine_dkms_package() + self.assertEquals(dkms_package, []) + + def test_quantum_plugins(self): + self.config.return_value = 'foo' + plugins = neutron.quantum_plugins() + self.assertEquals(plugins['ovs']['services'], + ['quantum-plugin-openvswitch-agent']) + self.assertEquals(plugins['nvp']['services'], []) + + def test_neutron_plugins_preicehouse(self): + self.config.return_value = 'foo' + self.os_release.return_value = 'havana' + plugins = neutron.neutron_plugins() + self.assertEquals(plugins['ovs']['config'], + '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini') + self.assertEquals(plugins['nvp']['services'], []) + + def test_neutron_plugins(self): + self.config.return_value = 'foo' + self.os_release.return_value = 'icehouse' + plugins = neutron.neutron_plugins() + self.assertEquals(plugins['ovs']['config'], + '/etc/neutron/plugins/ml2/ml2_conf.ini') + self.assertEquals(plugins['nvp']['config'], + '/etc/neutron/plugins/vmware/nsx.ini') + self.assertTrue('neutron-plugin-vmware' in + plugins['nvp']['server_packages']) + self.assertEquals(plugins['n1kv']['config'], + '/etc/neutron/plugins/cisco/cisco_plugins.ini') + self.assertEquals(plugins['Calico']['config'], + '/etc/neutron/plugins/ml2/ml2_conf.ini') + self.assertEquals(plugins['plumgrid']['config'], + '/etc/neutron/plugins/plumgrid/plumgrid.ini') + self.assertEquals(plugins['midonet']['config'], + '/etc/neutron/plugins/midonet/midonet.ini') + + self.assertEquals(plugins['nvp']['services'], []) + self.assertEquals(plugins['nsx'], plugins['nvp']) + + self.os_release.return_value = 'kilo' + plugins = neutron.neutron_plugins() + self.assertEquals(plugins['midonet']['driver'], + 'neutron.plugins.midonet.plugin.MidonetPluginV2') + self.assertEquals(plugins['nsx']['config'], + '/etc/neutron/plugins/vmware/nsx.ini') + + self.os_release.return_value = 'liberty' + self.config.return_value = 'mem-1.9' + plugins = neutron.neutron_plugins() + 
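# from liberty onwards the MidoNet driver moves to the + # midonet.neutron namespace and pulls in python-networking-midonet + 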
self.assertEquals(plugins['midonet']['driver'], + 'midonet.neutron.plugin_v1.MidonetPluginV2') + self.assertTrue('python-networking-midonet' in + plugins['midonet']['server_packages']) + + self.os_release.return_value = 'mitaka' + self.config.return_value = 'mem-1.9' + plugins = neutron.neutron_plugins() + self.assertEquals(plugins['nsx']['config'], + '/etc/neutron/nsx.ini') + self.assertTrue('python-vmware-nsx' in + plugins['nsx']['server_packages']) + + @patch.object(neutron, 'network_manager') + def test_neutron_plugin_attribute_quantum(self, _network_manager): + self.config.return_value = 'foo' + _network_manager.return_value = 'quantum' + plugins = neutron.neutron_plugin_attribute('ovs', 'services') + self.assertEquals(plugins, ['quantum-plugin-openvswitch-agent']) + + @patch.object(neutron, 'network_manager') + def test_neutron_plugin_attribute_neutron(self, _network_manager): + self.config.return_value = 'foo' + self.os_release.return_value = 'icehouse' + _network_manager.return_value = 'neutron' + plugins = neutron.neutron_plugin_attribute('ovs', 'services') + self.assertEquals(plugins, ['neutron-plugin-openvswitch-agent']) + + @raises(Exception) + @patch.object(neutron, 'network_manager') + def test_neutron_plugin_attribute_foo(self, _network_manager): + _network_manager.return_value = 'foo' + self.assertRaises(Exception, neutron.neutron_plugin_attribute('ovs', 'services')) + + @raises(Exception) + @patch.object(neutron, 'network_manager') + def test_neutron_plugin_attribute_plugin_keyerror(self, _network_manager): + self.config.return_value = 'foo' + _network_manager.return_value = 'quantum' + self.assertRaises(Exception, neutron.neutron_plugin_attribute('foo', 'foo')) + + @patch.object(neutron, 'network_manager') + def test_neutron_plugin_attribute_attr_keyerror(self, _network_manager): + self.config.return_value = 'foo' + _network_manager.return_value = 'quantum' + plugins = neutron.neutron_plugin_attribute('ovs', 'foo') + self.assertEquals(plugins, None) + + @raises(Exception) + def test_network_manager_essex(self): + essex_cases = { + 'quantum': 'quantum', + 'neutron': 'quantum', + 'newhotness': 'newhotness', + } + self.os_release.return_value = 'essex' + for nwmanager in essex_cases: + self.config.return_value = nwmanager + self.assertRaises(Exception, neutron.network_manager()) + + def test_network_manager_folsom(self): + folsom_cases = { + 'quantum': 'quantum', + 'neutron': 'quantum', + 'newhotness': 'newhotness', + } + self.os_release.return_value = 'folsom' + for nwmanager in folsom_cases: + self.config.return_value = nwmanager + renamed_manager = neutron.network_manager() + self.assertEquals(renamed_manager, folsom_cases[nwmanager]) + + def test_network_manager_grizzly(self): + grizzly_cases = { + 'quantum': 'quantum', + 'neutron': 'quantum', + 'newhotness': 'newhotness', + } + self.os_release.return_value = 'grizzly' + for nwmanager in grizzly_cases: + self.config.return_value = nwmanager + renamed_manager = neutron.network_manager() + self.assertEquals(renamed_manager, grizzly_cases[nwmanager]) + + def test_network_manager_havana(self): + havana_cases = { + 'quantum': 'neutron', + 'neutron': 'neutron', + 'newhotness': 'newhotness', + } + self.os_release.return_value = 'havana' + for nwmanager in havana_cases: + self.config.return_value = nwmanager + renamed_manager = neutron.network_manager() + self.assertEquals(renamed_manager, havana_cases[nwmanager]) + + def test_network_manager_icehouse(self): + icehouse_cases = { + 'quantum': 'neutron', + 'neutron': 'neutron', + 
'newhotness': 'newhotness', + } + self.os_release.return_value = 'icehouse' + for nwmanager in icehouse_cases: + self.config.return_value = nwmanager + renamed_manager = neutron.network_manager() + self.assertEquals(renamed_manager, icehouse_cases[nwmanager]) + + def test_parse_bridge_mappings(self): + ret = neutron.parse_bridge_mappings(None) + self.assertEqual(ret, {}) + ret = neutron.parse_bridge_mappings("physnet1:br0") + self.assertEqual(ret, {'physnet1': 'br0'}) + ret = neutron.parse_bridge_mappings("physnet1:br0 physnet2:br1") + self.assertEqual(ret, {'physnet1': 'br0', 'physnet2': 'br1'}) + + def test_parse_data_port_mappings(self): + ret = neutron.parse_data_port_mappings(None) + self.assertEqual(ret, {}) + ret = neutron.parse_data_port_mappings('br0:eth0') + self.assertEqual(ret, {'eth0': 'br0'}) + # Back-compat test + ret = neutron.parse_data_port_mappings('eth0', default_bridge='br0') + self.assertEqual(ret, {'eth0': 'br0'}) + # Multiple mappings + ret = neutron.parse_data_port_mappings('br0:eth0 br1:eth1') + self.assertEqual(ret, {'eth0': 'br0', 'eth1': 'br1'}) + # Multi-MAC mappings + ret = neutron.parse_data_port_mappings('br0:cb:23:ae:72:f2:33 ' + 'br0:fa:16:3e:12:97:8e') + self.assertEqual(ret, {'cb:23:ae:72:f2:33': 'br0', + 'fa:16:3e:12:97:8e': 'br0'}) + + def test_parse_vlan_range_mappings(self): + ret = neutron.parse_vlan_range_mappings(None) + self.assertEqual(ret, {}) + ret = neutron.parse_vlan_range_mappings('physnet1:1001:2000') + self.assertEqual(ret, {'physnet1': ('1001', '2000')}) + ret = neutron.parse_vlan_range_mappings('physnet1:1001:2000 physnet2:2001:3000') + self.assertEqual(ret, {'physnet1': ('1001', '2000'), + 'physnet2': ('2001', '3000')}) + ret = neutron.parse_vlan_range_mappings('physnet1 physnet2:2001:3000') + self.assertEqual(ret, {'physnet1': ('',), + 'physnet2': ('2001', '3000')}) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_openstack_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_openstack_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6358ac3ee653fe6a52c5ec890d0a6f7570df4b1e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_openstack_utils.py @@ -0,0 +1,2340 @@ +import io +import os +import contextlib +import unittest +from copy import copy +from tests.helpers import patch_open, FakeRelation + +from testtools import TestCase +from mock import MagicMock, patch, call + +from charmhelpers.fetch import ubuntu as fetch +from charmhelpers.core.hookenv import WORKLOAD_STATES, flush + +import charmhelpers.contrib.openstack.utils as openstack + +import six + +if not six.PY3: + builtin_open = '__builtin__.open' + builtin_import = '__builtin__.__import__' +else: + builtin_open = 'builtins.open' + builtin_import = 'builtins.__import__' + +FAKE_CODENAME = 'precise' +# mocked return of openstack.lsb_release() +FAKE_RELEASE = { + 'DISTRIB_CODENAME': 'precise', + 'DISTRIB_RELEASE': '12.04', + 'DISTRIB_ID': 'Ubuntu', + 'DISTRIB_DESCRIPTION': '"Ubuntu 12.04"' +} + +FAKE_REPO = { + # liberty patch release + 'neutron-common': { + 'pkg_vers': '2:7.0.1-0ubuntu1', + 'os_release': 'liberty', + 'os_version': '2015.2' + }, + # liberty release version + 'nova-common': { + 'pkg_vers': '2:12.0.0~b1-0ubuntu1', + 'os_release': 'liberty', + 'os_version': '2015.2' + }, + 'nova': { + 'pkg_vers': '2012.2.3-0ubuntu2.1', + 'os_release': 'folsom', + 'os_version': '2012.2' + }, + 
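# essex release with a date-stamped stable version string + 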
'glance-common': { + 'pkg_vers': '2012.1.3+stable-20130423-74b067df-0ubuntu1', + 'os_release': 'essex', + 'os_version': '2012.1' + }, + 'keystone-common': { + 'pkg_vers': '1:2013.1-0ubuntu1.1~cloud0', + 'os_release': 'grizzly', + 'os_version': '2013.1' + }, + # Exercise swift version detection + 'swift-storage': { + 'pkg_vers': '1.8.0-0ubuntu1', + 'os_release': 'grizzly', + 'os_version': '1.8.0' + }, + 'swift-proxy': { + 'pkg_vers': '1.13.1-0ubuntu1', + 'os_release': 'icehouse', + 'os_version': '1.13.1' + }, + 'swift-common': { + 'pkg_vers': '1.10.0~rc1-0ubuntu1', + 'os_release': 'havana', + 'os_version': '1.10.0' + }, + 'swift-mitaka-dev': { + 'pkg_vers': '2.7.1.dev8.201605111703.trusty-0ubuntu1', + 'os_release': 'mitaka', + 'os_version': '2.7.0' + }, + # a package that's available in the cache but is not installed + 'cinder-common': { + 'os_release': 'havana', + 'os_version': '2013.2' + }, + # poorly formed openstack version + 'bad-version': { + 'pkg_vers': '1:2200.1-0ubuntu1.1~cloud0', + 'os_release': None, + 'os_version': None + } +} + +MOUNTS = [ + ['/mnt', '/dev/vdb'] +] + +url = 'deb ' + openstack.CLOUD_ARCHIVE_URL +UCA_SOURCES = [ + ('cloud:precise-folsom/proposed', url + ' precise-proposed/folsom main'), + ('cloud:precise-folsom', url + ' precise-updates/folsom main'), + ('cloud:precise-folsom/updates', url + ' precise-updates/folsom main'), + ('cloud:precise-grizzly/proposed', url + ' precise-proposed/grizzly main'), + ('cloud:precise-grizzly', url + ' precise-updates/grizzly main'), + ('cloud:precise-grizzly/updates', url + ' precise-updates/grizzly main'), + ('cloud:precise-havana/proposed', url + ' precise-proposed/havana main'), + ('cloud:precise-havana', url + ' precise-updates/havana main'), + ('cloud:precise-havana/updates', url + ' precise-updates/havana main'), + ('cloud:precise-icehouse/proposed', + url + ' precise-proposed/icehouse main'), + ('cloud:precise-icehouse', url + ' precise-updates/icehouse main'), + ('cloud:precise-icehouse/updates', url + ' precise-updates/icehouse main'), +] + +# Mock python-dnspython resolver used by get_host_ip() + + +class FakeAnswer(object): + + def __init__(self, ip): + self.ip = ip + + def __str__(self): + return self.ip + + +class FakeResolver(object): + + def __init__(self, ip): + self.ip = ip + + def query(self, hostname, query_type): + if self.ip == '': + return [] + else: + return [FakeAnswer(self.ip)] + + +class FakeReverse(object): + + def from_address(self, address): + return '156.94.189.91.in-addr.arpa' + + +class FakeDNSName(object): + + def __init__(self, dnsname): + pass + + +class FakeDNS(object): + + def __init__(self, ip): + self.resolver = FakeResolver(ip) + self.reversename = FakeReverse() + self.name = MagicMock() + self.name.Name = FakeDNSName + + +class OpenStackHelpersTestCase(TestCase): + + def setUp(self): + super(OpenStackHelpersTestCase, self).setUp() + self.patch(fetch, 'get_apt_dpkg_env', lambda: {}) + + def _apt_cache(self): + # mocks out the apt cache + def cache_get(package): + pkg = MagicMock() + if package in FAKE_REPO and 'pkg_vers' in FAKE_REPO[package]: + pkg.name = package + pkg.current_ver.ver_str = FAKE_REPO[package]['pkg_vers'] + elif (package in FAKE_REPO and + 'pkg_vers' not in FAKE_REPO[package]): + pkg.name = package + pkg.current_ver = None + else: + raise KeyError + return pkg + cache = MagicMock() + cache.__getitem__.side_effect = cache_get + return cache + + @patch.object(openstack, 'filter_missing_packages') + def test_get_installed_semantic_versioned_packages(self, mock_filter): + def 
_filter_missing_packages(pkgs): + return [x for x in pkgs if x in ['cinder-common']] + mock_filter.side_effect = _filter_missing_packages + self.assertEquals( + openstack.get_installed_semantic_versioned_packages(), + ['cinder-common']) + + @patch('charmhelpers.contrib.openstack.utils.lsb_release') + def test_os_codename_from_install_source(self, mocked_lsb): + """Test mapping install source to OpenStack release name""" + mocked_lsb.return_value = FAKE_RELEASE + + # the openstack release shipped with respective ubuntu/lsb release. + self.assertEquals(openstack.get_os_codename_install_source('distro'), + 'essex') + # proposed pocket + self.assertEquals(openstack.get_os_codename_install_source( + 'distro-proposed'), + 'essex') + self.assertEquals(openstack.get_os_codename_install_source( + 'proposed'), + 'essex') + + # various cloud archive pockets + src = 'cloud:precise-grizzly' + self.assertEquals(openstack.get_os_codename_install_source(src), + 'grizzly') + src = 'cloud:precise-grizzly/proposed' + self.assertEquals(openstack.get_os_codename_install_source(src), + 'grizzly') + + # ppas and full repo urls. + src = 'ppa:openstack-ubuntu-testing/havana-trunk-testing' + self.assertEquals(openstack.get_os_codename_install_source(src), + 'havana') + src = ('deb http://ubuntu-cloud.archive.canonical.com/ubuntu ' + 'precise-havana main') + self.assertEquals(openstack.get_os_codename_install_source(src), + 'havana') + self.assertEquals(openstack.get_os_codename_install_source(None), + '') + + @patch.object(openstack, 'get_os_version_codename') + @patch.object(openstack, 'get_os_codename_install_source') + def test_os_version_from_install_source(self, codename, version): + codename.return_value = 'grizzly' + openstack.get_os_version_install_source('cloud:precise-grizzly') + version.assert_called_with('grizzly') + + @patch('charmhelpers.contrib.openstack.utils.lsb_release') + def test_os_codename_from_bad_install_source(self, mocked_lsb): + """Test mapping install source to OpenStack release name""" + _fake_release = copy(FAKE_RELEASE) + _fake_release['DISTRIB_CODENAME'] = 'natty' + + mocked_lsb.return_value = _fake_release + _e = 'charmhelpers.contrib.openstack.utils.error_out' + with patch(_e) as mocked_err: + openstack.get_os_codename_install_source('distro') + _er = ('Could not derive openstack release for this Ubuntu ' + 'release: natty') + mocked_err.assert_called_with(_er) + + def test_os_codename_from_version(self): + """Test mapping OpenStack numerical versions to code name""" + self.assertEquals(openstack.get_os_codename_version('2013.1'), + 'grizzly') + + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_os_codename_from_bad_version(self, mocked_error): + """Test mapping a bad OpenStack numerical versions to code name""" + openstack.get_os_codename_version('2014.5.5') + expected_err = ('Could not determine OpenStack codename for ' + 'version 2014.5.5') + mocked_error.assert_called_with(expected_err) + + def test_os_version_from_codename(self): + """Test mapping a OpenStack codename to numerical version""" + self.assertEquals(openstack.get_os_version_codename('folsom'), + '2012.2') + + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_os_version_from_bad_codename(self, mocked_error): + """Test mapping a bad OpenStack codename to numerical version""" + openstack.get_os_version_codename('foo') + expected_err = 'Could not derive OpenStack version for codename: foo' + mocked_error.assert_called_with(expected_err) + + def 
test_os_version_swift_from_codename(self): + """Test mapping a swift codename to numerical version""" + self.assertEquals(openstack.get_os_version_codename_swift('liberty'), + '2.5.0') + + def test_get_swift_codename_single_version_kilo(self): + self.assertEquals(openstack.get_swift_codename('2.2.2'), 'kilo') + + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_os_version_swift_from_bad_codename(self, mocked_error): + """Test mapping a bad swift codename to numerical version""" + openstack.get_os_version_codename_swift('foo') + expected_err = 'Could not derive swift version for codename: foo' + mocked_error.assert_called_with(expected_err) + + def test_get_swift_codename_multiple_versions_liberty(self): + with patch('subprocess.check_output') as _subp: + _subp.return_value = b"... trusty-updates/liberty/main ..." + self.assertEquals(openstack.get_swift_codename('2.5.0'), 'liberty') + + def test_get_swift_codename_multiple_versions_mitaka(self): + with patch('subprocess.check_output') as _subp: + _subp.return_value = b"... trusty-updates/mitaka/main ..." + self.assertEquals(openstack.get_swift_codename('2.5.0'), 'mitaka') + + def test_get_swift_codename_none(self): + self.assertEquals(openstack.get_swift_codename('1.2.3'), None) + + @patch.object(openstack, 'snap_install_requested') + def test_os_codename_from_package(self, mock_snap_install_requested): + """Test deriving OpenStack codename from an installed package""" + mock_snap_install_requested.return_value = False + with patch.object(openstack, 'apt_cache') as cache: + cache.return_value = self._apt_cache() + for pkg, vers in six.iteritems(FAKE_REPO): + # test fake repo for all "installed" packages + if pkg.startswith('bad-'): + continue + if 'pkg_vers' not in vers: + continue + self.assertEquals(openstack.get_os_codename_package(pkg), + vers['os_release']) + + @patch.object(openstack, 'snap_install_requested') + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_os_codename_from_bad_package_version(self, mocked_error, + mock_snap_install_requested): + """Test deriving OpenStack codename for a poorly versioned package""" + mock_snap_install_requested.return_value = False + with patch.object(openstack, 'apt_cache') as cache: + cache.return_value = self._apt_cache() + openstack.get_os_codename_package('bad-version') + _e = ('Could not determine OpenStack codename for version 2200.1') + mocked_error.assert_called_with(_e) + + @patch.object(openstack, 'snap_install_requested') + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_os_codename_from_bad_package(self, mocked_error, + mock_snap_install_requested): + """Test deriving OpenStack codename from an unavailable package""" + mock_snap_install_requested.return_value = False + with patch.object(openstack, 'apt_cache') as cache: + cache.return_value = self._apt_cache() + try: + openstack.get_os_codename_package('foo') + except Exception: + # ignore exceptions that raise when error_out is mocked + # and doesn't sys.exit(1) + pass + e = 'Could not determine version of package with no installation '\ + 'candidate: foo' + mocked_error.assert_called_with(e) + + @patch.object(openstack, 'snap_install_requested') + def test_os_codename_from_bad_package_nonfatal( + self, mock_snap_install_requested): + """Test OpenStack codename from an unavailable package is non-fatal""" + mock_snap_install_requested.return_value = False + with patch.object(openstack, 'apt_cache') as cache: + cache.return_value = self._apt_cache() + self.assertEquals( + None, + 
openstack.get_os_codename_package('foo', fatal=False) + ) + + @patch.object(openstack, 'snap_install_requested') + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_os_codename_from_uninstalled_package(self, mock_error, + mock_snap_install_requested): + """Test OpenStack codename from an available but uninstalled pkg""" + mock_snap_install_requested.return_value = False + with patch.object(openstack, 'apt_cache') as cache: + cache.return_value = self._apt_cache() + try: + openstack.get_os_codename_package('cinder-common', fatal=True) + except Exception: + pass + e = ('Could not determine version of uninstalled package: ' + 'cinder-common') + mock_error.assert_called_with(e) + + @patch.object(openstack, 'snap_install_requested') + def test_os_codename_from_uninstalled_package_nonfatal( + self, mock_snap_install_requested): + """Test OpenStack codename from avail uninstalled pkg is non fatal""" + mock_snap_install_requested.return_value = False + with patch.object(openstack, 'apt_cache') as cache: + cache.return_value = self._apt_cache() + self.assertEquals( + None, + openstack.get_os_codename_package('cinder-common', fatal=False) + ) + + @patch.object(openstack, 'snap_install_requested') + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_os_version_from_package(self, mocked_error, + mock_snap_install_requested): + """Test deriving OpenStack version from an installed package""" + mock_snap_install_requested.return_value = False + with patch.object(openstack, 'apt_cache') as cache: + cache.return_value = self._apt_cache() + for pkg, vers in six.iteritems(FAKE_REPO): + if pkg.startswith('bad-'): + continue + if 'pkg_vers' not in vers: + continue + self.assertEquals(openstack.get_os_version_package(pkg), + vers['os_version']) + + @patch.object(openstack, 'snap_install_requested') + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_os_version_from_bad_package(self, mocked_error, + mock_snap_install_requested): + """Test deriving OpenStack version from an uninstalled package""" + mock_snap_install_requested.return_value = False + with patch.object(openstack, 'apt_cache') as cache: + cache.return_value = self._apt_cache() + try: + openstack.get_os_version_package('foo') + except Exception: + # ignore exceptions that raise when error_out is mocked + # and doesn't sys.exit(1) + pass + e = 'Could not determine version of package with no installation '\ + 'candidate: foo' + mocked_error.assert_called_with(e) + + @patch.object(openstack, 'snap_install_requested') + def test_os_version_from_bad_package_nonfatal( + self, mock_snap_install_requested): + """Test OpenStack version from an uninstalled package is non-fatal""" + mock_snap_install_requested.return_value = False + with patch.object(openstack, 'apt_cache') as cache: + cache.return_value = self._apt_cache() + self.assertEquals( + None, + openstack.get_os_version_package('foo', fatal=False) + ) + + @patch.object(openstack, 'get_os_codename_package') + @patch('charmhelpers.contrib.openstack.utils.config') + def test_os_release_uncached(self, config, get_cn): + openstack._os_rel = None + get_cn.return_value = 'folsom' + self.assertEquals('folsom', openstack.os_release('nova-common')) + + def test_os_release_cached(self): + openstack._os_rel = 'foo' + self.assertEquals('foo', openstack.os_release('nova-common')) + + @patch.object(openstack, 'juju_log') + @patch('sys.exit') + def test_error_out(self, mocked_exit, juju_log): + """Test erroring out""" + openstack.error_out('Everything broke.') + _log 
= 'FATAL ERROR: Everything broke.' + juju_log.assert_called_with(_log, level='ERROR') + mocked_exit.assert_called_with(1) + + def test_get_source_and_pgp_key(self): + tests = { + "source|key": ('source', 'key'), + "source|": ('source', None), + "|key": ('', 'key'), + "source": ('source', None), + } + for k, v in six.iteritems(tests): + self.assertEqual(openstack.get_source_and_pgp_key(k), v) + + # These should still work, even though the bulk of the functionality has + # moved to charmhelpers.fetch.add_source() + def test_configure_install_source_distro(self): + """Test configuring installation from distro""" + self.assertIsNone(openstack.configure_installation_source('distro')) + + def test_configure_install_source_ppa(self): + """Test configuring installation source from PPA""" + with patch('subprocess.check_call') as mock: + src = 'ppa:gandelman-a/openstack' + openstack.configure_installation_source(src) + ex_cmd = [ + 'add-apt-repository', '--yes', 'ppa:gandelman-a/openstack'] + mock.assert_called_with(ex_cmd, env={}) + + @patch('subprocess.check_call') + @patch.object(fetch, 'import_key') + def test_configure_install_source_deb_url(self, _import, _spcc): + """Test configuring installation source from deb repo url""" + src = ('deb http://ubuntu-cloud.archive.canonical.com/ubuntu ' + 'precise-havana main|KEYID') + openstack.configure_installation_source(src) + _import.assert_called_with('KEYID') + _spcc.assert_called_once_with( + ['add-apt-repository', '--yes', + 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu ' + 'precise-havana main'], env={}) + + @patch.object(fetch, 'get_distrib_codename') + @patch(builtin_open) + @patch('subprocess.check_call') + def test_configure_install_source_distro_proposed( + self, _spcc, _open, _lsb): + """Test configuring installation source from deb repo url""" + _lsb.return_value = FAKE_CODENAME + _file = MagicMock(spec=io.FileIO) + _open.return_value = _file + openstack.configure_installation_source('distro-proposed') + _file.__enter__().write.assert_called_once_with( + '# Proposed\ndeb http://archive.ubuntu.com/ubuntu ' + 'precise-proposed main universe multiverse restricted\n') + src = ('deb http://archive.ubuntu.com/ubuntu/ precise-proposed ' + 'restricted main multiverse universe') + openstack.configure_installation_source(src) + _spcc.assert_called_once_with( + ['add-apt-repository', '--yes', + 'deb http://archive.ubuntu.com/ubuntu/ precise-proposed ' + 'restricted main multiverse universe'], env={}) + + @patch('charmhelpers.fetch.filter_installed_packages') + @patch('charmhelpers.fetch.apt_install') + @patch.object(openstack, 'error_out') + @patch.object(openstack, 'juju_log') + def test_add_source_cloud_invalid_pocket(self, _log, _out, + apt_install, filter_pkg): + openstack.configure_installation_source("cloud:havana-updates") + _e = ('Invalid Cloud Archive release specified: ' + 'havana-updates on this Ubuntuversion') + _s = _out.call_args[0][0] + self.assertTrue(_s.startswith(_e)) + + @patch.object(fetch, 'filter_installed_packages') + @patch.object(fetch, 'apt_install') + @patch.object(fetch, 'get_distrib_codename') + def test_add_source_cloud_pocket_style(self, get_distrib_codename, + apt_install, filter_pkg): + source = "cloud:precise-updates/havana" + get_distrib_codename.return_value = 'precise' + result = ( + "# Ubuntu Cloud Archive\n" + "deb http://ubuntu-cloud.archive.canonical.com/ubuntu " + "precise-updates/havana main\n") + with patch_open() as (mock_open, mock_file): + openstack.configure_installation_source(source) + 
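# patch_open() from tests.helpers intercepts the sources.list.d + # write so the rendered Cloud Archive stanza can be asserted on + 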
mock_file.write.assert_called_with(result) + filter_pkg.assert_called_with(['ubuntu-cloud-keyring']) + + @patch.object(fetch, 'filter_installed_packages') + @patch.object(fetch, 'apt_install') + @patch.object(fetch, 'get_distrib_codename') + def test_add_source_cloud_os_style(self, get_distrib_codename, + apt_install, filter_pkg): + source = "cloud:precise-havana" + get_distrib_codename.return_value = 'precise' + result = ( + "# Ubuntu Cloud Archive\n" + "deb http://ubuntu-cloud.archive.canonical.com/ubuntu " + "precise-updates/havana main\n") + with patch_open() as (mock_open, mock_file): + openstack.configure_installation_source(source) + mock_file.write.assert_called_with(result) + filter_pkg.assert_called_with(['ubuntu-cloud-keyring']) + + @patch.object(fetch, 'filter_installed_packages') + @patch.object(fetch, 'apt_install') + def test_add_source_cloud_distroless_style(self, apt_install, filter_pkg): + source = "cloud:havana" + result = ( + "# Ubuntu Cloud Archive\n" + "deb http://ubuntu-cloud.archive.canonical.com/ubuntu " + "precise-updates/havana main\n") + with patch_open() as (mock_open, mock_file): + openstack.configure_installation_source(source) + mock_file.write.assert_called_with(result) + filter_pkg.assert_called_with(['ubuntu-cloud-keyring']) + + @patch('charmhelpers.fetch.ubuntu.log', lambda *args, **kwargs: None) + @patch('charmhelpers.contrib.openstack.utils.juju_log', + lambda *args, **kwargs: None) + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_configure_bad_install_source(self, _error): + openstack.configure_installation_source('foo') + _error.assert_called_with("Unknown source: 'foo'") + + @patch.object(fetch, 'get_distrib_codename') + def test_configure_install_source_uca_staging(self, _lsb): + """Test configuring installation source from UCA staging sources""" + _lsb.return_value = FAKE_CODENAME + # staging pockets are configured as PPAs + with patch('subprocess.check_call') as _subp: + src = 'cloud:precise-folsom/staging' + openstack.configure_installation_source(src) + cmd = ['add-apt-repository', '-y', + 'ppa:ubuntu-cloud-archive/folsom-staging'] + _subp.assert_called_with(cmd, env={}) + + @patch(builtin_open) + @patch.object(fetch, 'apt_install') + @patch.object(fetch, 'get_distrib_codename') + @patch.object(fetch, 'filter_installed_packages') + def test_configure_install_source_uca_repos( + self, _fip, _lsb, _install, _open): + """Test configuring installation source from UCA sources""" + _lsb.return_value = FAKE_CODENAME + _file = MagicMock(spec=io.FileIO) + _open.return_value = _file + _fip.side_effect = lambda x: x + for src, url in UCA_SOURCES: + actual_url = "# Ubuntu Cloud Archive\n{}\n".format(url) + openstack.configure_installation_source(src) + _install.assert_called_with(['ubuntu-cloud-keyring'], + fatal=True) + _open.assert_called_with( + '/etc/apt/sources.list.d/cloud-archive.list', + 'w' + ) + _file.__enter__().write.assert_called_with(actual_url) + + @patch('charmhelpers.contrib.openstack.utils.error_out') + def test_configure_install_source_bad_uca(self, mocked_error): + """Test configuring installation source from bad UCA source""" + try: + openstack.configure_installation_source('cloud:foo-bar') + except Exception: + # ignore exceptions that raise when error_out is mocked + # and doesn't sys.exit(1) + pass + _e = ('Invalid Cloud Archive release specified: foo-bar' + ' on this Ubuntuversion') + _s = mocked_error.call_args[0][0] + self.assertTrue(_s.startswith(_e)) + + @patch.object(openstack, 'fetch_import_key') + def 
test_import_key_calls_fetch_import_key(self, fetch_import_key):
+ openstack.import_key('random-string')
+ fetch_import_key.assert_called_once_with('random-string')
+
+ @patch.object(openstack, 'juju_log', lambda *args, **kwargs: None)
+ @patch.object(openstack, 'fetch_import_key')
+ @patch.object(openstack, 'sys')
+ def test_import_key_calls_sys_exit_on_error(self, mock_sys,
+ fetch_import_key):
+
+ def raiser(_):
+ raise openstack.GPGKeyError("an error occurred")
+ fetch_import_key.side_effect = raiser
+ openstack.import_key('random failure')
+ mock_sys.exit.assert_called_once_with(1)
+
+ @patch('os.mkdir')
+ @patch('os.path.exists')
+ @patch('charmhelpers.contrib.openstack.utils.charm_dir')
+ @patch(builtin_open)
+ def test_save_scriptrc(self, _open, _charm_dir, _exists, _mkdir):
+ """Test generation of scriptrc from environment"""
+ scriptrc = ['#!/bin/bash\n',
+ 'export setting1=foo\n',
+ 'export setting2=bar\n']
+ _file = MagicMock(spec=io.FileIO)
+ _open.return_value = _file
+ _charm_dir.return_value = '/var/lib/juju/units/testing-foo-0/charm'
+ _exists.return_value = False
+ os.environ['JUJU_UNIT_NAME'] = 'testing-foo/0'
+ openstack.save_script_rc(setting1='foo', setting2='bar')
+ rcdir = '/var/lib/juju/units/testing-foo-0/charm/scripts'
+ _mkdir.assert_called_with(rcdir)
+ expected_f = '/var/lib/juju/units/testing-foo-0/charm/scripts/scriptrc'
+ _open.assert_called_with(expected_f, 'wt')
+ _mkdir.assert_called_with(os.path.dirname(expected_f))
+ _file.__enter__().write.assert_has_calls(
+ list(call(line) for line in scriptrc), any_order=True)
+
+ @patch.object(openstack, 'lsb_release')
+ @patch.object(openstack, 'get_os_version_package')
+ @patch.object(openstack, 'get_os_version_codename_swift')
+ @patch.object(openstack, 'config')
+ def test_openstack_upgrade_detection_true(self, config, vers_swift,
+ vers_pkg, lsb):
+ """Test it detects when an openstack package has available upgrade"""
+ lsb.return_value = FAKE_RELEASE
+ config.return_value = 'cloud:precise-havana'
+ vers_pkg.return_value = '2013.1.1'
+ self.assertTrue(openstack.openstack_upgrade_available('nova-common'))
+ # milestone to major release detection
+ vers_pkg.return_value = '2013.2~b1'
+ self.assertTrue(openstack.openstack_upgrade_available('nova-common'))
+ vers_pkg.return_value = '1.9.0'
+ vers_swift.return_value = '2.5.0'
+ self.assertTrue(openstack.openstack_upgrade_available('swift-proxy'))
+ vers_pkg.return_value = '2.5.0'
+ vers_swift.return_value = '2.10.0'
+ self.assertTrue(openstack.openstack_upgrade_available('swift-proxy'))
+
+ @patch.object(openstack, 'lsb_release')
+ @patch.object(openstack, 'get_os_version_package')
+ @patch.object(openstack, 'config')
+ def test_openstack_upgrade_detection_false(self, config, vers_pkg, lsb):
+ """Test it detects when an openstack upgrade is not necessary"""
+ lsb.return_value = FAKE_RELEASE
+ config.return_value = 'cloud:precise-folsom'
+ vers_pkg.return_value = '2013.1.1'
+ self.assertFalse(openstack.openstack_upgrade_available('nova-common'))
+ # milestone to major release detection
+ vers_pkg.return_value = '2013.1~b1'
+ self.assertFalse(openstack.openstack_upgrade_available('nova-common'))
+ # ugly duckling testing
+ config.return_value = 'cloud:precise-havana'
+ vers_pkg.return_value = '1.10.0'
+ self.assertFalse(openstack.openstack_upgrade_available('swift-proxy'))
+
+ @patch.object(openstack, 'is_block_device')
+ @patch.object(openstack, 'error_out')
+ def test_ensure_block_device_bad_config(self, err, is_bd):
+ """Test it doesn't prepare storage with bad
config""" + openstack.ensure_block_device(block_device='none') + self.assertTrue(err.called) + + @patch.object(openstack, 'is_block_device') + @patch.object(openstack, 'ensure_loopback_device') + def test_ensure_block_device_loopback(self, ensure_loopback, is_bd): + """Test it ensures loopback device when checking block device""" + defsize = openstack.DEFAULT_LOOPBACK_SIZE + is_bd.return_value = True + + ensure_loopback.return_value = '/tmp/cinder.img' + result = openstack.ensure_block_device('/tmp/cinder.img') + ensure_loopback.assert_called_with('/tmp/cinder.img', defsize) + self.assertEquals(result, '/tmp/cinder.img') + + ensure_loopback.return_value = '/tmp/cinder-2.img' + result = openstack.ensure_block_device('/tmp/cinder-2.img|15G') + ensure_loopback.assert_called_with('/tmp/cinder-2.img', '15G') + self.assertEquals(result, '/tmp/cinder-2.img') + + @patch.object(openstack, 'is_block_device') + def test_ensure_standard_block_device(self, is_bd): + """Test it looks for storage at both relative and full device path""" + for dev in ['vdb', '/dev/vdb']: + openstack.ensure_block_device(dev) + is_bd.assert_called_with('/dev/vdb') + + @patch.object(openstack, 'is_block_device') + @patch.object(openstack, 'error_out') + def test_ensure_nonexistent_block_device(self, error_out, is_bd): + """Test it will not ensure a non-existant block device""" + is_bd.return_value = False + openstack.ensure_block_device(block_device='foo') + self.assertTrue(error_out.called) + + @patch.object(openstack, 'juju_log') + @patch.object(openstack, 'umount') + @patch.object(openstack, 'mounts') + @patch.object(openstack, 'zap_disk') + @patch.object(openstack, 'is_lvm_physical_volume') + def test_clean_storage_unmount(self, is_pv, zap_disk, mounts, umount, log): + """Test it unmounts block device when cleaning storage""" + is_pv.return_value = False + zap_disk.return_value = True + mounts.return_value = MOUNTS + openstack.clean_storage('/dev/vdb') + umount.called_with('/dev/vdb', True) + + @patch.object(openstack, 'juju_log') + @patch.object(openstack, 'remove_lvm_physical_volume') + @patch.object(openstack, 'deactivate_lvm_volume_group') + @patch.object(openstack, 'mounts') + @patch.object(openstack, 'is_lvm_physical_volume') + def test_clean_storage_lvm_wipe(self, is_pv, mounts, rm_lv, rm_vg, log): + """Test it removes traces of LVM when cleaning storage""" + mounts.return_value = [] + is_pv.return_value = True + openstack.clean_storage('/dev/vdb') + rm_lv.assert_called_with('/dev/vdb') + rm_vg .assert_called_with('/dev/vdb') + + @patch.object(openstack, 'zap_disk') + @patch.object(openstack, 'is_lvm_physical_volume') + @patch.object(openstack, 'mounts') + def test_clean_storage_zap_disk(self, mounts, is_pv, zap_disk): + """It removes traces of LVM when cleaning storage""" + mounts.return_value = [] + is_pv.return_value = False + openstack.clean_storage('/dev/vdb') + zap_disk.assert_called_with('/dev/vdb') + + @patch('os.path.isfile') + @patch(builtin_open) + def test_get_matchmaker_map(self, _open, _isfile): + _isfile.return_value = True + mm_data = """ + { + "cinder-scheduler": [ + "juju-t-machine-4" + ] + } + """ + fh = _open.return_value.__enter__.return_value + fh.read.return_value = mm_data + self.assertEqual( + openstack.get_matchmaker_map(), + {'cinder-scheduler': ['juju-t-machine-4']} + ) + + @patch('os.path.isfile') + @patch(builtin_open) + def test_get_matchmaker_map_nofile(self, _open, _isfile): + _isfile.return_value = False + self.assertEqual( + openstack.get_matchmaker_map(), + {} + ) + + def 
test_incomplete_relation_data(self): + configs = MagicMock() + configs.complete_contexts.return_value = ['pgsql-db', 'amqp'] + required_interfaces = { + 'database': ['shared-db', 'pgsql-db'], + 'message': ['amqp', 'zeromq-configuration'], + 'identity': ['identity-service']} + expected_result = 'identity' + + result = openstack.incomplete_relation_data( + configs, required_interfaces) + self.assertTrue(expected_result in result.keys()) + + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_complete( + self, is_unit_paused_set, status_set, log): + configs = MagicMock() + configs.complete_contexts.return_value = ['shared-db', + 'amqp', + 'identity-service'] + required_interfaces = { + 'database': ['shared-db', 'pgsql-db'], + 'message': ['amqp', 'zeromq-configuration'], + 'identity': ['identity-service']} + + openstack.set_os_workload_status(configs, required_interfaces) + status_set.assert_called_with('active', 'Unit is ready') + + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.incomplete_relation_data', + return_value={'identity': {'identity-service': {'related': True}}}) + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_related_incomplete( + self, is_unit_paused_set, status_set, + incomplete_relation_data, log): + configs = MagicMock() + configs.complete_contexts.return_value = ['shared-db', 'amqp'] + required_interfaces = { + 'database': ['shared-db', 'pgsql-db'], + 'message': ['amqp', 'zeromq-configuration'], + 'identity': ['identity-service']} + + openstack.set_os_workload_status(configs, required_interfaces) + status_set.assert_called_with('waiting', + "Incomplete relations: identity") + + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.incomplete_relation_data', + return_value={'identity': {'identity-service': {'related': False}}}) + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_absent( + self, is_unit_paused_set, status_set, + incomplete_relation_data, log): + configs = MagicMock() + configs.complete_contexts.return_value = ['shared-db', 'amqp'] + required_interfaces = { + 'database': ['shared-db', 'pgsql-db'], + 'message': ['amqp', 'zeromq-configuration'], + 'identity': ['identity-service']} + + openstack.set_os_workload_status(configs, required_interfaces) + status_set.assert_called_with('blocked', + 'Missing relations: identity') + + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.hook_name', + return_value='identity-service-relation-broken') + @patch('charmhelpers.contrib.openstack.utils.incomplete_relation_data', + return_value={'identity': {'identity-service': {'related': True}}}) + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_related_broken( + self, is_unit_paused_set, status_set, + incomplete_relation_data, hook_name, log): + configs = MagicMock() + configs.complete_contexts.return_value = ['shared-db', 'amqp'] + required_interfaces = { + 'database': ['shared-db', 'pgsql-db'], + 'message': ['amqp', 
'zeromq-configuration'], + 'identity': ['identity-service']} + + openstack.set_os_workload_status(configs, required_interfaces) + status_set.assert_called_with('blocked', + "Missing relations: identity") + + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.incomplete_relation_data', + return_value={'identity': + {'identity-service': {'related': True}}, + + 'message': + {'amqp': {'missing_data': ['rabbitmq-password'], + 'related': True}}, + + 'database': + {'shared-db': {'related': False}} + }) + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_mixed( + self, is_unit_paused_set, status_set, + incomplete_relation_data, log): + configs = MagicMock() + configs.complete_contexts.return_value = ['shared-db', 'amqp'] + required_interfaces = { + 'database': ['shared-db', 'pgsql-db'], + 'message': ['amqp', 'zeromq-configuration'], + 'identity': ['identity-service']} + + openstack.set_os_workload_status(configs, required_interfaces) + + args = status_set.call_args + actual_parm1 = args[0][0] + actual_parm2 = args[0][1] + expected1 = ("Missing relations: database; incomplete relations: " + "identity, message") + expected2 = ("Missing relations: database; incomplete relations: " + "message, identity") + self.assertTrue(actual_parm1 == 'blocked') + self.assertTrue(actual_parm2 == expected1 or actual_parm2 == expected2) + + @patch('charmhelpers.contrib.openstack.utils.service_running') + @patch('charmhelpers.contrib.openstack.utils.port_has_listener') + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_complete_with_services_list( + self, is_unit_paused_set, status_set, log, + port_has_listener, service_running): + configs = MagicMock() + configs.complete_contexts.return_value = [] + required_interfaces = {} + + services = ['database', 'identity'] + # Assume that the service and ports are open. 
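+ # return_value (as opposed to side_effect) answers every call the
+ # same way, so each listed service reports as running and every
+ # probed port reports a listener.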
+ port_has_listener.return_value = True + service_running.return_value = True + + openstack.set_os_workload_status( + configs, required_interfaces, services=services) + status_set.assert_called_with('active', 'Unit is ready') + + @patch('charmhelpers.contrib.openstack.utils.service_running') + @patch('charmhelpers.contrib.openstack.utils.port_has_listener') + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_complete_services_list_not_running( + self, is_unit_paused_set, status_set, log, + port_has_listener, service_running): + configs = MagicMock() + configs.complete_contexts.return_value = [] + required_interfaces = {} + + services = ['database', 'identity'] + port_has_listener.return_value = True + # Fail the identity service + service_running.side_effect = [True, False] + + openstack.set_os_workload_status( + configs, required_interfaces, services=services) + status_set.assert_called_with( + 'blocked', + 'Services not running that should be: identity') + + @patch('charmhelpers.contrib.openstack.utils.service_running') + @patch('charmhelpers.contrib.openstack.utils.port_has_listener') + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_complete_with_services( + self, is_unit_paused_set, status_set, log, + port_has_listener, service_running): + configs = MagicMock() + configs.complete_contexts.return_value = [] + required_interfaces = {} + + services = [ + {'service': 'database', 'ports': [10, 20]}, + {'service': 'identity', 'ports': [30]}, + ] + # Assume that the service and ports are open. 
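+ # With the dict form each entry names its ports explicitly; the
+ # mocks still answer True for every check, so the workload status
+ # should settle on 'active'.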
+ port_has_listener.return_value = True + service_running.return_value = True + + openstack.set_os_workload_status( + configs, required_interfaces, services=services) + status_set.assert_called_with('active', 'Unit is ready') + + @patch('charmhelpers.contrib.openstack.utils.service_running') + @patch('charmhelpers.contrib.openstack.utils.port_has_listener') + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_complete_service_not_running( + self, is_unit_paused_set, status_set, log, + port_has_listener, service_running): + configs = MagicMock() + configs.complete_contexts.return_value = [] + required_interfaces = {} + + services = [ + {'service': 'database', 'ports': [10, 20]}, + {'service': 'identity', 'ports': [30]}, + ] + port_has_listener.return_value = True + # Fail the identity service + service_running.side_effect = [True, False] + + openstack.set_os_workload_status( + configs, required_interfaces, services=services) + status_set.assert_called_with( + 'blocked', + 'Services not running that should be: identity') + + @patch('charmhelpers.contrib.openstack.utils.service_running') + @patch('charmhelpers.contrib.openstack.utils.port_has_listener') + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_complete_port_not_open( + self, is_unit_paused_set, status_set, log, + port_has_listener, service_running): + configs = MagicMock() + configs.complete_contexts.return_value = [] + required_interfaces = {} + + services = [ + {'service': 'database', 'ports': [10, 20]}, + {'service': 'identity', 'ports': [30]}, + ] + port_has_listener.side_effect = [True, False, True] + # Fail the identity service + service_running.return_value = True + + openstack.set_os_workload_status( + configs, required_interfaces, services=services) + status_set.assert_called_with( + 'blocked', + 'Services with ports not open that should be:' + ' database: [20]') + + @patch('charmhelpers.contrib.openstack.utils.port_has_listener') + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=False) + def test_set_os_workload_status_complete_ports_not_open( + self, is_unit_paused_set, status_set, log, port_has_listener): + configs = MagicMock() + configs.complete_contexts.return_value = [] + required_interfaces = {} + + ports = [50, 60, 70] + port_has_listener.side_effect = [True, False, True] + + openstack.set_os_workload_status( + configs, required_interfaces, ports=ports) + status_set.assert_called_with( + 'blocked', + 'Ports which should be open, but are not: 60') + + @patch.object(openstack, 'juju_log') + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set', + return_value=True) + def test_set_os_workload_status_paused_simple( + self, is_unit_paused_set, status_set, log): + configs = MagicMock() + configs.complete_contexts.return_value = [] + required_interfaces = {} + + openstack.set_os_workload_status(configs, required_interfaces) + status_set.assert_called_with( + 'maintenance', + "Paused. 
Use 'resume' action to resume normal service.")
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ @patch.object(openstack, 'juju_log')
+ @patch('charmhelpers.contrib.openstack.utils.status_set')
+ @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
+ return_value=True)
+ def test_set_os_workload_status_paused_services_check(
+ self, is_unit_paused_set, status_set, log,
+ port_has_listener, service_running):
+ configs = MagicMock()
+ configs.complete_contexts.return_value = []
+ required_interfaces = {}
+
+ services = [
+ {'service': 'database', 'ports': [10, 20]},
+ {'service': 'identity', 'ports': [30]},
+ ]
+ port_has_listener.return_value = False
+ service_running.side_effect = [False, False]
+
+ openstack.set_os_workload_status(
+ configs, required_interfaces, services=services)
+ status_set.assert_called_with(
+ 'maintenance',
+ "Paused. Use 'resume' action to resume normal service.")
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ @patch.object(openstack, 'juju_log')
+ @patch('charmhelpers.contrib.openstack.utils.status_set')
+ @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
+ return_value=True)
+ def test_set_os_workload_status_paused_services_fail(
+ self, is_unit_paused_set, status_set, log,
+ port_has_listener, service_running):
+ configs = MagicMock()
+ configs.complete_contexts.return_value = []
+ required_interfaces = {}
+
+ services = [
+ {'service': 'database', 'ports': [10, 20]},
+ {'service': 'identity', 'ports': [30]},
+ ]
+ port_has_listener.return_value = False
+ # Fail the identity service
+ service_running.side_effect = [False, True]
+
+ openstack.set_os_workload_status(
+ configs, required_interfaces, services=services)
+ status_set.assert_called_with(
+ 'blocked',
+ "Services should be paused but these services running: identity")
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ @patch.object(openstack, 'juju_log')
+ @patch('charmhelpers.contrib.openstack.utils.status_set')
+ @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
+ return_value=True)
+ def test_set_os_workload_status_paused_services_ports_fail(
+ self, is_unit_paused_set, status_set, log,
+ port_has_listener, service_running):
+ configs = MagicMock()
+ configs.complete_contexts.return_value = []
+ required_interfaces = {}
+
+ services = [
+ {'service': 'database', 'ports': [10, 20]},
+ {'service': 'identity', 'ports': [30]},
+ ]
+ # leave port 20 of the database service still listening.
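+ # side_effect yields one value per call, in order; the ports are
+ # probed in declaration order (10, 20, 30), so [False, True, False]
+ # reports only port 20 as still open on a unit that claims to be
+ # paused.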
+ port_has_listener.side_effect = [False, True, False]
+ service_running.return_value = False
+
+ openstack.set_os_workload_status(
+ configs, required_interfaces, services=services)
+ status_set.assert_called_with(
+ 'blocked',
+ "Services should be paused but these service:ports are open:"
+ " database: [20]")
+
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ @patch.object(openstack, 'juju_log')
+ @patch('charmhelpers.contrib.openstack.utils.status_set')
+ @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
+ return_value=True)
+ def test_set_os_workload_status_paused_ports_check(
+ self, is_unit_paused_set, status_set, log,
+ port_has_listener):
+ configs = MagicMock()
+ configs.complete_contexts.return_value = []
+ required_interfaces = {}
+
+ ports = [50, 60, 70]
+ port_has_listener.side_effect = [False, False, False]
+
+ openstack.set_os_workload_status(
+ configs, required_interfaces, ports=ports)
+ status_set.assert_called_with(
+ 'maintenance',
+ "Paused. Use 'resume' action to resume normal service.")
+
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ @patch.object(openstack, 'juju_log')
+ @patch('charmhelpers.contrib.openstack.utils.status_set')
+ @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
+ return_value=True)
+ def test_set_os_workload_status_paused_ports_fail(
+ self, is_unit_paused_set, status_set, log,
+ port_has_listener):
+ configs = MagicMock()
+ configs.complete_contexts.return_value = []
+ required_interfaces = {}
+
+ # leave port 70 open so the unit appears not to be fully paused
+ ports = [50, 60, 70]
+ port_has_listener.side_effect = [False, False, True]
+
+ openstack.set_os_workload_status(
+ configs, required_interfaces, ports=ports)
+ status_set.assert_called_with(
+ 'blocked',
+ "Services should be paused but "
+ "these ports which should be closed, but are open: 70")
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ def test_check_actually_paused_simple_services(
+ self, port_has_listener, service_running):
+ services = ['database', 'identity']
+ port_has_listener.return_value = False
+ service_running.return_value = False
+
+ state, message = openstack.check_actually_paused(
+ services)
+ self.assertEquals(state, None)
+ self.assertEquals(message, None)
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ def test_check_actually_paused_simple_services_fail(
+ self, port_has_listener, service_running):
+ services = ['database', 'identity']
+ port_has_listener.return_value = False
+ service_running.side_effect = [False, True]
+
+ state, message = openstack.check_actually_paused(
+ services)
+ self.assertEquals(state, 'blocked')
+ self.assertEquals(
+ message,
+ "Services should be paused but these services running: identity")
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ def test_check_actually_paused_services_dict(
+ self, port_has_listener, service_running):
+ services = [
+ {'service': 'database', 'ports': [10, 20]},
+ {'service': 'identity', 'ports': [30]},
+ ]
+ # All ports closed and all services stopped, as a paused unit should be.
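+ # Everything closed and stopped is exactly what a paused unit should
+ # report, so check_actually_paused is expected to return (None, None).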
+ port_has_listener.return_value = False
+ service_running.return_value = False
+
+ state, message = openstack.check_actually_paused(
+ services)
+ self.assertEquals(state, None)
+ self.assertEquals(message, None)
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ def test_check_actually_paused_services_dict_fail(
+ self, port_has_listener, service_running):
+ services = [
+ {'service': 'database', 'ports': [10, 20]},
+ {'service': 'identity', 'ports': [30]},
+ ]
+ # Ports are closed, but the identity service is still running.
+ port_has_listener.return_value = False
+ service_running.side_effect = [False, True]
+
+ state, message = openstack.check_actually_paused(
+ services)
+ self.assertEquals(state, 'blocked')
+ self.assertEquals(
+ message,
+ "Services should be paused but these services running: identity")
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ def test_check_actually_paused_services_dict_ports_fail(
+ self, port_has_listener, service_running):
+ services = [
+ {'service': 'database', 'ports': [10, 20]},
+ {'service': 'identity', 'ports': [30]},
+ ]
+ # Services are stopped, but port 20 is still listening.
+ port_has_listener.side_effect = [False, True, False]
+ service_running.return_value = False
+
+ state, message = openstack.check_actually_paused(
+ services)
+ self.assertEquals(state, 'blocked')
+ self.assertEquals(message,
+ 'Services should be paused but these service:ports'
+ ' are open: database: [20]')
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ def test_check_actually_paused_ports_okay(
+ self, port_has_listener, service_running):
+ port_has_listener.side_effect = [False, False, False]
+ service_running.return_value = False
+ ports = [50, 60, 70]
+
+ state, message = openstack.check_actually_paused(
+ ports=ports)
+ self.assertEquals(state, None)
+ self.assertEquals(message, None)
+
+ @patch('charmhelpers.contrib.openstack.utils.service_running')
+ @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
+ def test_check_actually_paused_ports_fail(
+ self, port_has_listener, service_running):
+ port_has_listener.side_effect = [False, True, False]
+ service_running.return_value = False
+ ports = [50, 60, 70]
+
+ state, message = openstack.check_actually_paused(
+ ports=ports)
+ self.assertEquals(state, 'blocked')
+ self.assertEquals(message,
+ 'Services should be paused but these ports '
+ 'which should be closed, but are open: 60')
+
+ @staticmethod
+ def _unit_paused_helper(hook_data_mock):
+ # HookData()() returns a tuple (kv, delta_config, delta_relation)
+ # but we only want kv in the test.
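+ # The helper below therefore fakes HookData so that the mocked
+ # HookData() yields a context manager producing (kv, True, False),
+ # and hands the kv mock back to the caller for assertions.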
+ kv = MagicMock() + + @contextlib.contextmanager + def hook_data__call__(): + yield (kv, True, False) + + hook_data__call__.return_value = (kv, True, False) + hook_data_mock.return_value = hook_data__call__ + return kv + + @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData') + def test_set_unit_paused(self, hook_data): + kv = self._unit_paused_helper(hook_data) + openstack.set_unit_paused() + kv.set.assert_called_once_with('unit-paused', True) + + @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData') + def test_set_unit_upgrading(self, hook_data): + kv = self._unit_paused_helper(hook_data) + openstack.set_unit_upgrading() + kv.set.assert_called_once_with('unit-upgrading', True) + + @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData') + def test_clear_unit_paused(self, hook_data): + kv = self._unit_paused_helper(hook_data) + openstack.clear_unit_paused() + kv.set.assert_called_once_with('unit-paused', False) + + @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData') + def test_clear_unit_upgrading(self, hook_data): + kv = self._unit_paused_helper(hook_data) + openstack.clear_unit_upgrading() + kv.set.assert_called_once_with('unit-upgrading', False) + + @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData') + def test_is_unit_paused_set(self, hook_data): + kv = self._unit_paused_helper(hook_data) + kv.get.return_value = True + r = openstack.is_unit_paused_set() + kv.get.assert_called_once_with('unit-paused') + self.assertEquals(r, True) + kv.get.return_value = False + r = openstack.is_unit_paused_set() + self.assertEquals(r, False) + + @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData') + def test_is_unit_upgrading_set(self, hook_data): + kv = self._unit_paused_helper(hook_data) + kv.get.return_value = True + r = openstack.is_unit_upgrading_set() + kv.get.assert_called_once_with('unit-upgrading') + self.assertEquals(r, True) + kv.get.return_value = False + r = openstack.is_unit_upgrading_set() + self.assertEquals(r, False) + + @patch('charmhelpers.contrib.openstack.utils.service_stop') + def test_manage_payload_services_ok(self, service_stop): + services = ['service1', 'service2'] + service_stop.side_effect = [True, True] + self.assertEqual( + openstack.manage_payload_services('stop', services=services), + (True, [])) + + @patch('charmhelpers.contrib.openstack.utils.service_stop') + def test_manage_payload_services_fails(self, service_stop): + services = ['service1', 'service2'] + service_stop.side_effect = [True, False] + self.assertEqual( + openstack.manage_payload_services('stop', services=services), + (False, ["service2 didn't stop cleanly."])) + + @patch('charmhelpers.contrib.openstack.utils.service_stop') + def test_manage_payload_services_charm_func(self, service_stop): + bespoke_func = MagicMock() + bespoke_func.return_value = None + services = ['service1', 'service2'] + service_stop.side_effect = [True, True] + self.assertEqual( + openstack.manage_payload_services('stop', services=services, + charm_func=bespoke_func), + (True, [])) + bespoke_func.assert_called_once_with() + + @patch('charmhelpers.contrib.openstack.utils.service_stop') + def test_manage_payload_services_charm_func_msg(self, service_stop): + bespoke_func = MagicMock() + bespoke_func.return_value = 'it worked' + services = ['service1', 'service2'] + service_stop.side_effect = [True, True] + self.assertEqual( + openstack.manage_payload_services('stop', services=services, + charm_func=bespoke_func), + (True, ['it worked'])) + 
bespoke_func.assert_called_once_with() + + @patch('charmhelpers.contrib.openstack.utils.service_stop') + def test_manage_payload_services_charm_func_fails(self, service_stop): + bespoke_func = MagicMock() + bespoke_func.side_effect = Exception('it failed') + services = ['service1', 'service2'] + service_stop.side_effect = [True, True] + self.assertEqual( + openstack.manage_payload_services('stop', services=services, + charm_func=bespoke_func), + (False, ['it failed'])) + bespoke_func.assert_called_once_with() + + def test_manage_payload_services_wrong_action(self): + self.assertRaises( + RuntimeError, + openstack.manage_payload_services, + 'mangle') + + @patch('charmhelpers.contrib.openstack.utils.service_pause') + @patch('charmhelpers.contrib.openstack.utils.set_unit_paused') + def test_pause_unit_okay(self, set_unit_paused, service_pause): + services = ['service1', 'service2'] + service_pause.side_effect = [True, True] + openstack.pause_unit(None, services=services) + set_unit_paused.assert_called_once_with() + self.assertEquals(service_pause.call_count, 2) + + @patch('charmhelpers.contrib.openstack.utils.service_pause') + @patch('charmhelpers.contrib.openstack.utils.set_unit_paused') + def test_pause_unit_service_fails(self, set_unit_paused, service_pause): + services = ['service1', 'service2'] + service_pause.side_effect = [True, True] + openstack.pause_unit(None, services=services) + set_unit_paused.assert_called_once_with() + self.assertEquals(service_pause.call_count, 2) + # Fail the 2nd service + service_pause.side_effect = [True, False] + try: + openstack.pause_unit(None, services=services) + raise Exception("pause_unit should have raised Exception") + except Exception as e: + self.assertEquals(e.args[0], + "Couldn't pause: service2 didn't pause cleanly.") + + @patch('charmhelpers.contrib.openstack.utils.service_pause') + @patch('charmhelpers.contrib.openstack.utils.set_unit_paused') + def test_pause_unit_service_charm_func( + self, set_unit_paused, service_pause): + services = ['service1', 'service2'] + service_pause.return_value = True + charm_func = MagicMock() + charm_func.return_value = None + openstack.pause_unit(None, services=services, charm_func=charm_func) + charm_func.assert_called_once_with() + # fail the charm_func + charm_func.return_value = "Custom charm failed" + try: + openstack.pause_unit( + None, services=services, charm_func=charm_func) + raise Exception("pause_unit should have raised Exception") + except Exception as e: + self.assertEquals(e.args[0], + "Couldn't pause: Custom charm failed") + + @patch('charmhelpers.contrib.openstack.utils.service_pause') + @patch('charmhelpers.contrib.openstack.utils.set_unit_paused') + def test_pause_unit_assess_status_func( + self, set_unit_paused, service_pause): + services = ['service1', 'service2'] + service_pause.return_value = True + assess_status_func = MagicMock() + assess_status_func.return_value = None + openstack.pause_unit(assess_status_func, services=services) + assess_status_func.assert_called_once_with() + # fail the assess_status_func + assess_status_func.return_value = "assess_status_func failed" + try: + openstack.pause_unit(assess_status_func, services=services) + raise Exception("pause_unit should have raised Exception") + except Exception as e: + self.assertEquals(e.args[0], + "Couldn't pause: assess_status_func failed") + + @patch('charmhelpers.contrib.openstack.utils.service_resume') + @patch('charmhelpers.contrib.openstack.utils.clear_unit_paused') + def test_resume_unit_okay(self, clear_unit_paused, 
service_resume): + services = ['service1', 'service2'] + service_resume.side_effect = [True, True] + openstack.resume_unit(None, services=services) + clear_unit_paused.assert_called_once_with() + self.assertEquals(service_resume.call_count, 2) + + @patch('charmhelpers.contrib.openstack.utils.service_resume') + @patch('charmhelpers.contrib.openstack.utils.clear_unit_paused') + def test_resume_unit_service_fails( + self, clear_unit_paused, service_resume): + services = ['service1', 'service2'] + service_resume.side_effect = [True, True] + openstack.resume_unit(None, services=services) + clear_unit_paused.assert_called_once_with() + self.assertEquals(service_resume.call_count, 2) + # Fail the 2nd service + service_resume.side_effect = [True, False] + try: + openstack.resume_unit(None, services=services) + raise Exception("resume_unit should have raised Exception") + except Exception as e: + self.assertEquals( + e.args[0], "Couldn't resume: service2 didn't resume cleanly.") + + @patch('charmhelpers.contrib.openstack.utils.service_resume') + @patch('charmhelpers.contrib.openstack.utils.clear_unit_paused') + def test_resume_unit_service_charm_func( + self, clear_unit_paused, service_resume): + services = ['service1', 'service2'] + service_resume.return_value = True + charm_func = MagicMock() + charm_func.return_value = None + openstack.resume_unit(None, services=services, charm_func=charm_func) + charm_func.assert_called_once_with() + # fail the charm_func + charm_func.return_value = "Custom charm failed" + try: + openstack.resume_unit( + None, services=services, charm_func=charm_func) + raise Exception("resume_unit should have raised Exception") + except Exception as e: + self.assertEquals(e.args[0], + "Couldn't resume: Custom charm failed") + + @patch('charmhelpers.contrib.openstack.utils.service_resume') + @patch('charmhelpers.contrib.openstack.utils.clear_unit_paused') + def test_resume_unit_assess_status_func( + self, clear_unit_paused, service_resume): + services = ['service1', 'service2'] + service_resume.return_value = True + assess_status_func = MagicMock() + assess_status_func.return_value = None + openstack.resume_unit(assess_status_func, services=services) + assess_status_func.assert_called_once_with() + # fail the assess_status_func + assess_status_func.return_value = "assess_status_func failed" + try: + openstack.resume_unit(assess_status_func, services=services) + raise Exception("resume_unit should have raised Exception") + except Exception as e: + self.assertEquals(e.args[0], + "Couldn't resume: assess_status_func failed") + + @patch('charmhelpers.contrib.openstack.utils.status_set') + @patch('charmhelpers.contrib.openstack.utils.' + '_determine_os_workload_status') + def test_make_assess_status_func(self, _determine_os_workload_status, + status_set): + _determine_os_workload_status.return_value = ('active', 'fine') + f = openstack.make_assess_status_func('one', 'two', three='three') + r = f() + self.assertEquals(r, None) + _determine_os_workload_status.assert_called_once_with( + 'one', 'two', three='three') + status_set.assert_called_once_with('active', 'fine') + # return something other than 'active' or 'maintenance' + _determine_os_workload_status.return_value = ('broken', 'damaged') + r = f() + self.assertEquals(r, 'damaged') + + # TODO(ajkavanagh) -- there should be a test for + # _determine_os_workload_status() as the policyd override code has changed + # it, but there wasn't a test previously. 
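+ # (As exercised above, make_assess_status_func returns None when the
+ # determined state is 'active' or 'maintenance', and otherwise returns
+ # the status message so that callers can react to a degraded state.)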
+ + @patch.object(openstack, 'restart_on_change_helper') + @patch.object(openstack, 'is_unit_paused_set') + def test_pausable_restart_on_change( + self, is_unit_paused_set, restart_on_change_helper): + @openstack.pausable_restart_on_change({}) + def test_func(): + pass + + # test with pause: restart_on_change_helper should not be called. + is_unit_paused_set.return_value = True + test_func() + self.assertEquals(restart_on_change_helper.call_count, 0) + + # test without pause: restart_on_change_helper should be called. + is_unit_paused_set.return_value = False + test_func() + self.assertEquals(restart_on_change_helper.call_count, 1) + + @patch.object(openstack, 'restart_on_change_helper') + @patch.object(openstack, 'is_unit_paused_set') + def test_pausable_restart_on_change_with_callable( + self, is_unit_paused_set, restart_on_change_helper): + mock_test = MagicMock() + mock_test.called_set = False + + def _restart_map(): + mock_test.called_set = True + return {"a": "b"} + + @openstack.pausable_restart_on_change(_restart_map) + def test_func(): + pass + + self.assertFalse(mock_test.called_set) + is_unit_paused_set.return_value = False + test_func() + self.assertEquals(restart_on_change_helper.call_count, 1) + self.assertTrue(mock_test.called_set) + + @patch.object(openstack, 'juju_log') + @patch.object(openstack, 'action_set') + @patch.object(openstack, 'action_fail') + @patch.object(openstack, 'openstack_upgrade_available') + @patch('charmhelpers.contrib.openstack.utils.config') + def test_openstack_upgrade(self, config, openstack_upgrade_available, + action_fail, action_set, log): + def do_openstack_upgrade(configs): + pass + + openstack_upgrade_available.return_value = True + + # action-managed-upgrade=True + config.side_effect = [True] + + openstack.do_action_openstack_upgrade('package-xyz', + do_openstack_upgrade, + None) + + self.assertTrue(openstack_upgrade_available.called) + msg = ('success, upgrade completed.') + action_set.assert_called_with({'outcome': msg}) + self.assertFalse(action_fail.called) + + @patch.object(openstack, 'juju_log') + @patch.object(openstack, 'action_set') + @patch.object(openstack, 'action_fail') + @patch.object(openstack, 'openstack_upgrade_available') + @patch('charmhelpers.contrib.openstack.utils.config') + def test_openstack_upgrade_not_avail(self, config, + openstack_upgrade_available, + action_fail, action_set, log): + def do_openstack_upgrade(configs): + pass + + openstack_upgrade_available.return_value = False + + openstack.do_action_openstack_upgrade('package-xyz', + do_openstack_upgrade, + None) + + self.assertTrue(openstack_upgrade_available.called) + msg = ('no upgrade available.') + action_set.assert_called_with({'outcome': msg}) + self.assertFalse(action_fail.called) + + @patch.object(openstack, 'juju_log') + @patch.object(openstack, 'action_set') + @patch.object(openstack, 'action_fail') + @patch.object(openstack, 'openstack_upgrade_available') + @patch('charmhelpers.contrib.openstack.utils.config') + def test_openstack_upgrade_config_false(self, config, + openstack_upgrade_available, + action_fail, action_set, log): + def do_openstack_upgrade(configs): + pass + + openstack_upgrade_available.return_value = True + + # action-managed-upgrade=False + config.side_effect = [False] + + openstack.do_action_openstack_upgrade('package-xyz', + do_openstack_upgrade, + None) + + self.assertTrue(openstack_upgrade_available.called) + msg = ('action-managed-upgrade config is False, skipped upgrade.') + action_set.assert_called_with({'outcome': msg}) + 
self.assertFalse(action_fail.called)
+
+ @patch.object(openstack, 'juju_log')
+ @patch.object(openstack, 'action_set')
+ @patch.object(openstack, 'action_fail')
+ @patch.object(openstack, 'openstack_upgrade_available')
+ @patch('traceback.format_exc')
+ @patch('charmhelpers.contrib.openstack.utils.config')
+ def test_openstack_upgrade_traceback(self, config, traceback,
+ openstack_upgrade_available,
+ action_fail, action_set, log):
+ def do_openstack_upgrade(configs):
+ oops() # noqa
+
+ openstack_upgrade_available.return_value = True
+
+ # action-managed-upgrade=True
+ config.side_effect = [True]
+
+ openstack.do_action_openstack_upgrade('package-xyz',
+ do_openstack_upgrade,
+ None)
+
+ self.assertTrue(openstack_upgrade_available.called)
+ msg = 'do_openstack_upgrade resulted in an unexpected error'
+ action_fail.assert_called_with(msg)
+ self.assertTrue(action_set.called)
+ self.assertTrue(traceback.called)
+
+ @patch.object(openstack, 'os_release')
+ @patch.object(openstack, 'application_version_set')
+ def test_os_application_version_set(self,
+ mock_application_version_set,
+ mock_os_release):
+ with patch.object(fetch, 'apt_cache') as cache:
+ cache.return_value = self._apt_cache()
+ mock_os_release.return_value = 'mitaka'
+ openstack.os_application_version_set('neutron-common')
+ mock_application_version_set.assert_called_with('7.0.1')
+ openstack.os_application_version_set('cinder-common')
+ mock_application_version_set.assert_called_with('mitaka')
+
+ @patch.object(openstack, 'valid_snap_channel')
+ @patch('charmhelpers.contrib.openstack.utils.config')
+ def test_snap_install_requested(self, config, valid_snap_channel):
+ valid_snap_channel.return_value = True
+ # Expect True
+ flush('snap_install_requested')
+ config.return_value = 'snap:ocata/edge'
+ self.assertTrue(openstack.snap_install_requested())
+ valid_snap_channel.assert_called_with('edge')
+ flush('snap_install_requested')
+ config.return_value = 'snap:pike'
+ self.assertTrue(openstack.snap_install_requested())
+ valid_snap_channel.assert_called_with('stable')
+ flush('snap_install_requested')
+ config.return_value = 'snap:pike/stable/jamespage'
+ self.assertTrue(openstack.snap_install_requested())
+ valid_snap_channel.assert_called_with('stable')
+ # Expect False
+ flush('snap_install_requested')
+ config.return_value = 'cloud:xenial-ocata'
+ self.assertFalse(openstack.snap_install_requested())
+
+ def test_get_snaps_install_info_from_origin(self):
+ snaps = ['os_project']
+ mode = 'jailmode'
+
+ # snap:track/channel
+ src = 'snap:ocata/beta'
+ expected = {snaps[0]: {'mode': mode,
+ 'channel': '--channel=ocata/beta'}}
+ self.assertEqual(
+ expected,
+ openstack.get_snaps_install_info_from_origin(snaps, src,
+ mode=mode))
+
+ # snap:track/channel/branch
+ src = 'snap:ocata/beta/jamespage'
+ expected = {snaps[0]: {'mode': mode,
+ 'channel': '--channel=ocata/beta/jamespage'}}
+ self.assertEqual(
+ expected,
+ openstack.get_snaps_install_info_from_origin(snaps, src,
+ mode=mode))
+ # snap:track
+ src = 'snap:pike'
+ expected = {snaps[0]: {'mode': mode,
+ 'channel': '--channel=pike'}}
+ self.assertEqual(
+ expected,
+ openstack.get_snaps_install_info_from_origin(snaps, src,
+ mode=mode))
+
+ @patch.object(openstack, 'snap_install')
+ def test_install_os_snaps(self, mock_snap_install):
+ snaps = ['os_project']
+ mode = 'jailmode'
+
+ # snap:track/channel
+ src = 'snap:ocata/beta'
+ openstack.install_os_snaps(
+ openstack.get_snaps_install_info_from_origin(
+ snaps, src, mode=mode))
+ mock_snap_install.assert_called_with(
+
'os_project', '--channel=ocata/beta', '--jailmode') + + # snap:track + src = 'snap:pike' + openstack.install_os_snaps( + openstack.get_snaps_install_info_from_origin( + snaps, src, mode=mode)) + mock_snap_install.assert_called_with( + 'os_project', '--channel=pike', '--jailmode') + + @patch.object(openstack, 'set_unit_upgrading') + @patch.object(openstack, 'is_unit_paused_set') + def test_series_upgrade_prepare( + self, is_unit_paused_set, set_unit_upgrading): + is_unit_paused_set.return_value = False + fake_pause_helper = MagicMock() + fake_configs = MagicMock() + openstack.series_upgrade_prepare(fake_pause_helper, fake_configs) + set_unit_upgrading.assert_called_once() + fake_pause_helper.assert_called_once_with(fake_configs) + + @patch.object(openstack, 'set_unit_upgrading') + @patch.object(openstack, 'is_unit_paused_set') + def test_series_upgrade_prepare_no_pause( + self, is_unit_paused_set, set_unit_upgrading): + is_unit_paused_set.return_value = True + fake_pause_helper = MagicMock() + fake_configs = MagicMock() + openstack.series_upgrade_prepare(fake_pause_helper, fake_configs) + set_unit_upgrading.assert_called_once() + fake_pause_helper.assert_not_called() + + @patch.object(openstack, 'clear_unit_upgrading') + @patch.object(openstack, 'clear_unit_paused') + def test_series_upgrade_complete( + self, clear_unit_paused, clear_unit_upgrading): + fake_resume_helper = MagicMock() + fake_configs = MagicMock() + openstack.series_upgrade_complete(fake_resume_helper, fake_configs) + clear_unit_upgrading.assert_called_once() + clear_unit_paused.assert_called_once() + fake_configs.write_all.assert_called_once() + fake_resume_helper.assert_called_once_with(fake_configs) + + @patch.object(openstack, 'juju_log') + @patch.object(openstack, 'leader_get') + def test_is_db_initialised(self, leader_get, juju_log): + leader_get.return_value = 'True' + self.assertTrue(openstack.is_db_initialised()) + leader_get.return_value = 'False' + self.assertFalse(openstack.is_db_initialised()) + leader_get.return_value = None + self.assertFalse(openstack.is_db_initialised()) + + @patch.object(openstack, 'juju_log') + @patch.object(openstack, 'leader_set') + def test_set_db_initialised(self, leader_set, juju_log): + openstack.set_db_initialised() + leader_set.assert_called_once_with({'db-initialised': True}) + + @patch.object(openstack, 'juju_log') + @patch.object(openstack, 'relation_ids') + @patch.object(openstack, 'related_units') + @patch.object(openstack, 'relation_get') + def test_is_db_maintenance_mode(self, relation_get, related_units, + relation_ids, juju_log): + relation_ids.return_value = ['rid:1'] + related_units.return_value = ['unit/0', 'unit/2'] + rsettings = { + 'rid:1': { + 'unit/0': { + 'private-ip': '1.2.3.4', + 'cluster-series-upgrading': 'True'}, + 'unit/2': { + 'private-ip': '1.2.3.5'}}} + relation_get.side_effect = lambda unit, rid: rsettings[rid][unit] + self.assertTrue(openstack.is_db_maintenance_mode()) + rsettings = { + 'rid:1': { + 'unit/0': { + 'private-ip': '1.2.3.4'}, + 'unit/2': { + 'private-ip': '1.2.3.5'}}} + self.assertFalse(openstack.is_db_maintenance_mode()) + rsettings = { + 'rid:1': { + 'unit/0': { + 'private-ip': '1.2.3.4', + 'cluster-series-upgrading': 'False'}, + 'unit/2': { + 'private-ip': '1.2.3.5'}}} + self.assertFalse(openstack.is_db_maintenance_mode()) + rsettings = { + 'rid:1': { + 'unit/0': { + 'private-ip': '1.2.3.4', + 'cluster-series-upgrading': 'lskjfsd'}, + 'unit/2': { + 'private-ip': '1.2.3.5'}}} + self.assertFalse(openstack.is_db_maintenance_mode()) + + def 
test_get_endpoint_key(self):
+ self.assertEqual(
+ openstack.get_endpoint_key('placement', 'is:2', 'keystone/0'),
+ 'placement-is_2-keystone_0')
+
+ @patch.object(openstack, 'relation_get')
+ @patch.object(openstack, 'related_units')
+ @patch.object(openstack, 'relation_ids')
+ def test_get_endpoint_notifications(self, relation_ids, related_units,
+ relation_get):
+ id_svc_rel_units = {
+ 'identity-service:3': ['keystone/0', 'keystone/1', 'keystone/2']
+ }
+
+ def _related_units(relid):
+ return id_svc_rel_units[relid]
+
+ id_svc_rel_data = {
+ 'keystone/0': {
+ 'ep_changed': '{"placement": "d5c3"}'},
+ 'keystone/1': {
+ 'ep_changed': '{"nova": "4d06", "neutron": "2aa6"}'},
+ 'keystone/2': {}}
+
+ def _relation_get(unit, rid, attribute):
+ return id_svc_rel_data[unit].get(attribute)
+
+ relation_ids.return_value = id_svc_rel_units.keys()
+ related_units.side_effect = _related_units
+ relation_get.side_effect = _relation_get
+ self.assertEqual(
+ openstack.get_endpoint_notifications(['neutron']),
+ {
+ 'neutron-identity-service_3-keystone_1': '2aa6'})
+ self.assertEqual(
+ openstack.get_endpoint_notifications(['placement', 'neutron']),
+ {
+ 'neutron-identity-service_3-keystone_1': '2aa6',
+ 'placement-identity-service_3-keystone_0': 'd5c3'})
+
+ @patch.object(openstack, 'get_endpoint_notifications')
+ @patch.object(openstack.unitdata, 'HookData')
+ def test_endpoint_changed(self, HookData, get_endpoint_notifications):
+ self.kv_data = {}
+
+ def _kv_get(key):
+ return self.kv_data.get(key)
+ kv = self._unit_paused_helper(HookData)
+ kv.get.side_effect = _kv_get
+ # Check endpoint_changed returns True when there are new notifications.
+ get_endpoint_notifications.return_value = {
+ 'neutron-identity-service_3-keystone_1': '2aa6',
+ 'placement-identity-service_3-keystone_0': 'd5c3'}
+ self.assertTrue(openstack.endpoint_changed('placement'))
+ # A lookup for 'nova' also reports a change here: the mocked
+ # get_endpoint_notifications ignores its argument, so unseen
+ # notifications are returned for any service name.
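+ # endpoint_changed treats a notification as new until its value has
+ # been recorded in the unit's kv store; save_endpoint_changed_triggers
+ # (tested below) is what records it.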
+ self.assertTrue(openstack.endpoint_changed('nova'))
+ # Check endpoint_changed returns False if the notification
+ # has already been seen
+ get_endpoint_notifications.return_value = {
+ 'placement-identity-service_3-keystone_0': 'd5c3'}
+ self.kv_data = {
+ 'placement-identity-service_3-keystone_0': 'd5c3'}
+ self.assertFalse(openstack.endpoint_changed('placement'))
+
+ @patch.object(openstack, 'get_endpoint_notifications')
+ @patch.object(openstack.unitdata, 'HookData')
+ def test_save_endpoint_changed_triggers(self, HookData,
+ get_endpoint_notifications):
+ kv = self._unit_paused_helper(HookData)
+ get_endpoint_notifications.return_value = {
+ 'neutron-identity-service_3-keystone_1': '2aa6',
+ 'placement-identity-service_3-keystone_0': 'd5c3'}
+ openstack.save_endpoint_changed_triggers(['neutron', 'placement'])
+ kv_set_calls = [
+ call('neutron-identity-service_3-keystone_1', '2aa6'),
+ call('placement-identity-service_3-keystone_0', 'd5c3')]
+ kv.set.assert_has_calls(kv_set_calls, any_order=True)
+
+
+class OpenStackUtilsAdditionalTests(TestCase):
+ SHARED_DB_RELATIONS = {
+ 'shared-db:8': {
+ 'mysql-svc1/0': {
+ 'allowed_units': 'client/0',
+ },
+ 'mysql-svc1/1': {},
+ 'mysql-svc1/2': {
+ 'allowed_units': 'client/0 client/1',
+ },
+ },
+ 'shared-db:12': {
+ 'mysql-svc2/0': {
+ 'allowed_units': 'client/1',
+ },
+ 'mysql-svc2/1': {
+ 'allowed_units': 'client/3',
+ },
+ 'mysql-svc2/2': {
+ 'allowed_units': {},
+ },
+ }
+ }
+ SCALE_RELATIONS = {
+ 'cluster:2': {
+ 'keystone/1': {},
+ 'keystone/2': {}},
+ 'shared-db:12': {
+ 'mysql-svc2/0': {
+ 'allowed_units': 'client/1',
+ },
+ 'mysql-svc2/1': {
+ 'allowed_units': 'client/3',
+ },
+ 'mysql-svc2/2': {
+ 'allowed_units': {},
+ }},
+ }
+ SCALE_RELATIONS_HA = {
+ 'cluster:2': {
+ 'keystone/1': {'unit-state-keystone-1': 'READY'},
+ 'keystone/2': {}},
+ 'shared-db:12': {
+ 'mysql-svc2/0': {
+ 'allowed_units': 'client/1',
+ },
+ 'mysql-svc2/1': {
+ 'allowed_units': 'client/3',
+ },
+ 'mysql-svc2/2': {
+ 'allowed_units': {},
+ }},
+ 'ha:32': {
+ 'hacluster-keystone/1': {}}
+ }
+ All_PEERS_READY = {
+ 'cluster:2': {
+ 'keystone/1': {'unit-state-keystone-1': 'READY'},
+ 'keystone/2': {'unit-state-keystone-2': 'READY'}}}
+ PEERS_NOT_READY = {
+ 'cluster:2': {
+ 'keystone/1': {'unit-state-keystone-1': 'READY'},
+ 'keystone/2': {}}}
+
+ def setUp(self):
+ super(OpenStackUtilsAdditionalTests, self).setUp()
+ [self._patch(m) for m in [
+ 'expect_ha',
+ 'expected_peer_units',
+ 'expected_related_units',
+ 'juju_log',
+ 'metadata',
+ 'related_units',
+ 'relation_get',
+ 'relation_id',
+ 'relation_ids',
+ 'relation_set',
+ 'local_unit',
+ ]]
+
+ def _patch(self, method):
+ _m = patch.object(openstack, method)
+ mock = _m.start()
+ self.addCleanup(_m.stop)
+ setattr(self, method, mock)
+
+ def setup_relation(self, relation_map):
+ relation = FakeRelation(relation_map)
+ self.relation_id.side_effect = relation.relation_id
+ self.relation_get.side_effect = relation.get
+ self.relation_ids.side_effect = relation.relation_ids
+ self.related_units.side_effect = relation.related_units
+ return relation
+
+ def test_is_db_ready(self):
+ relation = self.setup_relation(self.SHARED_DB_RELATIONS)
+
+ # Check unit allowed in 1st relation
+ self.local_unit.return_value = 'client/0'
+ self.assertTrue(openstack.is_db_ready())
+
+ # Check unit allowed in 2nd relation
+ self.local_unit.return_value = 'client/3'
+ self.assertTrue(openstack.is_db_ready())
+
+ # Check unit not allowed in any list
+ self.local_unit.return_value = 'client/5'
+
self.assertFalse(openstack.is_db_ready()) + + # Check call with an invalid relation + self.local_unit.return_value = 'client/3' + # None returned if not in a relation context (eg update-status) + relation.clear_relation_context() + self.assertRaises( + Exception, + openstack.is_db_ready, + use_current_context=True) + + # Check unit allowed using current relation context + relation.set_relation_context('mysql-svc2/0', 'shared-db:12') + self.local_unit.return_value = 'client/1' + self.assertTrue(openstack.is_db_ready(use_current_context=True)) + + # Check unit not allowed using current relation context + relation.set_relation_context('mysql-svc2/0', 'shared-db:12') + self.local_unit.return_value = 'client/0' + self.assertFalse(openstack.is_db_ready(use_current_context=True)) + + @patch.object(openstack, 'container_scoped_relations') + def test_is_expected_scale_noha(self, container_scoped_relations): + self.setup_relation(self.SCALE_RELATIONS) + self.expect_ha.return_value = False + eru = { + 'shared-db': ['mysql/0', 'mysql/1', 'mysql/2']} + + def _expected_related_units(reltype): + return eru[reltype] + self.expected_related_units.side_effect = _expected_related_units + container_scoped_relations.return_value = ['ha', 'domain-backend'] + + # All peer and db units are present + self.expected_peer_units.return_value = ['keystone/0', 'keystone/2'] + self.assertTrue(openstack.is_expected_scale()) + + # db units are present but a peer is missing + self.expected_peer_units.return_value = ['keystone/0', 'keystone/2', 'keystone/3'] + self.assertFalse(openstack.is_expected_scale()) + + # peer units are present but a db unit is missing + eru['shared-db'].append('mysql/3') + self.expected_peer_units.return_value = ['keystone/0', 'keystone/2'] + self.assertFalse(openstack.is_expected_scale()) + eru['shared-db'].remove('mysql/3') + + # Expect ha but ha unit is missing + self.expect_ha.return_value = True + self.expected_peer_units.return_value = ['keystone/0', 'keystone/2'] + self.assertFalse(openstack.is_expected_scale()) + + @patch.object(openstack, 'container_scoped_relations') + def test_is_expected_scale_ha(self, container_scoped_relations): + self.setup_relation(self.SCALE_RELATIONS_HA) + eru = { + 'shared-db': ['mysql/0', 'mysql/1', 'mysql/2']} + + def _expected_related_units(reltype): + return eru[reltype] + self.expected_related_units.side_effect = _expected_related_units + container_scoped_relations.return_value = ['ha', 'domain-backend'] + self.expect_ha.return_value = True + self.expected_peer_units.return_value = ['keystone/0', 'keystone/2'] + self.assertTrue(openstack.is_expected_scale()) + + def test_container_scoped_relations(self): + _metadata = { + 'provides': { + 'amqp': {'interface': 'rabbitmq'}, + 'identity-service': {'interface': 'keystone'}, + 'ha': { + 'interface': 'hacluster', + 'scope': 'container'}}, + 'peers': { + 'cluster': {'interface': 'openstack-ha'}}} + self.metadata.return_value = _metadata + self.assertEqual(openstack.container_scoped_relations(), ['ha']) + + def test_get_peer_key(self): + self.assertEqual( + openstack.get_peer_key('cinder/0'), + 'unit-state-cinder-0') + + def test_inform_peers_unit_state(self): + self.local_unit.return_value = 'client/0' + self.setup_relation(self.All_PEERS_READY) + openstack.inform_peers_unit_state('READY') + self.relation_set.assert_called_once_with( + relation_id='cluster:2', + relation_settings={'unit-state-client-0': 'READY'}) + + def test_get_peers_unit_state(self): + self.setup_relation(self.All_PEERS_READY) + self.assertEqual( 
+ openstack.get_peers_unit_state(), + {'keystone/1': 'READY', 'keystone/2': 'READY'}) + self.setup_relation(self.PEERS_NOT_READY) + self.assertEqual( + openstack.get_peers_unit_state(), + {'keystone/1': 'READY', 'keystone/2': 'UNKNOWN'}) + + def test_are_peers_ready(self): + self.setup_relation(self.All_PEERS_READY) + self.assertTrue(openstack.are_peers_ready()) + self.setup_relation(self.PEERS_NOT_READY) + self.assertFalse(openstack.are_peers_ready()) + + @patch.object(openstack, 'inform_peers_unit_state') + def test_inform_peers_if_ready(self, inform_peers_unit_state): + self.setup_relation(self.All_PEERS_READY) + + def _not_ready(): + return False, "Its all gone wrong" + + def _ready(): + return True, "Hurray!" + openstack.inform_peers_if_ready(_not_ready) + inform_peers_unit_state.assert_called_once_with('NOTREADY', 'cluster') + inform_peers_unit_state.reset_mock() + openstack.inform_peers_if_ready(_ready) + inform_peers_unit_state.assert_called_once_with('READY', 'cluster') + + @patch.object(openstack, 'is_expected_scale') + @patch.object(openstack, 'is_db_initialised') + @patch.object(openstack, 'is_db_ready') + @patch.object(openstack, 'is_unit_paused_set') + @patch.object(openstack, 'is_db_maintenance_mode') + def test_check_api_unit_ready(self, is_db_maintenance_mode, + is_unit_paused_set, is_db_ready, + is_db_initialised, is_expected_scale): + is_db_maintenance_mode.return_value = True + self.assertFalse(openstack.check_api_unit_ready()[0]) + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = True + self.assertFalse(openstack.check_api_unit_ready()[0]) + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = False + is_db_ready.return_value = False + self.assertFalse(openstack.check_api_unit_ready()[0]) + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = False + is_db_ready.return_value = True + is_db_initialised.return_value = False + self.assertFalse(openstack.check_api_unit_ready()[0]) + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = False + is_db_ready.return_value = True + is_db_initialised.return_value = True + is_expected_scale.return_value = False + self.assertFalse(openstack.check_api_unit_ready()[0]) + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = False + is_db_ready.return_value = True + is_db_initialised.return_value = True + is_expected_scale.return_value = True + self.assertTrue(openstack.check_api_unit_ready()[0]) + + @patch.object(openstack, 'is_expected_scale') + @patch.object(openstack, 'is_db_initialised') + @patch.object(openstack, 'is_db_ready') + @patch.object(openstack, 'is_unit_paused_set') + @patch.object(openstack, 'is_db_maintenance_mode') + def test_get_api_unit_status(self, is_db_maintenance_mode, + is_unit_paused_set, is_db_ready, + is_db_initialised, is_expected_scale): + is_db_maintenance_mode.return_value = True + self.assertEqual( + openstack.get_api_unit_status()[0].value, + 'maintenance') + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = True + self.assertEqual( + openstack.get_api_unit_status()[0].value, + 'blocked') + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = False + is_db_ready.return_value = False + self.assertEqual( + openstack.get_api_unit_status()[0].value, + 'waiting') + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = False + is_db_ready.return_value = True + 
is_db_initialised.return_value = False + self.assertEqual( + openstack.get_api_unit_status()[0].value, + 'waiting') + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = False + is_db_ready.return_value = True + is_db_initialised.return_value = True + is_expected_scale.return_value = False + self.assertEqual( + openstack.get_api_unit_status()[0].value, + 'waiting') + + is_db_maintenance_mode.return_value = False + is_unit_paused_set.return_value = False + is_db_ready.return_value = True + is_db_initialised.return_value = True + is_expected_scale.return_value = True + self.assertEqual( + openstack.get_api_unit_status()[0].value, + 'active') + + @patch.object(openstack, 'get_api_unit_status') + def test_check_api_application_ready(self, get_api_unit_status): + get_api_unit_status.return_value = (WORKLOAD_STATES.ACTIVE, 'Hurray') + self.assertTrue(openstack.check_api_application_ready()[0]) + get_api_unit_status.return_value = (WORKLOAD_STATES.BLOCKED, ':-(') + self.assertFalse(openstack.check_api_application_ready()[0]) + + @patch.object(openstack, 'get_api_unit_status') + def test_get_api_application_status(self, get_api_unit_status): + get_api_unit_status.return_value = (WORKLOAD_STATES.ACTIVE, 'Hurray') + self.assertEqual( + openstack.get_api_application_status()[0].value, + 'active') + get_api_unit_status.return_value = (WORKLOAD_STATES.BLOCKED, ':-(') + self.assertEqual( + openstack.get_api_application_status()[0].value, + 'blocked') + + +if __name__ == '__main__': + unittest.main() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_os_contexts.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_os_contexts.py new file mode 100644 index 0000000000000000000000000000000000000000..6edd66a4061f5d6bb8cba47d3eb486870783135a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_os_contexts.py @@ -0,0 +1,4842 @@ +import collections +import copy +import json +import mock +import six +import unittest +import yaml + +from mock import ( + patch, + Mock, + MagicMock, + call +) + +from tests.helpers import patch_open + +import tests.utils + +import charmhelpers.contrib.openstack.context as context + + +if not six.PY3: + open_builtin = '__builtin__.open' +else: + open_builtin = 'builtins.open' + + +class FakeRelation(object): + + ''' + A fake relation class. 
Lets tests specify simple relation data
+    for a default relation + unit (foo:0, foo/0, set in setUp()), eg:
+
+        rel = {
+            'private-address': 'foo',
+            'password': 'passwd',
+        }
+        relation = FakeRelation(rel)
+        self.relation_get.side_effect = relation.get
+        passwd = self.relation_get('password')
+
+    or more complex relations meant to be addressed by explicit relation id
+    + unit id combos:
+
+        rel = {
+            'mysql:0': {
+                'mysql/0': {
+                    'private-address': 'foo',
+                    'password': 'passwd',
+                }
+            }
+        }
+        relation = FakeRelation(rel)
+        self.relation_get.side_effect = relation.get
+        passwd = self.relation_get('password', rid='mysql:0', unit='mysql/0')
+    '''
+
+    def __init__(self, relation_data):
+        self.relation_data = relation_data
+
+    def get(self, attribute=None, unit=None, rid=None):
+        if not rid or rid == 'foo:0':
+            if attribute is None:
+                return self.relation_data
+            elif attribute in self.relation_data:
+                return self.relation_data[attribute]
+            return None
+        else:
+            if rid not in self.relation_data:
+                return None
+            try:
+                relation = self.relation_data[rid][unit]
+            except KeyError:
+                return None
+            if attribute is None:
+                return relation
+            if attribute in relation:
+                return relation[attribute]
+            return None
+
+    def relation_ids(self, relation):
+        rids = []
+        for rid in sorted(self.relation_data.keys()):
+            if relation + ':' in rid:
+                rids.append(rid)
+        return rids
+
+    def relation_units(self, relation_id):
+        if relation_id not in self.relation_data:
+            return None
+        return sorted(self.relation_data[relation_id].keys())
+
+
+SHARED_DB_RELATION = {
+    'db_host': 'dbserver.local',
+    'password': 'foo'
+}
+
+SHARED_DB_RELATION_W_PORT = {
+    'db_host': 'dbserver.local',
+    'password': 'foo',
+    'db_port': 3306,
+}
+
+SHARED_DB_RELATION_ALT_RID = {
+    'mysql-alt:0': {
+        'mysql-alt/0': {
+            'db_host': 'dbserver-alt.local',
+            'password': 'flump'}}}
+
+SHARED_DB_RELATION_SSL = {
+    'db_host': 'dbserver.local',
+    'password': 'foo',
+    'ssl_ca': 'Zm9vCg==',
+    'ssl_cert': 'YmFyCg==',
+    'ssl_key': 'Zm9vYmFyCg==',
+}
+
+SHARED_DB_CONFIG = {
+    'database-user': 'adam',
+    'database': 'foodb',
+}
+
+SHARED_DB_RELATION_NAMESPACED = {
+    'db_host': 'bar',
+    'quantum_password': 'bar2'
+}
+
+SHARED_DB_RELATION_ACCESS_NETWORK = {
+    'db_host': 'dbserver.local',
+    'password': 'foo',
+    'access-network': '10.5.5.0/24',
+    'hostname': 'bar',
+}
+
+
+IDENTITY_SERVICE_RELATION_HTTP = {
+    'service_port': '5000',
+    'service_host': 'keystonehost.local',
+    'auth_host': 'keystone-host.local',
+    'auth_port': '35357',
+    'service_domain': 'admin_domain',
+    'service_tenant': 'admin',
+    'service_tenant_id': '123456',
+    'service_password': 'foo',
+    'service_username': 'adam',
+    'service_protocol': 'http',
+    'auth_protocol': 'http',
+}
+
+IDENTITY_SERVICE_RELATION_UNSET = {
+    'service_port': '5000',
+    'service_host': 'keystonehost.local',
+    'auth_host': 'keystone-host.local',
+    'auth_port': '35357',
+    'service_domain': 'admin_domain',
+    'service_tenant': 'admin',
+    'service_password': 'foo',
+    'service_username': 'adam',
+}
+
+IDENTITY_CREDENTIALS_RELATION_UNSET = {
+    'credentials_port': '5000',
+    'credentials_host': 'keystonehost.local',
+    'auth_host': 'keystone-host.local',
+    'auth_port': '35357',
+    'auth_protocol': 'https',
+    'domain': 'admin_domain',
+    'credentials_project': 'admin',
+    'credentials_project_id': '123456',
+    'credentials_password': 'foo',
+    'credentials_username': 'adam',
+    'credentials_protocol': 'https',
+}
+
+
+APIIDENTITY_SERVICE_RELATION_UNSET = {
+    'neutron-plugin-api:0': {
+        'neutron-api/0': {
+            'service_port': '5000',
+
'service_host': 'keystonehost.local', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'service_domain': 'admin_domain', + 'service_tenant': 'admin', + 'service_password': 'foo', + 'service_username': 'adam', + } + } +} + +IDENTITY_SERVICE_RELATION_HTTPS = { + 'service_port': '5000', + 'service_host': 'keystonehost.local', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'service_domain': 'admin_domain', + 'service_tenant': 'admin', + 'service_password': 'foo', + 'service_username': 'adam', + 'service_protocol': 'https', + 'auth_protocol': 'https', +} + +IDENTITY_SERVICE_RELATION_VERSIONED = { + 'api_version': '3', + 'service_tenant_id': 'svc-proj-id', + 'service_domain_id': 'svc-dom-id', +} +IDENTITY_SERVICE_RELATION_VERSIONED.update(IDENTITY_SERVICE_RELATION_HTTPS) + +IDENTITY_CREDENTIALS_RELATION_VERSIONED = { + 'api_version': '3', + 'service_tenant_id': 'svc-proj-id', + 'service_domain_id': 'svc-dom-id', +} +IDENTITY_CREDENTIALS_RELATION_VERSIONED.update(IDENTITY_CREDENTIALS_RELATION_UNSET) + +POSTGRESQL_DB_RELATION = { + 'host': 'dbserver.local', + 'user': 'adam', + 'password': 'foo', +} + +POSTGRESQL_DB_CONFIG = { + 'database': 'foodb', +} + +IDENTITY_SERVICE_RELATION = { + 'service_port': '5000', + 'service_host': 'keystonehost.local', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'service_domain': 'admin_domain', + 'service_tenant': 'admin', + 'service_password': 'foo', + 'service_username': 'adam', +} + +AMQP_RELATION = { + 'private-address': 'rabbithost', + 'password': 'foobar', + 'vip': '10.0.0.1', +} + +AMQP_RELATION_ALT_RID = { + 'amqp-alt:0': { + 'rabbitmq-alt/0': { + 'private-address': 'rabbitalthost1', + 'password': 'flump', + }, + } +} + +AMQP_RELATION_WITH_SSL = { + 'private-address': 'rabbithost', + 'password': 'foobar', + 'vip': '10.0.0.1', + 'ssl_port': 5671, + 'ssl_ca': 'cert', + 'ha_queues': 'queues', +} + +AMQP_AA_RELATION = { + 'amqp:0': { + 'rabbitmq/0': { + 'private-address': 'rabbithost1', + 'password': 'foobar', + }, + 'rabbitmq/1': { + 'private-address': 'rabbithost2', + 'password': 'foobar', + }, + 'rabbitmq/2': { # Should be ignored because password is missing. 
+ 'private-address': 'rabbithost3', + } + } +} + +AMQP_CONFIG = { + 'rabbit-user': 'adam', + 'rabbit-vhost': 'foo', +} + +AMQP_OSLO_CONFIG = { + 'oslo-messaging-flags': ("rabbit_max_retries=1" + ",rabbit_retry_backoff=1" + ",rabbit_retry_interval=1"), + 'oslo-messaging-driver': 'log' +} + +AMQP_NOTIFICATION_FORMAT = { + 'notification-format': 'both' +} + +AMQP_NOTIFICATION_TOPICS = { + 'notification-topics': 'foo,bar' +} + +AMQP_NOTIFICATIONS_LOGS = { + 'send-notifications-to-logs': True +} + +AMQP_NOVA_CONFIG = { + 'nova-rabbit-user': 'adam', + 'nova-rabbit-vhost': 'foo', +} + +HAPROXY_CONFIG = { + 'haproxy-server-timeout': 50000, + 'haproxy-client-timeout': 50000, +} + +CEPH_RELATION = { + 'ceph:0': { + 'ceph/0': { + 'private-address': 'ceph_node1', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true' + }, + 'ceph/1': { + 'private-address': 'ceph_node2', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'false' + }, + } +} + +CEPH_RELATION_WITH_PUBLIC_ADDR = { + 'ceph:0': { + 'ceph/0': { + 'ceph-public-address': '192.168.1.10', + 'private-address': 'ceph_node1', + 'auth': 'foo', + 'key': 'bar' + }, + 'ceph/1': { + 'ceph-public-address': '192.168.1.11', + 'private-address': 'ceph_node2', + 'auth': 'foo', + 'key': 'bar' + }, + } +} + +CEPH_REL_WITH_PUBLIC_ADDR_PORT = { + 'ceph:0': { + 'ceph/0': { + 'ceph-public-address': '192.168.1.10:1234', + 'private-address': 'ceph_node1', + 'auth': 'foo', + 'key': 'bar' + }, + 'ceph/1': { + 'ceph-public-address': '192.168.1.11:4321', + 'private-address': 'ceph_node2', + 'auth': 'foo', + 'key': 'bar' + }, + } +} + +CEPH_REL_WITH_PUBLIC_IPv6_ADDR = { + 'ceph:0': { + 'ceph/0': { + 'ceph-public-address': '2001:5c0:9168::1', + 'private-address': 'ceph_node1', + 'auth': 'foo', + 'key': 'bar' + }, + 'ceph/1': { + 'ceph-public-address': '2001:5c0:9168::2', + 'private-address': 'ceph_node2', + 'auth': 'foo', + 'key': 'bar' + }, + } +} + +CEPH_REL_WITH_PUBLIC_IPv6_ADDR_PORT = { + 'ceph:0': { + 'ceph/0': { + 'ceph-public-address': '[2001:5c0:9168::1]:1234', + 'private-address': 'ceph_node1', + 'auth': 'foo', + 'key': 'bar' + }, + 'ceph/1': { + 'ceph-public-address': '[2001:5c0:9168::2]:4321', + 'private-address': 'ceph_node2', + 'auth': 'foo', + 'key': 'bar' + }, + } +} + +CEPH_REL_WITH_MULTI_PUBLIC_ADDR = { + 'ceph:0': { + 'ceph/0': { + 'ceph-public-address': '192.168.1.10 192.168.1.20', + 'private-address': 'ceph_node1', + 'auth': 'foo', + 'key': 'bar' + }, + 'ceph/1': { + 'ceph-public-address': '192.168.1.11 192.168.1.21', + 'private-address': 'ceph_node2', + 'auth': 'foo', + 'key': 'bar' + }, + } +} + +CEPH_REL_WITH_DEFAULT_FEATURES = { + 'ceph:0': { + 'ceph/0': { + 'private-address': 'ceph_node1', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + 'rbd-features': '1' + }, + 'ceph/1': { + 'private-address': 'ceph_node2', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'false', + 'rbd-features': '1' + }, + } +} + + +IDENTITY_RELATION_NO_CERT = { + 'identity-service:0': { + 'keystone/0': { + 'private-address': 'keystone1', + }, + } +} + +IDENTITY_RELATION_SINGLE_CERT = { + 'identity-service:0': { + 'keystone/0': { + 'private-address': 'keystone1', + 'ssl_cert_cinderhost1': 'certa', + 'ssl_key_cinderhost1': 'keya', + }, + } +} + +IDENTITY_RELATION_MULTIPLE_CERT = { + 'identity-service:0': { + 'keystone/0': { + 'private-address': 'keystone1', + 'ssl_cert_cinderhost1-int-network': 'certa', + 'ssl_key_cinderhost1-int-network': 'keya', + 'ssl_cert_cinderhost1-pub-network': 'certa', + 'ssl_key_cinderhost1-pub-network': 'keya', + 
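+            # One certificate/key pair is published per network space,
+            # keyed by '<ssl_cert|ssl_key>_<hostname>-<net>-network'
+            # (int/pub/adm), as exercised below.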
'ssl_cert_cinderhost1-adm-network': 'certa', + 'ssl_key_cinderhost1-adm-network': 'keya', + }, + } +} + +QUANTUM_NETWORK_SERVICE_RELATION = { + 'quantum-network-service:0': { + 'unit/0': { + 'keystone_host': '10.5.0.1', + 'service_port': '5000', + 'auth_port': '20000', + 'service_tenant': 'tenant', + 'service_username': 'username', + 'service_password': 'password', + 'quantum_host': '10.5.0.2', + 'quantum_port': '9696', + 'quantum_url': 'http://10.5.0.2:9696/v2', + 'region': 'aregion' + }, + } +} + +QUANTUM_NETWORK_SERVICE_RELATION_VERSIONED = { + 'quantum-network-service:0': { + 'unit/0': { + 'keystone_host': '10.5.0.1', + 'service_port': '5000', + 'auth_port': '20000', + 'service_tenant': 'tenant', + 'service_username': 'username', + 'service_password': 'password', + 'quantum_host': '10.5.0.2', + 'quantum_port': '9696', + 'quantum_url': 'http://10.5.0.2:9696/v2', + 'region': 'aregion', + 'api_version': '3', + }, + } +} + +SUB_CONFIG = """ +nova: + /etc/nova/nova.conf: + sections: + DEFAULT: + - [nova-key1, value1] + - [nova-key2, value2] +glance: + /etc/glance/glance.conf: + sections: + DEFAULT: + - [glance-key1, value1] + - [glance-key2, value2] +""" + +NOVA_SUB_CONFIG1 = """ +nova: + /etc/nova/nova.conf: + sections: + DEFAULT: + - [nova-key1, value1] + - [nova-key2, value2] +""" + + +NOVA_SUB_CONFIG2 = """ +nova-compute: + /etc/nova/nova.conf: + sections: + DEFAULT: + - [nova-key3, value3] + - [nova-key4, value4] +""" + +NOVA_SUB_CONFIG3 = """ +nova-compute: + /etc/nova/nova.conf: + sections: + DEFAULT: + - [nova-key5, value5] + - [nova-key6, value6] +""" + +CINDER_SUB_CONFIG1 = """ +cinder: + /etc/cinder/cinder.conf: + sections: + cinder-1-section: + - [key1, value1] +""" + +CINDER_SUB_CONFIG2 = """ +cinder: + /etc/cinder/cinder.conf: + sections: + cinder-2-section: + - [key2, value2] + not-a-section: + 1234 +""" + +SUB_CONFIG_RELATION = { + 'nova-subordinate:0': { + 'nova-subordinate/0': { + 'private-address': 'nova_node1', + 'subordinate_configuration': json.dumps(yaml.safe_load(SUB_CONFIG)), + }, + }, + 'glance-subordinate:0': { + 'glance-subordinate/0': { + 'private-address': 'glance_node1', + 'subordinate_configuration': json.dumps(yaml.safe_load(SUB_CONFIG)), + }, + }, + 'foo-subordinate:0': { + 'foo-subordinate/0': { + 'private-address': 'foo_node1', + 'subordinate_configuration': 'ea8e09324jkadsfh', + }, + }, + 'cinder-subordinate:0': { + 'cinder-subordinate/0': { + 'private-address': 'cinder_node1', + 'subordinate_configuration': json.dumps( + yaml.safe_load(CINDER_SUB_CONFIG1)), + }, + }, + 'cinder-subordinate:1': { + 'cinder-subordinate/1': { + 'private-address': 'cinder_node1', + 'subordinate_configuration': json.dumps( + yaml.safe_load(CINDER_SUB_CONFIG2)), + }, + }, +} + +SUB_CONFIG_RELATION2 = { + 'nova-ceilometer:6': { + 'ceilometer-agent/0': { + 'private-address': 'nova_node1', + 'subordinate_configuration': json.dumps( + yaml.safe_load(NOVA_SUB_CONFIG1)), + }, + }, + 'neutron-plugin:3': { + 'neutron-ovs-plugin/0': { + 'private-address': 'nova_node1', + 'subordinate_configuration': json.dumps( + yaml.safe_load(NOVA_SUB_CONFIG2)), + }, + }, + 'neutron-plugin:4': { + 'neutron-other-plugin/0': { + 'private-address': 'nova_node1', + 'subordinate_configuration': json.dumps( + yaml.safe_load(NOVA_SUB_CONFIG3)), + }, + } +} + +NONET_CONFIG = { + 'vip': 'cinderhost1vip', + 'os-internal-network': None, + 'os-admin-network': None, + 'os-public-network': None +} + +FULLNET_CONFIG = { + 'vip': '10.5.1.1 10.5.2.1 10.5.3.1', + 'os-internal-network': "10.5.1.0/24", + 
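+    # 'vip' lists one VIP per configured network, space separated:
+    # 10.5.1.1 falls in os-internal-network, 10.5.2.1 in
+    # os-admin-network and 10.5.3.1 in os-public-network.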
'os-admin-network': "10.5.2.0/24", + 'os-public-network': "10.5.3.0/24" +} + +MACHINE_MACS = { + 'eth0': 'fe:c5:ce:8e:2b:00', + 'eth1': 'fe:c5:ce:8e:2b:01', + 'eth2': 'fe:c5:ce:8e:2b:02', + 'eth3': 'fe:c5:ce:8e:2b:03', +} + +MACHINE_NICS = { + 'eth0': ['192.168.0.1'], + 'eth1': ['192.168.0.2'], + 'eth2': [], + 'eth3': [], +} + +ABSENT_MACS = "aa:a5:ae:ae:ab:a4 " + +# Imported in contexts.py and needs patching in setUp() +TO_PATCH = [ + 'b64decode', + 'check_call', + 'get_cert', + 'get_ca_cert', + 'install_ca_cert', + 'log', + 'config', + 'relation_get', + 'relation_ids', + 'related_units', + 'is_relation_made', + 'relation_set', + 'unit_get', + 'https', + 'determine_api_port', + 'determine_apache_port', + 'is_clustered', + 'time', + 'https', + 'get_address_in_network', + 'get_netmask_for_address', + 'local_unit', + 'get_ipv6_addr', + 'mkdir', + 'write_file', + 'get_relation_ip', + 'charm_name', + 'sysctl_create', + 'kv', + 'pwgen', + 'lsb_release', + 'is_container', + 'network_get_primary_address', + 'resolve_address', + 'is_ipv6_disabled', +] + + +class fake_config(object): + + def __init__(self, data): + self.data = data + + def __call__(self, attr): + if attr in self.data: + return self.data[attr] + return None + + +class fake_is_relation_made(): + def __init__(self, relations): + self.relations = relations + + def rel_made(self, relation): + return self.relations[relation] + + +class TestDB(object): + '''Test KV store for unitdata testing''' + def __init__(self): + self.data = {} + self.flushed = False + + def get(self, key, default=None): + return self.data.get(key, default) + + def set(self, key, value): + self.data[key] = value + return value + + def flush(self): + self.flushed = True + + +class ContextTests(unittest.TestCase): + + def setUp(self): + for m in TO_PATCH: + setattr(self, m, self._patch(m)) + # mock at least a single relation + unit + self.relation_ids.return_value = ['foo:0'] + self.related_units.return_value = ['foo/0'] + self.local_unit.return_value = 'localunit' + self.kv.side_effect = TestDB + self.pwgen.return_value = 'testpassword' + self.lsb_release.return_value = {'DISTRIB_RELEASE': '16.04'} + self.is_container.return_value = False + self.network_get_primary_address.side_effect = NotImplementedError() + self.resolve_address.return_value = '10.5.1.50' + self.maxDiff = None + + def _patch(self, method): + _m = patch('charmhelpers.contrib.openstack.context.' 
+ method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def test_base_class_not_implemented(self): + base = context.OSContextGenerator() + self.assertRaises(NotImplementedError, base) + + @patch.object(context, 'get_os_codename_install_source') + def test_shared_db_context_with_data(self, os_codename): + '''Test shared-db context with all required data''' + os_codename.return_value = 'queens' + relation = FakeRelation(relation_data=SHARED_DB_RELATION) + self.relation_get.side_effect = relation.get + self.get_address_in_network.return_value = '' + self.config.side_effect = fake_config(SHARED_DB_CONFIG) + shared_db = context.SharedDBContext() + result = shared_db() + expected = { + 'database_host': 'dbserver.local', + 'database': 'foodb', + 'database_user': 'adam', + 'database_password': 'foo', + 'database_type': 'mysql+pymysql', + } + self.assertEquals(result, expected) + + def test_shared_db_context_with_data_and_access_net_mismatch(self): + """Mismatch between hostname and hostname for access net - defers + execution""" + relation = FakeRelation( + relation_data=SHARED_DB_RELATION_ACCESS_NETWORK) + self.relation_get.side_effect = relation.get + self.get_address_in_network.return_value = '10.5.5.1' + self.config.side_effect = fake_config(SHARED_DB_CONFIG) + shared_db = context.SharedDBContext() + result = shared_db() + self.assertEquals(result, None) + self.relation_set.assert_called_with( + relation_settings={ + 'hostname': '10.5.5.1'}) + + @patch.object(context, 'get_os_codename_install_source') + def test_shared_db_context_with_data_and_access_net_match(self, + os_codename): + """Correctly set hostname for access net returns complete context""" + os_codename.return_value = 'queens' + relation = FakeRelation( + relation_data=SHARED_DB_RELATION_ACCESS_NETWORK) + self.relation_get.side_effect = relation.get + self.get_address_in_network.return_value = 'bar' + self.config.side_effect = fake_config(SHARED_DB_CONFIG) + shared_db = context.SharedDBContext() + result = shared_db() + expected = { + 'database_host': 'dbserver.local', + 'database': 'foodb', + 'database_user': 'adam', + 'database_password': 'foo', + 'database_type': 'mysql+pymysql', + } + self.assertEquals(result, expected) + + @patch.object(context, 'get_os_codename_install_source') + def test_shared_db_context_explicit_relation_id(self, os_codename): + '''Test shared-db context setting the relation_id''' + os_codename.return_value = 'queens' + relation = FakeRelation(relation_data=SHARED_DB_RELATION_ALT_RID) + self.related_units.return_value = ['mysql-alt/0'] + self.relation_get.side_effect = relation.get + self.get_address_in_network.return_value = '' + self.config.side_effect = fake_config(SHARED_DB_CONFIG) + shared_db = context.SharedDBContext(relation_id='mysql-alt:0') + result = shared_db() + expected = { + 'database_host': 'dbserver-alt.local', + 'database': 'foodb', + 'database_user': 'adam', + 'database_password': 'flump', + 'database_type': 'mysql+pymysql', + } + self.assertEquals(result, expected) + + @patch.object(context, 'get_os_codename_install_source') + def test_shared_db_context_with_port(self, os_codename): + '''Test shared-db context with all required data''' + os_codename.return_value = 'queens' + relation = FakeRelation(relation_data=SHARED_DB_RELATION_W_PORT) + self.relation_get.side_effect = relation.get + self.get_address_in_network.return_value = '' + self.config.side_effect = fake_config(SHARED_DB_CONFIG) + shared_db = context.SharedDBContext() + result = shared_db() + expected = { + 
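+            # A 'db_port' value on the relation is passed through to the
+            # context unchanged as 'database_port'.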
'database_host': 'dbserver.local', + 'database': 'foodb', + 'database_user': 'adam', + 'database_password': 'foo', + 'database_type': 'mysql+pymysql', + 'database_port': 3306, + } + self.assertEquals(result, expected) + + @patch('os.path.exists') + @patch(open_builtin) + def test_db_ssl(self, _open, osexists): + osexists.return_value = False + ssl_dir = '/etc/dbssl' + db_ssl_ctxt = context.db_ssl(SHARED_DB_RELATION_SSL, {}, ssl_dir) + expected = { + 'database_ssl_ca': ssl_dir + '/db-client.ca', + 'database_ssl_cert': ssl_dir + '/db-client.cert', + 'database_ssl_key': ssl_dir + '/db-client.key', + } + files = [ + call(expected['database_ssl_ca'], 'wb'), + call(expected['database_ssl_cert'], 'wb'), + call(expected['database_ssl_key'], 'wb') + ] + for f in files: + self.assertIn(f, _open.call_args_list) + self.assertEquals(db_ssl_ctxt, expected) + decode = [ + call(SHARED_DB_RELATION_SSL['ssl_ca']), + call(SHARED_DB_RELATION_SSL['ssl_cert']), + call(SHARED_DB_RELATION_SSL['ssl_key']) + ] + self.assertEquals(decode, self.b64decode.call_args_list) + + def test_db_ssl_nossldir(self): + db_ssl_ctxt = context.db_ssl(SHARED_DB_RELATION_SSL, {}, None) + self.assertEquals(db_ssl_ctxt, {}) + + @patch.object(context, 'get_os_codename_install_source') + def test_shared_db_context_with_missing_relation(self, os_codename): + '''Test shared-db context missing relation data''' + os_codename.return_value = 'stein' + incomplete_relation = copy.copy(SHARED_DB_RELATION) + incomplete_relation['password'] = None + relation = FakeRelation(relation_data=incomplete_relation) + self.relation_get.side_effect = relation.get + self.config.return_value = SHARED_DB_CONFIG + shared_db = context.SharedDBContext() + result = shared_db() + self.assertEquals(result, {}) + + def test_shared_db_context_with_missing_config(self): + '''Test shared-db context missing relation data''' + incomplete_config = copy.copy(SHARED_DB_CONFIG) + del incomplete_config['database-user'] + self.config.side_effect = fake_config(incomplete_config) + relation = FakeRelation(relation_data=SHARED_DB_RELATION) + self.relation_get.side_effect = relation.get + self.config.return_value = incomplete_config + shared_db = context.SharedDBContext() + self.assertRaises(context.OSContextError, shared_db) + + @patch.object(context, 'get_os_codename_install_source') + def test_shared_db_context_with_params(self, os_codename): + '''Test shared-db context with object parameters''' + os_codename.return_value = 'stein' + shared_db = context.SharedDBContext( + database='quantum', user='quantum', relation_prefix='quantum') + relation = FakeRelation(relation_data=SHARED_DB_RELATION_NAMESPACED) + self.relation_get.side_effect = relation.get + result = shared_db() + self.assertIn( + call(rid='foo:0', unit='foo/0'), + self.relation_get.call_args_list) + self.assertEquals( + result, {'database': 'quantum', + 'database_user': 'quantum', + 'database_password': 'bar2', + 'database_host': 'bar', + 'database_type': 'mysql+pymysql'}) + + @patch.object(context, 'get_os_codename_install_source') + def test_shared_db_context_with_params_pike(self, os_codename): + '''Test shared-db context with object parameters''' + os_codename.return_value = 'pike' + shared_db = context.SharedDBContext( + database='quantum', user='quantum', relation_prefix='quantum') + relation = FakeRelation(relation_data=SHARED_DB_RELATION_NAMESPACED) + self.relation_get.side_effect = relation.get + result = shared_db() + self.assertIn( + call(rid='foo:0', unit='foo/0'), + self.relation_get.call_args_list) + 
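+        # Pike and earlier use the plain 'mysql' driver in the SQLAlchemy
+        # connection string; queens and later (see the stein-based tests
+        # above) report 'mysql+pymysql'.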
self.assertEquals( + result, {'database': 'quantum', + 'database_user': 'quantum', + 'database_password': 'bar2', + 'database_host': 'bar', + 'database_type': 'mysql'}) + + @patch.object(context, 'get_os_codename_install_source') + @patch('charmhelpers.contrib.openstack.context.format_ipv6_addr') + def test_shared_db_context_with_ipv6(self, format_ipv6_addr, os_codename): + '''Test shared-db context with ipv6''' + shared_db = context.SharedDBContext( + database='quantum', user='quantum', relation_prefix='quantum') + os_codename.return_value = 'stein' + relation = FakeRelation(relation_data=SHARED_DB_RELATION_NAMESPACED) + self.relation_get.side_effect = relation.get + format_ipv6_addr.return_value = '[2001:db8:1::1]' + result = shared_db() + self.assertIn( + call(rid='foo:0', unit='foo/0'), + self.relation_get.call_args_list) + self.assertEquals( + result, {'database': 'quantum', + 'database_user': 'quantum', + 'database_password': 'bar2', + 'database_host': '[2001:db8:1::1]', + 'database_type': 'mysql+pymysql'}) + + def test_postgresql_db_context_with_data(self): + '''Test postgresql-db context with all required data''' + relation = FakeRelation(relation_data=POSTGRESQL_DB_RELATION) + self.relation_get.side_effect = relation.get + self.config.side_effect = fake_config(POSTGRESQL_DB_CONFIG) + postgresql_db = context.PostgresqlDBContext() + result = postgresql_db() + expected = { + 'database_host': 'dbserver.local', + 'database': 'foodb', + 'database_user': 'adam', + 'database_password': 'foo', + 'database_type': 'postgresql', + } + self.assertEquals(result, expected) + + def test_postgresql_db_context_with_missing_relation(self): + '''Test postgresql-db context missing relation data''' + incomplete_relation = copy.copy(POSTGRESQL_DB_RELATION) + incomplete_relation['password'] = None + relation = FakeRelation(relation_data=incomplete_relation) + self.relation_get.side_effect = relation.get + self.config.return_value = POSTGRESQL_DB_CONFIG + postgresql_db = context.PostgresqlDBContext() + result = postgresql_db() + self.assertEquals(result, {}) + + def test_postgresql_db_context_with_missing_config(self): + '''Test postgresql-db context missing relation data''' + incomplete_config = copy.copy(POSTGRESQL_DB_CONFIG) + del incomplete_config['database'] + self.config.side_effect = fake_config(incomplete_config) + relation = FakeRelation(relation_data=POSTGRESQL_DB_RELATION) + self.relation_get.side_effect = relation.get + self.config.return_value = incomplete_config + postgresql_db = context.PostgresqlDBContext() + self.assertRaises(context.OSContextError, postgresql_db) + + def test_postgresql_db_context_with_params(self): + '''Test postgresql-db context with object parameters''' + postgresql_db = context.PostgresqlDBContext(database='quantum') + result = postgresql_db() + self.assertEquals(result['database'], 'quantum') + + @patch.object(context, 'filter_installed_packages', return_value=[]) + @patch.object(context, 'os_release', return_value='rocky') + def test_identity_service_context_with_data(self, *args): + '''Test shared-db context with all required data''' + relation = FakeRelation(relation_data=IDENTITY_SERVICE_RELATION_UNSET) + self.relation_get.side_effect = relation.get + identity_service = context.IdentityServiceContext() + result = identity_service() + expected = { + 'admin_password': 'foo', + 'admin_tenant_name': 'admin', + 'admin_tenant_id': None, + 'admin_domain_id': None, + 'admin_user': 'adam', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'auth_protocol': 
'http', + 'service_host': 'keystonehost.local', + 'service_port': '5000', + 'service_protocol': 'http', + 'api_version': '2.0', + } + result.pop('keystone_authtoken') + self.assertEquals(result, expected) + + def test_identity_credentials_context_with_data(self): + '''Test identity-credentials context with all required data''' + relation = FakeRelation(relation_data=IDENTITY_CREDENTIALS_RELATION_UNSET) + self.relation_get.side_effect = relation.get + identity_credentials = context.IdentityCredentialsContext() + result = identity_credentials() + expected = { + 'admin_password': 'foo', + 'admin_tenant_name': 'admin', + 'admin_tenant_id': '123456', + 'admin_user': 'adam', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'auth_protocol': 'https', + 'service_host': 'keystonehost.local', + 'service_port': '5000', + 'service_protocol': 'https', + 'api_version': '2.0', + } + self.assertEquals(result, expected) + + @patch.object(context, 'filter_installed_packages', return_value=[]) + @patch.object(context, 'os_release', return_value='rocky') + def test_identity_service_context_with_altname(self, *args): + '''Test identity context when using an explicit relation name''' + relation = FakeRelation( + relation_data=APIIDENTITY_SERVICE_RELATION_UNSET + ) + self.relation_get.side_effect = relation.get + self.relation_ids.return_value = ['neutron-plugin-api:0'] + self.related_units.return_value = ['neutron-api/0'] + identity_service = context.IdentityServiceContext( + rel_name='neutron-plugin-api' + ) + result = identity_service() + expected = { + 'admin_password': 'foo', + 'admin_tenant_name': 'admin', + 'admin_tenant_id': None, + 'admin_domain_id': None, + 'admin_user': 'adam', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'auth_protocol': 'http', + 'service_host': 'keystonehost.local', + 'service_port': '5000', + 'service_protocol': 'http', + 'api_version': '2.0', + } + result.pop('keystone_authtoken') + self.assertEquals(result, expected) + + @patch.object(context, 'filter_installed_packages', return_value=[]) + @patch.object(context, 'os_release', return_value='rocky') + def test_identity_service_context_with_cache(self, *args): + '''Test shared-db context with signing cache info''' + relation = FakeRelation(relation_data=IDENTITY_SERVICE_RELATION_UNSET) + self.relation_get.side_effect = relation.get + svc = 'cinder' + identity_service = context.IdentityServiceContext(service=svc, + service_user=svc) + result = identity_service() + expected = { + 'admin_password': 'foo', + 'admin_tenant_name': 'admin', + 'admin_tenant_id': None, + 'admin_domain_id': None, + 'admin_user': 'adam', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'auth_protocol': 'http', + 'service_host': 'keystonehost.local', + 'service_port': '5000', + 'service_protocol': 'http', + 'signing_dir': '/var/cache/cinder', + 'api_version': '2.0', + } + self.assertTrue(self.mkdir.called) + result.pop('keystone_authtoken') + self.assertEquals(result, expected) + + @patch.object(context, 'filter_installed_packages', return_value=[]) + @patch.object(context, 'os_release', return_value='rocky') + def test_identity_service_context_with_data_http(self, *args): + '''Test shared-db context with all required data''' + relation = FakeRelation(relation_data=IDENTITY_SERVICE_RELATION_HTTP) + self.relation_get.side_effect = relation.get + identity_service = context.IdentityServiceContext() + result = identity_service() + expected = { + 'admin_password': 'foo', + 'admin_tenant_name': 'admin', + 
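+            # The context maps the relation's service_* keys onto admin_*
+            # settings: service_username -> admin_user, service_password ->
+            # admin_password, service_tenant -> admin_tenant_name, and so on.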
'admin_tenant_id': '123456', + 'admin_domain_id': None, + 'admin_user': 'adam', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'auth_protocol': 'http', + 'service_host': 'keystonehost.local', + 'service_port': '5000', + 'service_protocol': 'http', + 'api_version': '2.0', + } + result.pop('keystone_authtoken') + self.assertEquals(result, expected) + + @patch.object(context, 'filter_installed_packages', return_value=[]) + @patch.object(context, 'os_release', return_value='rocky') + def test_identity_service_context_with_data_https(self, *args): + '''Test shared-db context with all required data''' + relation = FakeRelation(relation_data=IDENTITY_SERVICE_RELATION_HTTPS) + self.relation_get.side_effect = relation.get + identity_service = context.IdentityServiceContext() + result = identity_service() + expected = { + 'admin_password': 'foo', + 'admin_tenant_name': 'admin', + 'admin_tenant_id': None, + 'admin_domain_id': None, + 'admin_user': 'adam', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'auth_protocol': 'https', + 'service_host': 'keystonehost.local', + 'service_port': '5000', + 'service_protocol': 'https', + 'api_version': '2.0', + } + result.pop('keystone_authtoken') + self.assertEquals(result, expected) + + @patch.object(context, 'filter_installed_packages', return_value=[]) + @patch.object(context, 'os_release', return_value='rocky') + def test_identity_service_context_with_data_versioned(self, *args): + '''Test shared-db context with api version supplied from keystone''' + relation = FakeRelation( + relation_data=IDENTITY_SERVICE_RELATION_VERSIONED) + self.relation_get.side_effect = relation.get + identity_service = context.IdentityServiceContext() + result = identity_service() + expected = { + 'admin_password': 'foo', + 'admin_domain_name': 'admin_domain', + 'admin_tenant_name': 'admin', + 'admin_tenant_id': 'svc-proj-id', + 'admin_domain_id': 'svc-dom-id', + 'service_project_id': 'svc-proj-id', + 'service_domain_id': 'svc-dom-id', + 'admin_user': 'adam', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'auth_protocol': 'https', + 'service_host': 'keystonehost.local', + 'service_port': '5000', + 'service_protocol': 'https', + 'api_version': '3', + } + result.pop('keystone_authtoken') + self.assertEquals(result, expected) + + def test_identity_credentials_context_with_data_versioned(self): + '''Test identity-credentials context with api version supplied from keystone''' + relation = FakeRelation( + relation_data=IDENTITY_CREDENTIALS_RELATION_VERSIONED) + self.relation_get.side_effect = relation.get + identity_credentials = context.IdentityCredentialsContext() + result = identity_credentials() + expected = { + 'admin_password': 'foo', + 'admin_domain_name': 'admin_domain', + 'admin_tenant_name': 'admin', + 'admin_tenant_id': '123456', + 'admin_user': 'adam', + 'auth_host': 'keystone-host.local', + 'auth_port': '35357', + 'auth_protocol': 'https', + 'service_host': 'keystonehost.local', + 'service_port': '5000', + 'service_protocol': 'https', + 'api_version': '3', + } + self.assertEquals(result, expected) + + @patch.object(context, 'filter_installed_packages', return_value=[]) + @patch.object(context, 'os_release', return_value='rocky') + @patch('charmhelpers.contrib.openstack.context.format_ipv6_addr') + def test_identity_service_context_with_ipv6(self, format_ipv6_addr, *args): + '''Test identity-service context with ipv6''' + relation = FakeRelation(relation_data=IDENTITY_SERVICE_RELATION_HTTP) + self.relation_get.side_effect = 
relation.get + format_ipv6_addr.return_value = '[2001:db8:1::1]' + identity_service = context.IdentityServiceContext() + result = identity_service() + expected = { + 'admin_password': 'foo', + 'admin_tenant_name': 'admin', + 'admin_tenant_id': '123456', + 'admin_domain_id': None, + 'admin_user': 'adam', + 'auth_host': '[2001:db8:1::1]', + 'auth_port': '35357', + 'auth_protocol': 'http', + 'service_host': '[2001:db8:1::1]', + 'service_port': '5000', + 'service_protocol': 'http', + 'api_version': '2.0', + } + result.pop('keystone_authtoken') + self.assertEquals(result, expected) + + @patch.object(context, 'filter_installed_packages', return_value=[]) + @patch.object(context, 'os_release', return_value='rocky') + def test_identity_service_context_with_missing_relation(self, *args): + '''Test shared-db context missing relation data''' + incomplete_relation = copy.copy(IDENTITY_SERVICE_RELATION_UNSET) + incomplete_relation['service_password'] = None + relation = FakeRelation(relation_data=incomplete_relation) + self.relation_get.side_effect = relation.get + identity_service = context.IdentityServiceContext() + result = identity_service() + self.assertEquals(result, {}) + + @patch.object(context, 'filter_installed_packages') + @patch.object(context, 'os_release') + def test_keystone_authtoken_www_authenticate_uri_stein_apiv3(self, mock_os_release, mock_filter_installed_packages): + relation_data = copy.deepcopy(IDENTITY_SERVICE_RELATION_VERSIONED) + relation = FakeRelation(relation_data=relation_data) + self.relation_get.side_effect = relation.get + + mock_filter_installed_packages.return_value = [] + mock_os_release.return_value = 'stein' + + identity_service = context.IdentityServiceContext() + + cfg_ctx = identity_service() + + keystone_authtoken = cfg_ctx.get('keystone_authtoken', {}) + + expected = collections.OrderedDict(( + ('auth_type', 'password'), + ('www_authenticate_uri', 'https://keystonehost.local:5000/v3'), + ('auth_url', 'https://keystone-host.local:35357/v3'), + ('project_domain_name', 'admin_domain'), + ('user_domain_name', 'admin_domain'), + ('project_name', 'admin'), + ('username', 'adam'), + ('password', 'foo'), + ('signing_dir', ''), + )) + + self.assertEquals(keystone_authtoken, expected) + + def test_amqp_context_with_data(self): + '''Test amqp context with all required data''' + relation = FakeRelation(relation_data=AMQP_RELATION) + self.relation_get.side_effect = relation.get + self.config.return_value = AMQP_CONFIG + amqp = context.AMQPContext() + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': 'rabbithost', + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'transport_url': 'rabbit://adam:foobar@rabbithost:5672/foo' + } + self.assertEquals(result, expected) + + def test_amqp_context_explicit_relation_id(self): + '''Test amqp context setting the relation_id''' + relation = FakeRelation(relation_data=AMQP_RELATION_ALT_RID) + self.relation_get.side_effect = relation.get + self.related_units.return_value = ['rabbitmq-alt/0'] + self.config.return_value = AMQP_CONFIG + amqp = context.AMQPContext(relation_id='amqp-alt:0') + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': 'rabbitalthost1', + 'rabbitmq_password': 'flump', + 'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'transport_url': 'rabbit://adam:flump@rabbitalthost1:5672/foo' + } + self.assertEquals(result, expected) + + def test_amqp_context_with_data_altname(self): + '''Test 
amqp context with alternative relation name''' + relation = FakeRelation(relation_data=AMQP_RELATION) + self.relation_get.side_effect = relation.get + self.config.return_value = AMQP_NOVA_CONFIG + amqp = context.AMQPContext( + rel_name='amqp-nova', + relation_prefix='nova') + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': 'rabbithost', + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'transport_url': 'rabbit://adam:foobar@rabbithost:5672/foo' + } + self.assertEquals(result, expected) + + @patch(open_builtin) + def test_amqp_context_with_data_ssl(self, _open): + '''Test amqp context with all required data and ssl''' + relation = FakeRelation(relation_data=AMQP_RELATION_WITH_SSL) + self.relation_get.side_effect = relation.get + self.config.return_value = AMQP_CONFIG + ssl_dir = '/etc/sslamqp' + amqp = context.AMQPContext(ssl_dir=ssl_dir) + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': 'rabbithost', + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbit_ssl_port': 5671, + 'rabbitmq_virtual_host': 'foo', + 'rabbit_ssl_ca': ssl_dir + '/rabbit-client-ca.pem', + 'rabbitmq_ha_queues': True, + 'transport_url': 'rabbit://adam:foobar@rabbithost:5671/foo' + } + _open.assert_called_once_with(ssl_dir + '/rabbit-client-ca.pem', 'wb') + self.assertEquals(result, expected) + self.assertEquals([call(AMQP_RELATION_WITH_SSL['ssl_ca'])], + self.b64decode.call_args_list) + + def test_amqp_context_with_data_ssl_noca(self): + '''Test amqp context with all required data with ssl but missing ca''' + relation = FakeRelation(relation_data=AMQP_RELATION_WITH_SSL) + self.relation_get.side_effect = relation.get + self.config.return_value = AMQP_CONFIG + amqp = context.AMQPContext() + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': 'rabbithost', + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbit_ssl_port': 5671, + 'rabbitmq_virtual_host': 'foo', + 'rabbit_ssl_ca': 'cert', + 'rabbitmq_ha_queues': True, + 'transport_url': 'rabbit://adam:foobar@rabbithost:5671/foo' + } + self.assertEquals(result, expected) + + def test_amqp_context_with_data_clustered(self): + '''Test amqp context with all required data with clustered rabbit''' + relation_data = copy.copy(AMQP_RELATION) + relation_data['clustered'] = 'yes' + relation = FakeRelation(relation_data=relation_data) + self.relation_get.side_effect = relation.get + self.config.return_value = AMQP_CONFIG + amqp = context.AMQPContext() + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'clustered': True, + 'rabbitmq_host': relation_data['vip'], + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'transport_url': 'rabbit://adam:foobar@10.0.0.1:5672/foo' + } + self.assertEquals(result, expected) + + def test_amqp_context_with_data_active_active(self): + '''Test amqp context with required data with active/active rabbit''' + relation_data = copy.copy(AMQP_AA_RELATION) + relation = FakeRelation(relation_data=relation_data) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + self.config.return_value = AMQP_CONFIG + amqp = context.AMQPContext() + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': 'rabbithost1', + 'rabbitmq_password': 'foobar', + 
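+            # With active/active rabbit the transport_url gains one
+            # 'user:password@host:port' entry per related unit, comma
+            # separated, with a single trailing '/<vhost>' (see below).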
'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'rabbitmq_hosts': 'rabbithost1,rabbithost2', + 'transport_url': ('rabbit://adam:foobar@rabbithost1:5672' + ',adam:foobar@rabbithost2:5672/foo') + } + self.assertEquals(result, expected) + + def test_amqp_context_with_missing_relation(self): + '''Test amqp context missing relation data''' + incomplete_relation = copy.copy(AMQP_RELATION) + incomplete_relation['password'] = '' + relation = FakeRelation(relation_data=incomplete_relation) + self.relation_get.side_effect = relation.get + self.config.return_value = AMQP_CONFIG + amqp = context.AMQPContext() + result = amqp() + self.assertEquals({}, result) + + def test_amqp_context_with_missing_config(self): + '''Test amqp context missing relation data''' + incomplete_config = copy.copy(AMQP_CONFIG) + del incomplete_config['rabbit-user'] + relation = FakeRelation(relation_data=AMQP_RELATION) + self.relation_get.side_effect = relation.get + self.config.return_value = incomplete_config + amqp = context.AMQPContext() + self.assertRaises(context.OSContextError, amqp) + + @patch('charmhelpers.contrib.openstack.context.format_ipv6_addr') + def test_amqp_context_with_ipv6(self, format_ipv6_addr): + '''Test amqp context with ipv6''' + relation_data = copy.copy(AMQP_AA_RELATION) + relation = FakeRelation(relation_data=relation_data) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + format_ipv6_addr.return_value = '[2001:db8:1::1]' + self.config.return_value = AMQP_CONFIG + amqp = context.AMQPContext() + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': '[2001:db8:1::1]', + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'rabbitmq_hosts': '[2001:db8:1::1],[2001:db8:1::1]', + 'transport_url': ('rabbit://adam:foobar@[2001:db8:1::1]:5672' + ',adam:foobar@[2001:db8:1::1]:5672/foo') + } + self.assertEquals(result, expected) + + def test_amqp_context_with_oslo_messaging(self): + """Test amqp context with oslo-messaging-flags option""" + relation = FakeRelation(relation_data=AMQP_RELATION) + self.relation_get.side_effect = relation.get + AMQP_OSLO_CONFIG.update(AMQP_CONFIG) + self.config.return_value = AMQP_OSLO_CONFIG + amqp = context.AMQPContext() + result = amqp() + expected = { + 'rabbitmq_host': 'rabbithost', + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'oslo_messaging_flags': { + 'rabbit_max_retries': '1', + 'rabbit_retry_backoff': '1', + 'rabbit_retry_interval': '1' + }, + 'oslo_messaging_driver': 'log', + 'transport_url': 'rabbit://adam:foobar@rabbithost:5672/foo' + } + + self.assertEquals(result, expected) + + def test_amqp_context_with_notification_format(self): + """Test amqp context with notification_format option""" + relation = FakeRelation(relation_data=AMQP_RELATION) + self.relation_get.side_effect = relation.get + AMQP_NOTIFICATION_FORMAT.update(AMQP_CONFIG) + self.config.return_value = AMQP_NOTIFICATION_FORMAT + amqp = context.AMQPContext() + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': 'rabbithost', + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'notification_format': 'both', + 'transport_url': 'rabbit://adam:foobar@rabbithost:5672/foo' + } + + self.assertEquals(result, expected) + + def test_amqp_context_with_notification_topics(self): + 
"""Test amqp context with notification_topics option""" + relation = FakeRelation(relation_data=AMQP_RELATION) + self.relation_get.side_effect = relation.get + AMQP_NOTIFICATION_TOPICS.update(AMQP_CONFIG) + self.config.return_value = AMQP_NOTIFICATION_TOPICS + amqp = context.AMQPContext() + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': 'rabbithost', + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'notification_topics': 'foo,bar', + 'transport_url': 'rabbit://adam:foobar@rabbithost:5672/foo' + } + + self.assertEquals(result, expected) + + def test_amqp_context_with_notifications_to_logs(self): + """Test amqp context with send_notifications_to_logs""" + relation = FakeRelation(relation_data=AMQP_RELATION) + self.relation_get.side_effect = relation.get + AMQP_NOTIFICATIONS_LOGS.update(AMQP_CONFIG) + self.config.return_value = AMQP_NOTIFICATIONS_LOGS + amqp = context.AMQPContext() + result = amqp() + expected = { + 'oslo_messaging_driver': 'messagingv2', + 'rabbitmq_host': 'rabbithost', + 'rabbitmq_password': 'foobar', + 'rabbitmq_user': 'adam', + 'rabbitmq_virtual_host': 'foo', + 'transport_url': 'rabbit://adam:foobar@rabbithost:5672/foo', + 'send_notifications_to_logs': True, + } + + self.assertEquals(result, expected) + + def test_libvirt_config_flags(self): + self.config.side_effect = fake_config({ + 'libvirt-flags': 'iscsi_use_multipath=True,chap_auth=False', + }) + + results = context.LibvirtConfigFlagsContext()() + self.assertEquals(results, { + 'libvirt_flags': { + 'chap_auth': 'False', + 'iscsi_use_multipath': 'True' + } + }) + + def test_ceph_no_relids(self): + '''Test empty ceph realtion''' + relation = FakeRelation(relation_data={}) + self.relation_ids.side_effect = relation.get + ceph = context.CephContext() + result = ceph() + self.assertEquals(result, {}) + + def test_ceph_rel_with_no_units(self): + '''Test ceph context with missing related units''' + relation = FakeRelation(relation_data={}) + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = [] + ceph = context.CephContext() + result = ceph() + self.assertEquals(result, {}) + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_with_data(self, ensure_packages, mkdir, isdir, + mock_config): + '''Test ceph context with all relation data''' + config_dict = {'use-syslog': True} + + def fake_config(key): + return config_dict.get(key) + + mock_config.side_effect = fake_config + isdir.return_value = False + relation = FakeRelation(relation_data=CEPH_RELATION) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + result = ceph() + expected = { + 'mon_hosts': 'ceph_node1 ceph_node2', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + } + self.assertEquals(result, expected) + ensure_packages.assert_called_with(['ceph-common']) + + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_with_missing_data(self, ensure_packages, mkdir): + '''Test ceph context with missing relation data''' + relation = copy.deepcopy(CEPH_RELATION) + for k, v in six.iteritems(relation): + for u in six.iterkeys(v): + del relation[k][u]['auth'] + relation = FakeRelation(relation_data=relation) + self.relation_get.side_effect = relation.get + 
self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + result = ceph() + self.assertEquals(result, {}) + self.assertFalse(ensure_packages.called) + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_partial_missing_data(self, ensure_packages, mkdir, + isdir, config): + '''Test ceph context last unit missing data + + Tests a fix to a previously bug which meant only the config from + last unit was returned so if a valid value was supplied from an + earlier unit it would be ignored''' + config.side_effect = fake_config({'use-syslog': 'True'}) + relation = copy.deepcopy(CEPH_RELATION) + for k, v in six.iteritems(relation): + last_unit = sorted(six.iterkeys(v))[-1] + unit_data = relation[k][last_unit] + del unit_data['auth'] + relation[k][last_unit] = unit_data + relation = FakeRelation(relation_data=relation) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + result = ceph() + expected = { + 'mon_hosts': 'ceph_node1 ceph_node2', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + } + self.assertEquals(result, expected) + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_with_public_addr( + self, ensure_packages, mkdir, isdir, mock_config): + '''Test ceph context in host with multiple networks with all + relation data''' + isdir.return_value = False + config_dict = {'use-syslog': True} + + def fake_config(key): + return config_dict.get(key) + + mock_config.side_effect = fake_config + relation = FakeRelation(relation_data=CEPH_RELATION_WITH_PUBLIC_ADDR) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + result = ceph() + expected = { + 'mon_hosts': '192.168.1.10 192.168.1.11', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + } + self.assertEquals(result, expected) + ensure_packages.assert_called_with(['ceph-common']) + mkdir.assert_called_with('/etc/ceph') + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_with_public_addr_and_port( + self, ensure_packages, mkdir, isdir, mock_config): + '''Test ceph context in host with multiple networks with all + relation data''' + isdir.return_value = False + config_dict = {'use-syslog': True} + + def fake_config(key): + return config_dict.get(key) + + mock_config.side_effect = fake_config + relation = FakeRelation(relation_data=CEPH_REL_WITH_PUBLIC_ADDR_PORT) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + result = ceph() + expected = { + 'mon_hosts': '192.168.1.10:1234 192.168.1.11:4321', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + } + self.assertEquals(result, expected) + ensure_packages.assert_called_with(['ceph-common']) + mkdir.assert_called_with('/etc/ceph') + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def 
test_ceph_context_with_public_ipv6_addr(self, ensure_packages, mkdir, + isdir, mock_config): + '''Test ceph context in host with multiple networks with all + relation data''' + isdir.return_value = False + config_dict = {'use-syslog': True} + + def fake_config(key): + return config_dict.get(key) + + mock_config.side_effect = fake_config + relation = FakeRelation(relation_data=CEPH_REL_WITH_PUBLIC_IPv6_ADDR) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + result = ceph() + expected = { + 'mon_hosts': '[2001:5c0:9168::1] [2001:5c0:9168::2]', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + } + self.assertEquals(result, expected) + ensure_packages.assert_called_with(['ceph-common']) + mkdir.assert_called_with('/etc/ceph') + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_with_public_ipv6_addr_port( + self, ensure_packages, mkdir, isdir, mock_config): + '''Test ceph context in host with multiple networks with all + relation data''' + isdir.return_value = False + config_dict = {'use-syslog': True} + + def fake_config(key): + return config_dict.get(key) + + mock_config.side_effect = fake_config + relation = FakeRelation( + relation_data=CEPH_REL_WITH_PUBLIC_IPv6_ADDR_PORT) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + result = ceph() + expected = { + 'mon_hosts': '[2001:5c0:9168::1]:1234 [2001:5c0:9168::2]:4321', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + } + self.assertEquals(result, expected) + ensure_packages.assert_called_with(['ceph-common']) + mkdir.assert_called_with('/etc/ceph') + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_with_multi_public_addr( + self, ensure_packages, mkdir, isdir, mock_config): + '''Test ceph context in host with multiple networks with all + relation data''' + isdir.return_value = False + config_dict = {'use-syslog': True} + + def fake_config(key): + return config_dict.get(key) + + mock_config.side_effect = fake_config + relation = FakeRelation(relation_data=CEPH_REL_WITH_MULTI_PUBLIC_ADDR) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + result = ceph() + expected = { + 'mon_hosts': '192.168.1.10 192.168.1.11 192.168.1.20 192.168.1.21', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + } + self.assertEquals(result, expected) + ensure_packages.assert_called_with(['ceph-common']) + mkdir.assert_called_with('/etc/ceph') + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_with_default_features( + self, ensure_packages, mkdir, isdir, mock_config): + '''Test ceph context in host with multiple networks with all + relation data''' + isdir.return_value = False + config_dict = {'use-syslog': True} + + def fake_config(key): + return config_dict.get(key) + + mock_config.side_effect = fake_config + relation = FakeRelation(relation_data=CEPH_REL_WITH_DEFAULT_FEATURES) + self.relation_get.side_effect = 
relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + result = ceph() + expected = { + 'mon_hosts': 'ceph_node1 ceph_node2', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + 'rbd_features': '1', + } + self.assertEquals(result, expected) + ensure_packages.assert_called_with(['ceph-common']) + mkdir.assert_called_with('/etc/ceph') + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_with_rbd_cache(self, ensure_packages, mkdir, isdir, + mock_config): + isdir.return_value = False + config_dict = {'rbd-client-cache': 'enabled', + 'use-syslog': False} + + def fake_config(key): + return config_dict.get(key) + + mock_config.side_effect = fake_config + relation = FakeRelation(relation_data=CEPH_RELATION_WITH_PUBLIC_ADDR) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + + class CephContextWithRBDCache(context.CephContext): + def __call__(self): + ctxt = super(CephContextWithRBDCache, self).__call__() + + rbd_cache = fake_config('rbd-client-cache') or "" + if rbd_cache.lower() == "enabled": + ctxt['rbd_client_cache_settings'] = \ + {'rbd cache': 'true', + 'rbd cache writethrough until flush': 'true'} + elif rbd_cache.lower() == "disabled": + ctxt['rbd_client_cache_settings'] = \ + {'rbd cache': 'false'} + + return ctxt + + ceph = CephContextWithRBDCache() + result = ceph() + expected = { + 'mon_hosts': '192.168.1.10 192.168.1.11', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'false', + } + expected['rbd_client_cache_settings'] = \ + {'rbd cache': 'true', + 'rbd cache writethrough until flush': 'true'} + + self.assertDictEqual(result, expected) + ensure_packages.assert_called_with(['ceph-common']) + mkdir.assert_called_with('/etc/ceph') + + @patch.object(context, 'config') + def test_sysctl_context_with_config(self, config): + self.charm_name.return_value = 'test-charm' + config.return_value = '{ kernel.max_pid: "1337"}' + self.sysctl_create.return_value = True + ctxt = context.SysctlContext() + result = ctxt() + self.sysctl_create.assert_called_with( + config.return_value, + "/etc/sysctl.d/50-test-charm.conf") + + self.assertEquals(result, {'sysctl': config.return_value}) + + @patch.object(context, 'config') + def test_sysctl_context_without_config(self, config): + self.charm_name.return_value = 'test-charm' + config.return_value = None + self.sysctl_create.return_value = True + ctxt = context.SysctlContext() + result = ctxt() + self.assertFalse(self.sysctl_create.called) + self.assertEquals(result, {'sysctl': config.return_value}) + + @patch.object(context, 'config') + @patch('os.path.isdir') + @patch('os.mkdir') + @patch.object(context, 'ensure_packages') + def test_ceph_context_missing_public_addr( + self, ensure_packages, mkdir, isdir, mock_config): + '''Test ceph context in host with multiple networks with no + ceph-public-addr in relation data''' + isdir.return_value = False + config_dict = {'use-syslog': True} + + def fake_config(key): + return config_dict.get(key) + + mock_config.side_effect = fake_config + relation = copy.deepcopy(CEPH_RELATION_WITH_PUBLIC_ADDR) + del relation['ceph:0']['ceph/0']['ceph-public-address'] + relation = FakeRelation(relation_data=relation) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = 
relation.relation_ids + self.related_units.side_effect = relation.relation_units + ceph = context.CephContext() + + result = ceph() + expected = { + 'mon_hosts': '192.168.1.11 ceph_node1', + 'auth': 'foo', + 'key': 'bar', + 'use_syslog': 'true', + } + self.assertEquals(result, expected) + ensure_packages.assert_called_with(['ceph-common']) + mkdir.assert_called_with('/etc/ceph') + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch('charmhelpers.contrib.openstack.context.local_unit') + def test_haproxy_context_with_data(self, local_unit, unit_get): + '''Test haproxy context with all relation data''' + cluster_relation = { + 'cluster:0': { + 'peer/1': { + 'private-address': 'cluster-peer1.localnet', + }, + 'peer/2': { + 'private-address': 'cluster-peer2.localnet', + }, + }, + } + local_unit.return_value = 'peer/0' + # We are only using get_relation_ip. + # Setup the values it returns on each subsequent call. + self.get_relation_ip.side_effect = [None, None, None, + 'cluster-peer0.localnet'] + relation = FakeRelation(cluster_relation) + self.relation_ids.side_effect = relation.relation_ids + self.relation_get.side_effect = relation.get + self.related_units.side_effect = relation.relation_units + self.get_netmask_for_address.return_value = '255.255.0.0' + self.config.return_value = False + self.maxDiff = None + self.is_ipv6_disabled.return_value = True + haproxy = context.HAProxyContext() + with patch_open() as (_open, _file): + result = haproxy() + ex = { + 'frontends': { + 'cluster-peer0.localnet': { + 'network': 'cluster-peer0.localnet/255.255.0.0', + 'backends': collections.OrderedDict([ + ('peer-0', 'cluster-peer0.localnet'), + ('peer-1', 'cluster-peer1.localnet'), + ('peer-2', 'cluster-peer2.localnet'), + ]), + }, + }, + 'default_backend': 'cluster-peer0.localnet', + 'local_host': '127.0.0.1', + 'haproxy_host': '0.0.0.0', + 'ipv6_enabled': False, + 'stat_password': 'testpassword', + 'stat_port': '8888', + } + # the context gets generated. + self.assertEquals(ex, result) + # and /etc/default/haproxy is updated. + self.assertEquals(_file.write.call_args_list, + [call('ENABLED=1\n')]) + self.get_relation_ip.assert_has_calls([call('admin', False), + call('internal', False), + call('public', False), + call('cluster')]) + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch('charmhelpers.contrib.openstack.context.local_unit') + def test_haproxy_context_with_data_timeout(self, local_unit, unit_get): + '''Test haproxy context with all relation data and timeout''' + cluster_relation = { + 'cluster:0': { + 'peer/1': { + 'private-address': 'cluster-peer1.localnet', + }, + 'peer/2': { + 'private-address': 'cluster-peer2.localnet', + }, + }, + } + local_unit.return_value = 'peer/0' + # We are only using get_relation_ip. + # Setup the values it returns on each subsequent call. 
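+ # The four values map to the 'admin', 'internal', 'public' and + # 'cluster' bindings, resolved in that order (see the + # assert_has_calls check at the end of this test).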
+ self.get_relation_ip.side_effect = [None, None, None, + 'cluster-peer0.localnet'] + relation = FakeRelation(cluster_relation) + self.relation_ids.side_effect = relation.relation_ids + self.relation_get.side_effect = relation.get + self.related_units.side_effect = relation.relation_units + self.get_netmask_for_address.return_value = '255.255.0.0' + self.config.return_value = False + self.maxDiff = None + c = fake_config(HAPROXY_CONFIG) + c.data['prefer-ipv6'] = False + self.config.side_effect = c + self.is_ipv6_disabled.return_value = True + haproxy = context.HAProxyContext() + with patch_open() as (_open, _file): + result = haproxy() + ex = { + 'frontends': { + 'cluster-peer0.localnet': { + 'network': 'cluster-peer0.localnet/255.255.0.0', + 'backends': collections.OrderedDict([ + ('peer-0', 'cluster-peer0.localnet'), + ('peer-1', 'cluster-peer1.localnet'), + ('peer-2', 'cluster-peer2.localnet'), + ]), + } + }, + 'default_backend': 'cluster-peer0.localnet', + 'local_host': '127.0.0.1', + 'haproxy_host': '0.0.0.0', + 'ipv6_enabled': False, + 'stat_password': 'testpassword', + 'stat_port': '8888', + 'haproxy_client_timeout': 50000, + 'haproxy_server_timeout': 50000, + } + # the context gets generated. + self.assertEquals(ex, result) + # and /etc/default/haproxy is updated. + self.assertEquals(_file.write.call_args_list, + [call('ENABLED=1\n')]) + self.get_relation_ip.assert_has_calls([call('admin', None), + call('internal', None), + call('public', None), + call('cluster')]) + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch('charmhelpers.contrib.openstack.context.local_unit') + def test_haproxy_context_with_data_multinet(self, local_unit, unit_get): + '''Test haproxy context with all relation data for network splits''' + cluster_relation = { + 'cluster:0': { + 'peer/1': { + 'private-address': 'cluster-peer1.localnet', + 'admin-address': 'cluster-peer1.admin', + 'internal-address': 'cluster-peer1.internal', + 'public-address': 'cluster-peer1.public', + }, + 'peer/2': { + 'private-address': 'cluster-peer2.localnet', + 'admin-address': 'cluster-peer2.admin', + 'internal-address': 'cluster-peer2.internal', + 'public-address': 'cluster-peer2.public', + }, + }, + } + + local_unit.return_value = 'peer/0' + relation = FakeRelation(cluster_relation) + self.relation_ids.side_effect = relation.relation_ids + self.relation_get.side_effect = relation.get + self.related_units.side_effect = relation.relation_units + # We are only using get_relation_ip. + # Setup the values it returns on each subsequent call. 
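+ # Unlike the tests above, every binding resolves to its own + # address here, so the context builds one frontend per network.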
+ self.get_relation_ip.side_effect = ['cluster-peer0.admin', + 'cluster-peer0.internal', + 'cluster-peer0.public', + 'cluster-peer0.localnet'] + self.get_netmask_for_address.return_value = '255.255.0.0' + self.config.return_value = False + self.maxDiff = None + self.is_ipv6_disabled.return_value = True + haproxy = context.HAProxyContext() + with patch_open() as (_open, _file): + result = haproxy() + ex = { + 'frontends': { + 'cluster-peer0.admin': { + 'network': 'cluster-peer0.admin/255.255.0.0', + 'backends': collections.OrderedDict([ + ('peer-0', 'cluster-peer0.admin'), + ('peer-1', 'cluster-peer1.admin'), + ('peer-2', 'cluster-peer2.admin'), + ]), + }, + 'cluster-peer0.internal': { + 'network': 'cluster-peer0.internal/255.255.0.0', + 'backends': collections.OrderedDict([ + ('peer-0', 'cluster-peer0.internal'), + ('peer-1', 'cluster-peer1.internal'), + ('peer-2', 'cluster-peer2.internal'), + ]), + }, + 'cluster-peer0.public': { + 'network': 'cluster-peer0.public/255.255.0.0', + 'backends': collections.OrderedDict([ + ('peer-0', 'cluster-peer0.public'), + ('peer-1', 'cluster-peer1.public'), + ('peer-2', 'cluster-peer2.public'), + ]), + }, + 'cluster-peer0.localnet': { + 'network': 'cluster-peer0.localnet/255.255.0.0', + 'backends': collections.OrderedDict([ + ('peer-0', 'cluster-peer0.localnet'), + ('peer-1', 'cluster-peer1.localnet'), + ('peer-2', 'cluster-peer2.localnet'), + ]), + } + }, + 'default_backend': 'cluster-peer0.localnet', + 'local_host': '127.0.0.1', + 'haproxy_host': '0.0.0.0', + 'ipv6_enabled': False, + 'stat_password': 'testpassword', + 'stat_port': '8888', + } + # the context gets generated. + self.assertEquals(ex, result) + # and /etc/default/haproxy is updated. + self.assertEquals(_file.write.call_args_list, + [call('ENABLED=1\n')]) + self.get_relation_ip.assert_has_calls([call('admin', False), + call('internal', False), + call('public', False), + call('cluster')]) + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch('charmhelpers.contrib.openstack.context.local_unit') + def test_haproxy_context_with_data_public_only(self, local_unit, unit_get): + '''Test haproxy context with openstack-dashboard public only binding''' + cluster_relation = { + 'cluster:0': { + 'peer/1': { + 'private-address': 'cluster-peer1.localnet', + 'public-address': 'cluster-peer1.public', + }, + 'peer/2': { + 'private-address': 'cluster-peer2.localnet', + 'public-address': 'cluster-peer2.public', + }, + }, + } + + local_unit.return_value = 'peer/0' + relation = FakeRelation(cluster_relation) + self.relation_ids.side_effect = relation.relation_ids + self.relation_get.side_effect = relation.get + self.related_units.side_effect = relation.relation_units + # We are only using get_relation_ip. + # Setup the values it returns on each subsequent call. 
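+ # This test keys the fake addresses by binding name rather than + # relying on call order; only 'public' and 'cluster' are mapped.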
+ _network_get_map = { + 'public': 'cluster-peer0.public', + 'cluster': 'cluster-peer0.localnet', + } + self.get_relation_ip.side_effect = ( + lambda binding, config_opt=None: + _network_get_map[binding] + ) + self.get_netmask_for_address.return_value = '255.255.0.0' + self.config.return_value = None + self.maxDiff = None + self.is_ipv6_disabled.return_value = True + haproxy = context.HAProxyContext(address_types=['public']) + with patch_open() as (_open, _file): + result = haproxy() + ex = { + 'frontends': { + 'cluster-peer0.public': { + 'network': 'cluster-peer0.public/255.255.0.0', + 'backends': collections.OrderedDict([ + ('peer-0', 'cluster-peer0.public'), + ('peer-1', 'cluster-peer1.public'), + ('peer-2', 'cluster-peer2.public'), + ]), + }, + 'cluster-peer0.localnet': { + 'network': 'cluster-peer0.localnet/255.255.0.0', + 'backends': collections.OrderedDict([ + ('peer-0', 'cluster-peer0.localnet'), + ('peer-1', 'cluster-peer1.localnet'), + ('peer-2', 'cluster-peer2.localnet'), + ]), + } + }, + 'default_backend': 'cluster-peer0.localnet', + 'local_host': '127.0.0.1', + 'haproxy_host': '0.0.0.0', + 'ipv6_enabled': False, + 'stat_password': 'testpassword', + 'stat_port': '8888', + } + # the context gets generated. + self.assertEquals(ex, result) + # and /etc/default/haproxy is updated. + self.assertEquals(_file.write.call_args_list, + [call('ENABLED=1\n')]) + self.get_relation_ip.assert_has_calls([call('public', None), + call('cluster')]) + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch('charmhelpers.contrib.openstack.context.local_unit') + def test_haproxy_context_with_data_ipv6(self, local_unit, unit_get): + '''Test haproxy context with all relation data ipv6''' + cluster_relation = { + 'cluster:0': { + 'peer/1': { + 'private-address': 'cluster-peer1.localnet', + }, + 'peer/2': { + 'private-address': 'cluster-peer2.localnet', + }, + }, + } + + local_unit.return_value = 'peer/0' + # We are only using get_relation_ip. + # Setup the values it returns on each subsequent call. + self.get_relation_ip.side_effect = [None, None, None, + 'cluster-peer0.localnet'] + relation = FakeRelation(cluster_relation) + self.relation_ids.side_effect = relation.relation_ids + self.relation_get.side_effect = relation.get + self.related_units.side_effect = relation.relation_units + self.get_address_in_network.return_value = None + self.get_netmask_for_address.return_value = \ + 'FFFF:FFFF:FFFF:FFFF:0000:0000:0000:0000' + self.get_ipv6_addr.return_value = ['cluster-peer0.localnet'] + c = fake_config(HAPROXY_CONFIG) + c.data['prefer-ipv6'] = True + self.config.side_effect = c + self.maxDiff = None + self.is_ipv6_disabled.return_value = False + haproxy = context.HAProxyContext() + with patch_open() as (_open, _file): + result = haproxy() + ex = { + 'frontends': { + 'cluster-peer0.localnet': { + 'network': 'cluster-peer0.localnet/' + 'FFFF:FFFF:FFFF:FFFF:0000:0000:0000:0000', + 'backends': collections.OrderedDict([ + ('peer-0', 'cluster-peer0.localnet'), + ('peer-1', 'cluster-peer1.localnet'), + ('peer-2', 'cluster-peer2.localnet'), + ]), + } + }, + 'default_backend': 'cluster-peer0.localnet', + 'local_host': 'ip6-localhost', + 'haproxy_server_timeout': 50000, + 'haproxy_client_timeout': 50000, + 'haproxy_host': '::', + 'ipv6_enabled': True, + 'stat_password': 'testpassword', + 'stat_port': '8888', + } + # the context gets generated. + self.assertEquals(ex, result) + # and /etc/default/haproxy is updated. 
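+ # (ENABLED=1 is the flag the Debian haproxy init script checks + # before starting the daemon.)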
+ self.assertEquals(_file.write.call_args_list, + [call('ENABLED=1\n')]) + self.get_relation_ip.assert_has_calls([call('admin', None), + call('internal', None), + call('public', None), + call('cluster')]) + + def test_haproxy_context_with_missing_data(self): + '''Test haproxy context with missing relation data''' + self.relation_ids.return_value = [] + haproxy = context.HAProxyContext() + self.assertEquals({}, haproxy()) + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch('charmhelpers.contrib.openstack.context.local_unit') + def test_haproxy_context_with_no_peers(self, local_unit, unit_get): + '''Test haproxy context with single unit''' + # Peer relations always contain at least one unit, even if the + # unit is alone; the context should be incomplete in that case. + cluster_relation = { + 'cluster:0': { + 'peer/0': { + 'private-address': 'lonely.clusterpeer.howsad', + }, + }, + } + local_unit.return_value = 'peer/0' + # We are only using get_relation_ip. + # Setup the values it returns on each subsequent call. + self.get_relation_ip.side_effect = [None, None, None, None] + relation = FakeRelation(cluster_relation) + self.relation_ids.side_effect = relation.relation_ids + self.relation_get.side_effect = relation.get + self.related_units.side_effect = relation.relation_units + self.config.return_value = False + haproxy = context.HAProxyContext() + self.assertEquals({}, haproxy()) + self.get_relation_ip.assert_has_calls([call('admin', False), + call('internal', False), + call('public', False), + call('cluster')]) + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch('charmhelpers.contrib.openstack.context.local_unit') + def test_haproxy_context_with_net_override(self, local_unit, unit_get): + '''Test haproxy context with single unit and network overrides''' + # Peer relations always contain at least one unit, even if the + # unit is alone; the context should be incomplete in that case. + cluster_relation = { + 'cluster:0': { + 'peer/0': { + 'private-address': 'lonely.clusterpeer.howsad', + }, + }, + } + local_unit.return_value = 'peer/0' + # We are only using get_relation_ip. + # Setup the values it returns on each subsequent call. + self.get_relation_ip.side_effect = [None, None, None, None] + relation = FakeRelation(cluster_relation) + self.relation_ids.side_effect = relation.relation_ids + self.relation_get.side_effect = relation.get + self.related_units.side_effect = relation.relation_units + self.config.return_value = False + c = fake_config(HAPROXY_CONFIG) + c.data['os-admin-network'] = '192.168.10.0/24' + c.data['os-internal-network'] = '192.168.20.0/24' + c.data['os-public-network'] = '192.168.30.0/24' + self.config.side_effect = c + haproxy = context.HAProxyContext() + self.assertEquals({}, haproxy()) + self.get_relation_ip.assert_has_calls([call('admin', '192.168.10.0/24'), + call('internal', '192.168.20.0/24'), + call('public', '192.168.30.0/24'), + call('cluster')]) + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch('charmhelpers.contrib.openstack.context.local_unit') + def test_haproxy_context_with_no_peers_singlemode(self, local_unit, unit_get): + '''Test haproxy context with single unit in singlenode mode''' + # Peer relations always contain at least one unit, even if the + # unit is alone; in singlenode mode a context is still built. + cluster_relation = { + 'cluster:0': { + 'peer/0': { + 'private-address': 'lonely.clusterpeer.howsad', + }, + }, + } + local_unit.return_value = 'peer/0' + # We are only using get_relation_ip. + # Setup the values it returns on each subsequent call. 
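+ # Only the 'cluster' binding resolves to an address here; in + # singlenode mode that is enough to build a frontend.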
self.get_relation_ip.side_effect = [None, None, None, + 'lonely.clusterpeer.howsad'] + relation = FakeRelation(cluster_relation) + self.relation_ids.side_effect = relation.relation_ids + self.relation_get.side_effect = relation.get + self.related_units.side_effect = relation.relation_units + self.config.return_value = False + self.get_address_in_network.return_value = None + self.get_netmask_for_address.return_value = '255.255.0.0' + self.is_ipv6_disabled.return_value = True + with patch_open() as (_open, _file): + result = context.HAProxyContext(singlenode_mode=True)() + ex = { + 'frontends': { + 'lonely.clusterpeer.howsad': { + 'backends': collections.OrderedDict([ + ('peer-0', 'lonely.clusterpeer.howsad')]), + 'network': 'lonely.clusterpeer.howsad/255.255.0.0' + }, + }, + 'default_backend': 'lonely.clusterpeer.howsad', + 'haproxy_host': '0.0.0.0', + 'local_host': '127.0.0.1', + 'ipv6_enabled': False, + 'stat_port': '8888', + 'stat_password': 'testpassword', + } + self.assertEquals(ex, result) + # and /etc/default/haproxy is updated. + self.assertEquals(_file.write.call_args_list, + [call('ENABLED=1\n')]) + self.get_relation_ip.assert_has_calls([call('admin', False), + call('internal', False), + call('public', False), + call('cluster')]) + + def test_https_context_with_no_https(self): + '''Test apache2 https when no https data available''' + apache = context.ApacheSSLContext() + self.https.return_value = False + self.assertEquals({}, apache()) + + def _https_context_setup(self): + ''' + Helper for test_https_context* tests. + + ''' + self.https.return_value = True + self.determine_api_port.return_value = 8756 + self.determine_apache_port.return_value = 8766 + + apache = context.ApacheSSLContext() + apache.configure_cert = MagicMock() + apache.enable_modules = MagicMock() + apache.configure_ca = MagicMock() + apache.canonical_names = MagicMock() + apache.canonical_names.return_value = [ + '10.5.1.1', + '10.5.2.1', + '10.5.3.1', + ] + apache.get_network_addresses = MagicMock() + apache.get_network_addresses.return_value = [ + ('10.5.1.100', '10.5.1.1'), + ('10.5.2.100', '10.5.2.1'), + ('10.5.3.100', '10.5.3.1'), + ] + apache.external_ports = '8776' + apache.service_namespace = 'cinder' + + ex = { + 'namespace': 'cinder', + 'endpoints': [('10.5.1.100', '10.5.1.1', 8766, 8756), + ('10.5.2.100', '10.5.2.1', 8766, 8756), + ('10.5.3.100', '10.5.3.1', 8766, 8756)], + 'ext_ports': [8766] + } + + return apache, ex + + def test_https_context(self): + self.relation_ids.return_value = [] + + apache, ex = self._https_context_setup() + + self.assertEquals(ex, apache()) + + apache.configure_cert.assert_has_calls([ + call('10.5.1.1'), + call('10.5.2.1'), + call('10.5.3.1') + ]) + + self.assertTrue(apache.configure_ca.called) + self.assertTrue(apache.enable_modules.called) + self.assertTrue(apache.configure_cert.called) + + def test_https_context_vault_relation(self): + self.relation_ids.return_value = ['certificates:2'] + self.related_units.return_value = ['vault/0'] + + apache, ex = self._https_context_setup() + + self.assertEquals(ex, apache()) + + self.assertFalse(apache.configure_cert.called) + self.assertFalse(apache.configure_ca.called) + + def test_https_context_no_canonical_names(self): + self.relation_ids.return_value = [] + + apache, ex = self._https_context_setup() + apache.canonical_names.return_value = [] + + self.resolve_address.side_effect = ( + '10.5.1.4', '10.5.2.5', '10.5.3.6') + + self.assertEquals(ex, apache()) + + apache.configure_cert.assert_has_calls([ + call('10.5.1.4'), + 
call('10.5.2.5'), + call('10.5.3.6') + ]) + + self.resolve_address.assert_has_calls([ + call(endpoint_type=context.INTERNAL), + call(endpoint_type=context.ADMIN), + call(endpoint_type=context.PUBLIC), + ]) + + self.assertTrue(apache.configure_ca.called) + self.assertTrue(apache.enable_modules.called) + self.assertTrue(apache.configure_cert.called) + + def test_https_context_loads_correct_apache_mods(self): + # Test apache2 context also loads required apache modules + apache = context.ApacheSSLContext() + apache.enable_modules() + ex_cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] + self.check_call.assert_called_with(ex_cmd) + + def test_https_configure_cert(self): + # Test apache2 properly installs certs and keys to disk + self.get_cert.return_value = ('SSL_CERT', 'SSL_KEY') + self.b64decode.side_effect = [b'SSL_CERT', b'SSL_KEY'] + apache = context.ApacheSSLContext() + apache.service_namespace = 'cinder' + apache.configure_cert('test-cn') + # appropriate directories are created. + self.mkdir.assert_called_with(path='/etc/apache2/ssl/cinder') + # appropriate files are written. + files = [call(path='/etc/apache2/ssl/cinder/cert_test-cn', + content=b'SSL_CERT', owner='root', group='root', + perms=0o640), + call(path='/etc/apache2/ssl/cinder/key_test-cn', + content=b'SSL_KEY', owner='root', group='root', + perms=0o640)] + self.write_file.assert_has_calls(files) + # appropriate bits are b64decoded. + decode = [call('SSL_CERT'), call('SSL_KEY')] + self.assertEquals(decode, self.b64decode.call_args_list) + + def test_https_configure_cert_deprecated(self): + # Test apache2 properly installs certs and keys to disk + self.get_cert.return_value = ('SSL_CERT', 'SSL_KEY') + self.b64decode.side_effect = ['SSL_CERT', 'SSL_KEY'] + apache = context.ApacheSSLContext() + apache.service_namespace = 'cinder' + apache.configure_cert() + # appropriate directories are created. + self.mkdir.assert_called_with(path='/etc/apache2/ssl/cinder') + # appropriate files are written. + files = [call(path='/etc/apache2/ssl/cinder/cert', + content='SSL_CERT', owner='root', group='root', + perms=0o640), + call(path='/etc/apache2/ssl/cinder/key', + content='SSL_KEY', owner='root', group='root', + perms=0o640)] + self.write_file.assert_has_calls(files) + # appropriate bits are b64decoded. 
+ decode = [call('SSL_CERT'), call('SSL_KEY')] + self.assertEquals(decode, self.b64decode.call_args_list) + + def test_https_canonical_names(self): + rel = FakeRelation(IDENTITY_RELATION_SINGLE_CERT) + self.relation_ids.side_effect = rel.relation_ids + self.related_units.side_effect = rel.relation_units + self.relation_get.side_effect = rel.get + apache = context.ApacheSSLContext() + self.assertEquals(apache.canonical_names(), ['cinderhost1']) + rel.relation_data = IDENTITY_RELATION_MULTIPLE_CERT + self.assertEquals(apache.canonical_names(), + sorted(['cinderhost1-adm-network', + 'cinderhost1-int-network', + 'cinderhost1-pub-network'])) + rel.relation_data = IDENTITY_RELATION_NO_CERT + self.assertEquals(apache.canonical_names(), []) + + def test_image_service_context_missing_data(self): + '''Test image-service with missing relation and missing data''' + image_service = context.ImageServiceContext() + self.relation_ids.return_value = [] + self.assertEquals({}, image_service()) + self.relation_ids.return_value = ['image-service:0'] + self.related_units.return_value = ['glance/0'] + self.relation_get.return_value = None + self.assertEquals({}, image_service()) + + def test_image_service_context_with_data(self): + '''Test image-service with required data''' + image_service = context.ImageServiceContext() + self.relation_ids.return_value = ['image-service:0'] + self.related_units.return_value = ['glance/0'] + self.relation_get.return_value = 'http://glancehost:9292' + self.assertEquals({'glance_api_servers': 'http://glancehost:9292'}, + image_service()) + + @patch.object(context, 'neutron_plugin_attribute') + def test_neutron_context_base_properties(self, attr): + '''Test neutron context base properties''' + neutron = context.NeutronContext() + attr.return_value = 'quantum-plugin-package' + self.assertEquals(None, neutron.plugin) + self.assertEquals(None, neutron.network_manager) + self.assertEquals(None, neutron.neutron_security_groups) + self.assertEquals('quantum-plugin-package', neutron.packages) + + @patch.object(context, 'neutron_plugin_attribute') + @patch.object(context, 'apt_install') + @patch.object(context, 'filter_installed_packages') + def test_neutron_ensure_package(self, _filter, _install, _packages): + '''Test neutron context installs required packages''' + _filter.return_value = ['quantum-plugin-package'] + _packages.return_value = [['quantum-plugin-package']] + neutron = context.NeutronContext() + neutron._ensure_packages() + _install.assert_called_with(['quantum-plugin-package'], fatal=True) + + @patch.object(context.NeutronContext, 'neutron_security_groups') + @patch.object(context, 'unit_private_ip') + @patch.object(context, 'neutron_plugin_attribute') + def test_neutron_ovs_plugin_context(self, attr, ip, sec_groups): + ip.return_value = '10.0.0.1' + sec_groups.__get__ = MagicMock(return_value=True) + attr.return_value = 'some.quantum.driver.class' + neutron = context.NeutronContext() + self.assertEquals({ + 'config': 'some.quantum.driver.class', + 'core_plugin': 'some.quantum.driver.class', + 'neutron_plugin': 'ovs', + 'neutron_security_groups': True, + 'local_ip': '10.0.0.1'}, neutron.ovs_ctxt()) + + @patch.object(context.NeutronContext, 'neutron_security_groups') + @patch.object(context, 'unit_private_ip') + @patch.object(context, 'neutron_plugin_attribute') + def test_neutron_nvp_plugin_context(self, attr, ip, sec_groups): + ip.return_value = '10.0.0.1' + sec_groups.__get__ = MagicMock(return_value=True) + attr.return_value = 'some.quantum.driver.class' + neutron = 
context.NeutronContext() + self.assertEquals({ + 'config': 'some.quantum.driver.class', + 'core_plugin': 'some.quantum.driver.class', + 'neutron_plugin': 'nvp', + 'neutron_security_groups': True, + 'local_ip': '10.0.0.1'}, neutron.nvp_ctxt()) + + @patch.object(context, 'config') + @patch.object(context.NeutronContext, 'neutron_security_groups') + @patch.object(context, 'unit_private_ip') + @patch.object(context, 'neutron_plugin_attribute') + def test_neutron_n1kv_plugin_context(self, attr, ip, sec_groups, config): + ip.return_value = '10.0.0.1' + sec_groups.__get__ = MagicMock(return_value=True) + attr.return_value = 'some.quantum.driver.class' + config.return_value = 'n1kv' + neutron = context.NeutronContext() + self.assertEquals({ + 'core_plugin': 'some.quantum.driver.class', + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': True, + 'local_ip': '10.0.0.1', + 'config': 'some.quantum.driver.class', + 'vsm_ip': 'n1kv', + 'vsm_username': 'n1kv', + 'vsm_password': 'n1kv', + 'user_config_flags': {}, + 'restrict_policy_profiles': 'n1kv', + }, neutron.n1kv_ctxt()) + + @patch.object(context.NeutronContext, 'neutron_security_groups') + @patch.object(context, 'unit_private_ip') + @patch.object(context, 'neutron_plugin_attribute') + def test_neutron_calico_plugin_context(self, attr, ip, sec_groups): + ip.return_value = '10.0.0.1' + sec_groups.__get__ = MagicMock(return_value=True) + attr.return_value = 'some.quantum.driver.class' + neutron = context.NeutronContext() + self.assertEquals({ + 'config': 'some.quantum.driver.class', + 'core_plugin': 'some.quantum.driver.class', + 'neutron_plugin': 'Calico', + 'neutron_security_groups': True, + 'local_ip': '10.0.0.1'}, neutron.calico_ctxt()) + + @patch.object(context.NeutronContext, 'neutron_security_groups') + @patch.object(context, 'unit_private_ip') + @patch.object(context, 'neutron_plugin_attribute') + def test_neutron_plumgrid_plugin_context(self, attr, ip, sec_groups): + ip.return_value = '10.0.0.1' + sec_groups.__get__ = MagicMock(return_value=True) + attr.return_value = 'some.quantum.driver.class' + neutron = context.NeutronContext() + self.assertEquals({ + 'config': 'some.quantum.driver.class', + 'core_plugin': 'some.quantum.driver.class', + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': True, + 'local_ip': '10.0.0.1'}, neutron.pg_ctxt()) + + @patch.object(context.NeutronContext, 'neutron_security_groups') + @patch.object(context, 'unit_private_ip') + @patch.object(context, 'neutron_plugin_attribute') + def test_neutron_nuage_plugin_context(self, attr, ip, sec_groups): + ip.return_value = '10.0.0.1' + sec_groups.__get__ = MagicMock(return_value=True) + attr.return_value = 'some.quantum.driver.class' + neutron = context.NeutronContext() + self.assertEquals({ + 'config': 'some.quantum.driver.class', + 'core_plugin': 'some.quantum.driver.class', + 'neutron_plugin': 'vsp', + 'neutron_security_groups': True, + 'local_ip': '10.0.0.1'}, neutron.nuage_ctxt()) + + @patch.object(context.NeutronContext, 'neutron_security_groups') + @patch.object(context, 'unit_private_ip') + @patch.object(context, 'neutron_plugin_attribute') + def test_neutron_midonet_plugin_context(self, attr, ip, sec_groups): + ip.return_value = '10.0.0.1' + sec_groups.__get__ = MagicMock(return_value=True) + attr.return_value = 'some.quantum.driver.class' + neutron = context.NeutronContext() + self.assertEquals({ + 'config': 'some.quantum.driver.class', + 'core_plugin': 'some.quantum.driver.class', + 'neutron_plugin': 'midonet', + 'neutron_security_groups': True, + 
'local_ip': '10.0.0.1'}, neutron.midonet_ctxt()) + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch.object(context.NeutronContext, 'network_manager') + def test_neutron_neutron_ctxt(self, mock_network_manager, + mock_unit_get): + vip = '88.11.22.33' + priv_addr = '10.0.0.1' + mock_unit_get.return_value = priv_addr + neutron = context.NeutronContext() + + config = {'vip': vip} + self.config.side_effect = lambda key: config[key] + mock_network_manager.__get__ = Mock(return_value='neutron') + + self.is_clustered.return_value = False + self.assertEquals( + {'network_manager': 'neutron', + 'neutron_url': 'https://%s:9696' % (priv_addr)}, + neutron.neutron_ctxt() + ) + + self.is_clustered.return_value = True + self.assertEquals( + {'network_manager': 'neutron', + 'neutron_url': 'https://%s:9696' % (vip)}, + neutron.neutron_ctxt() + ) + + @patch('charmhelpers.contrib.openstack.context.unit_get') + @patch.object(context.NeutronContext, 'network_manager') + def test_neutron_neutron_ctxt_http(self, mock_network_manager, + mock_unit_get): + vip = '88.11.22.33' + priv_addr = '10.0.0.1' + mock_unit_get.return_value = priv_addr + neutron = context.NeutronContext() + + config = {'vip': vip} + self.config.side_effect = lambda key: config[key] + self.https.return_value = False + mock_network_manager.__get__ = Mock(return_value='neutron') + + self.is_clustered.return_value = False + self.assertEquals( + {'network_manager': 'neutron', + 'neutron_url': 'http://%s:9696' % (priv_addr)}, + neutron.neutron_ctxt() + ) + + self.is_clustered.return_value = True + self.assertEquals( + {'network_manager': 'neutron', + 'neutron_url': 'http://%s:9696' % (vip)}, + neutron.neutron_ctxt() + ) + + @patch.object(context.NeutronContext, 'neutron_ctxt') + @patch.object(context.NeutronContext, 'ovs_ctxt') + @patch.object(context.NeutronContext, 'plugin') + @patch.object(context.NeutronContext, '_ensure_packages') + @patch.object(context.NeutronContext, 'network_manager') + def test_neutron_main_context_generation(self, mock_network_manager, + mock_ensure_packages, + mock_plugin, mock_ovs_ctxt, + mock_neutron_ctxt): + + mock_neutron_ctxt.return_value = {'network_manager': 'neutron', + 'neutron_url': 'https://foo:9696'} + config = {'neutron-alchemy-flags': None} + self.config.side_effect = lambda key: config[key] + neutron = context.NeutronContext() + + mock_network_manager.__get__ = Mock(return_value='flatdhcpmanager') + mock_plugin.__get__ = Mock() + + self.assertEquals({}, neutron()) + self.assertTrue(mock_network_manager.__get__.called) + self.assertFalse(mock_plugin.__get__.called) + + mock_network_manager.__get__.return_value = 'neutron' + mock_plugin.__get__ = Mock(return_value=None) + self.assertEquals({}, neutron()) + self.assertTrue(mock_plugin.__get__.called) + + mock_ovs_ctxt.return_value = {'ovs': 'ovs_context'} + mock_plugin.__get__.return_value = 'ovs' + self.assertEquals( + {'network_manager': 'neutron', + 'ovs': 'ovs_context', + 'neutron_url': 'https://foo:9696'}, + neutron() + ) + + @patch.object(context.NeutronContext, 'neutron_ctxt') + @patch.object(context.NeutronContext, 'nvp_ctxt') + @patch.object(context.NeutronContext, 'plugin') + @patch.object(context.NeutronContext, '_ensure_packages') + @patch.object(context.NeutronContext, 'network_manager') + def test_neutron_main_context_gen_nvp_and_alchemy(self, + mock_network_manager, + mock_ensure_packages, + mock_plugin, + mock_nvp_ctxt, + mock_neutron_ctxt): + + mock_neutron_ctxt.return_value = {'network_manager': 'neutron', + 'neutron_url': 
'https://foo:9696'} + config = {'neutron-alchemy-flags': 'pool_size=20'} + self.config.side_effect = lambda key: config[key] + neutron = context.NeutronContext() + + mock_network_manager.__get__ = Mock(return_value='flatdhcpmanager') + mock_plugin.__get__ = Mock() + + self.assertEquals({}, neutron()) + self.assertTrue(mock_network_manager.__get__.called) + self.assertFalse(mock_plugin.__get__.called) + + mock_network_manager.__get__.return_value = 'neutron' + mock_plugin.__get__ = Mock(return_value=None) + self.assertEquals({}, neutron()) + self.assertTrue(mock_plugin.__get__.called) + + mock_nvp_ctxt.return_value = {'nvp': 'nvp_context'} + mock_plugin.__get__.return_value = 'nvp' + self.assertEquals( + {'network_manager': 'neutron', + 'nvp': 'nvp_context', + 'neutron_alchemy_flags': {'pool_size': '20'}, + 'neutron_url': 'https://foo:9696'}, + neutron() + ) + + @patch.object(context.NeutronContext, 'neutron_ctxt') + @patch.object(context.NeutronContext, 'calico_ctxt') + @patch.object(context.NeutronContext, 'plugin') + @patch.object(context.NeutronContext, '_ensure_packages') + @patch.object(context.NeutronContext, 'network_manager') + def test_neutron_main_context_gen_calico(self, mock_network_manager, + mock_ensure_packages, + mock_plugin, mock_ovs_ctxt, + mock_neutron_ctxt): + + mock_neutron_ctxt.return_value = {'network_manager': 'neutron', + 'neutron_url': 'https://foo:9696'} + config = {'neutron-alchemy-flags': None} + self.config.side_effect = lambda key: config[key] + neutron = context.NeutronContext() + + mock_network_manager.__get__ = Mock(return_value='flatdhcpmanager') + mock_plugin.__get__ = Mock() + + self.assertEquals({}, neutron()) + self.assertTrue(mock_network_manager.__get__.called) + self.assertFalse(mock_plugin.__get__.called) + + mock_network_manager.__get__.return_value = 'neutron' + mock_plugin.__get__ = Mock(return_value=None) + self.assertEquals({}, neutron()) + self.assertTrue(mock_plugin.__get__.called) + + mock_ovs_ctxt.return_value = {'Calico': 'calico_context'} + mock_plugin.__get__.return_value = 'Calico' + self.assertEquals( + {'network_manager': 'neutron', + 'Calico': 'calico_context', + 'neutron_url': 'https://foo:9696'}, + neutron() + ) + + @patch('charmhelpers.contrib.openstack.utils.juju_log', + lambda *args, **kwargs: None) + @patch.object(context, 'config') + def test_os_configflag_context(self, config): + flags = context.OSConfigFlagContext() + + # single + config.return_value = 'deadbeef=True' + self.assertEquals({ + 'user_config_flags': { + 'deadbeef': 'True', + } + }, flags()) + + # multi + config.return_value = 'floating_ip=True,use_virtio=False,max=5' + self.assertEquals({ + 'user_config_flags': { + 'floating_ip': 'True', + 'use_virtio': 'False', + 'max': '5', + } + }, flags()) + + for empty in [None, '']: + config.return_value = empty + self.assertEquals({}, flags()) + + # multi with commas + config.return_value = 'good_flag=woot,badflag,great_flag=w00t' + self.assertEquals({ + 'user_config_flags': { + 'good_flag': 'woot,badflag', + 'great_flag': 'w00t', + } + }, flags()) + + # missing key + config.return_value = 'good_flag=woot=toow' + self.assertRaises(context.OSContextError, flags) + + # bad value + config.return_value = 'good_flag=woot==' + self.assertRaises(context.OSContextError, flags) + + @patch.object(context, 'config') + def test_os_configflag_context_custom(self, config): + flags = context.OSConfigFlagContext( + charm_flag='api-config-flags', + template_flag='api_config_flags') + + # single + config.return_value = 'deadbeef=True' + 
self.assertEquals({ + 'api_config_flags': { + 'deadbeef': 'True', + } + }, flags()) + + def test_os_subordinate_config_context(self): + relation = FakeRelation(relation_data=SUB_CONFIG_RELATION) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + nova_sub_ctxt = context.SubordinateConfigContext( + service='nova', + config_file='/etc/nova/nova.conf', + interface='nova-subordinate', + ) + glance_sub_ctxt = context.SubordinateConfigContext( + service='glance', + config_file='/etc/glance/glance.conf', + interface='glance-subordinate', + ) + cinder_sub_ctxt = context.SubordinateConfigContext( + service='cinder', + config_file='/etc/cinder/cinder.conf', + interface='cinder-subordinate', + ) + foo_sub_ctxt = context.SubordinateConfigContext( + service='foo', + config_file='/etc/foo/foo.conf', + interface='foo-subordinate', + ) + self.assertEquals( + nova_sub_ctxt(), + {'sections': { + 'DEFAULT': [ + ['nova-key1', 'value1'], + ['nova-key2', 'value2']] + }} + ) + self.assertEquals( + glance_sub_ctxt(), + {'sections': { + 'DEFAULT': [ + ['glance-key1', 'value1'], + ['glance-key2', 'value2']] + }} + ) + self.assertEquals( + cinder_sub_ctxt(), + {'sections': { + 'cinder-1-section': [ + ['key1', 'value1']], + 'cinder-2-section': [ + ['key2', 'value2']] + + }, 'not-a-section': 1234} + ) + + # subordinate supplies nothing for given config + glance_sub_ctxt.config_file = '/etc/glance/glance-api-paste.ini' + self.assertEquals(glance_sub_ctxt(), {'sections': {}}) + + # subordinate supplies bad input + self.assertEquals(foo_sub_ctxt(), {'sections': {}}) + + def test_os_subordinate_config_context_multiple(self): + relation = FakeRelation(relation_data=SUB_CONFIG_RELATION2) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.relation_units + nova_sub_ctxt = context.SubordinateConfigContext( + service=['nova', 'nova-compute'], + config_file='/etc/nova/nova.conf', + interface=['nova-ceilometer', 'neutron-plugin'], + ) + self.assertEquals( + nova_sub_ctxt(), + {'sections': { + 'DEFAULT': [ + ['nova-key1', 'value1'], + ['nova-key2', 'value2'], + ['nova-key3', 'value3'], + ['nova-key4', 'value4'], + ['nova-key5', 'value5'], + ['nova-key6', 'value6']] + }} + ) + + def test_syslog_context(self): + self.config.side_effect = fake_config({'use-syslog': 'foo'}) + syslog = context.SyslogContext() + result = syslog() + expected = { + 'use_syslog': 'foo', + } + self.assertEquals(result, expected) + + def test_loglevel_context_set(self): + self.config.side_effect = fake_config({ + 'debug': True, + 'verbose': True, + }) + syslog = context.LogLevelContext() + result = syslog() + expected = { + 'debug': True, + 'verbose': True, + } + self.assertEquals(result, expected) + + def test_loglevel_context_unset(self): + self.config.side_effect = fake_config({ + 'debug': None, + 'verbose': None, + }) + syslog = context.LogLevelContext() + result = syslog() + expected = { + 'debug': False, + 'verbose': False, + } + self.assertEquals(result, expected) + + @patch.object(context, '_calculate_workers') + def test_wsgi_worker_config_context(self, + _calculate_workers): + self.config.return_value = 2 # worker-multiplier=2 + _calculate_workers.return_value = 8 + service_name = 'service-name' + script = '/usr/bin/script' + ctxt = context.WSGIWorkerConfigContext(name=service_name, + script=script) + expect = { + "service_name": service_name, 
+ "user": service_name, + "group": service_name, + "script": script, + "admin_script": None, + "public_script": None, + "processes": 8, + "admin_processes": 2, + "public_processes": 6, + "threads": 1, + } + self.assertEqual(expect, ctxt()) + + @patch.object(context, '_calculate_workers') + def test_wsgi_worker_config_context_user_and_group(self, + _calculate_workers): + self.config.return_value = 1 + _calculate_workers.return_value = 1 + service_name = 'service-name' + script = '/usr/bin/script' + user = 'nova' + group = 'nobody' + ctxt = context.WSGIWorkerConfigContext(name=service_name, + user=user, + group=group, + script=script) + expect = { + "service_name": service_name, + "user": user, + "group": group, + "script": script, + "admin_script": None, + "public_script": None, + "processes": 1, + "admin_processes": 1, + "public_processes": 1, + "threads": 1, + } + self.assertEqual(expect, ctxt()) + + def test_zeromq_context_unrelated(self): + self.is_relation_made.return_value = False + self.assertEquals(context.ZeroMQContext()(), {}) + + def test_zeromq_context_related(self): + self.is_relation_made.return_value = True + self.relation_ids.return_value = ['zeromq-configuration:1'] + self.related_units.return_value = ['openstack-zeromq/0'] + self.relation_get.side_effect = ['nonce-data', 'hostname', 'redis'] + self.assertEquals(context.ZeroMQContext()(), + {'zmq_host': 'hostname', + 'zmq_nonce': 'nonce-data', + 'zmq_redis_address': 'redis'}) + + def test_notificationdriver_context_nomsg(self): + relations = { + 'zeromq-configuration': False, + 'amqp': False, + } + rels = fake_is_relation_made(relations=relations) + self.is_relation_made.side_effect = rels.rel_made + self.assertEquals(context.NotificationDriverContext()(), + {'notifications': 'False'}) + + def test_notificationdriver_context_zmq_nometer(self): + relations = { + 'zeromq-configuration': True, + 'amqp': False, + } + rels = fake_is_relation_made(relations=relations) + self.is_relation_made.side_effect = rels.rel_made + self.assertEquals(context.NotificationDriverContext()(), + {'notifications': 'False'}) + + def test_notificationdriver_context_zmq_meter(self): + relations = { + 'zeromq-configuration': True, + 'amqp': False, + } + rels = fake_is_relation_made(relations=relations) + self.is_relation_made.side_effect = rels.rel_made + self.assertEquals(context.NotificationDriverContext()(), + {'notifications': 'False'}) + + def test_notificationdriver_context_amq(self): + relations = { + 'zeromq-configuration': False, + 'amqp': True, + } + rels = fake_is_relation_made(relations=relations) + self.is_relation_made.side_effect = rels.rel_made + self.assertEquals(context.NotificationDriverContext()(), + {'notifications': 'True'}) + + @patch.object(context, 'psutil') + def test_num_cpus_xenial(self, _psutil): + _psutil.cpu_count.return_value = 4 + self.assertTrue(context._num_cpus(), 4) + + @patch.object(context, 'psutil') + def test_num_cpus_trusty(self, _psutil): + _psutil.NUM_CPUS = 4 + self.assertTrue(context._num_cpus(), 4) + + @patch.object(context, '_num_cpus') + def test_calculate_workers_float(self, _num_cpus): + self.config.side_effect = fake_config({ + 'worker-multiplier': 0.3 + }) + _num_cpus.return_value = 4 + self.assertTrue(context._calculate_workers(), 4) + + @patch.object(context, '_num_cpus') + def test_calculate_workers_not_quite_0(self, _num_cpus): + # Make sure that the multiplier evaluating to somewhere between + # 0 and 1 in the floating point range still has at least one + # worker. 
+ self.config.side_effect = fake_config({ + 'worker-multiplier': 0.001 + }) + _num_cpus.return_value = 100 + self.assertTrue(context._calculate_workers(), 1) + + @patch.object(context, 'psutil') + def test_calculate_workers_0(self, _psutil): + self.config.side_effect = fake_config({ + 'worker-multiplier': 0 + }) + _psutil.cpu_count.return_value = 2 + self.assertTrue(context._calculate_workers(), 0) + + @patch.object(context, '_num_cpus') + def test_calculate_workers_noconfig(self, _num_cpus): + self.config.return_value = None + _num_cpus.return_value = 1 + self.assertTrue(context._calculate_workers(), 2) + + @patch.object(context, '_num_cpus') + def test_calculate_workers_noconfig_container(self, _num_cpus): + self.config.return_value = None + self.is_container.return_value = True + _num_cpus.return_value = 1 + self.assertTrue(context._calculate_workers(), 2) + + @patch.object(context, '_num_cpus') + def test_calculate_workers_noconfig_lotsa_cpus_container(self, + _num_cpus): + self.config.return_value = None + self.is_container.return_value = True + _num_cpus.return_value = 32 + self.assertTrue(context._calculate_workers(), 4) + + @patch.object(context, '_num_cpus') + def test_calculate_workers_noconfig_lotsa_cpus_not_container(self, + _num_cpus): + self.config.return_value = None + _num_cpus.return_value = 32 + self.assertTrue(context._calculate_workers(), 64) + + @patch.object(context, '_calculate_workers', return_value=256) + def test_worker_context(self, calculate_workers): + self.assertEqual(context.WorkerConfigContext()(), + {'workers': 256}) + + def test_apache_get_addresses_no_network_config(self): + self.config.side_effect = fake_config({ + 'os-internal-network': None, + 'os-admin-network': None, + 'os-public-network': None + }) + self.resolve_address.return_value = '10.5.1.50' + self.unit_get.return_value = '10.5.1.50' + + apache = context.ApacheSSLContext() + apache.external_ports = '8776' + + addresses = apache.get_network_addresses() + expected = [('10.5.1.50', '10.5.1.50')] + + self.assertEqual(addresses, expected) + + self.get_address_in_network.assert_not_called() + self.resolve_address.assert_has_calls([ + call(context.INTERNAL), + call(context.ADMIN), + call(context.PUBLIC) + ]) + + def test_apache_get_addresses_with_network_config(self): + self.config.side_effect = fake_config({ + 'os-internal-network': '10.5.1.0/24', + 'os-admin-network': '10.5.2.0/24', + 'os-public-network': '10.5.3.0/24', + }) + _base_addresses = ['10.5.1.100', + '10.5.2.100', + '10.5.3.100'] + self.get_address_in_network.side_effect = _base_addresses + self.resolve_address.side_effect = _base_addresses + self.unit_get.return_value = '10.5.1.50' + + apache = context.ApacheSSLContext() + + addresses = apache.get_network_addresses() + expected = [('10.5.1.100', '10.5.1.100'), + ('10.5.2.100', '10.5.2.100'), + ('10.5.3.100', '10.5.3.100')] + self.assertEqual(addresses, expected) + + calls = [call('10.5.1.0/24', '10.5.1.50'), + call('10.5.2.0/24', '10.5.1.50'), + call('10.5.3.0/24', '10.5.1.50')] + self.get_address_in_network.assert_has_calls(calls) + self.resolve_address.assert_has_calls([ + call(context.INTERNAL), + call(context.ADMIN), + call(context.PUBLIC) + ]) + + def test_apache_get_addresses_network_spaces(self): + self.config.side_effect = fake_config({ + 'os-internal-network': None, + 'os-admin-network': None, + 'os-public-network': None + }) + self.network_get_primary_address.side_effect = None + self.network_get_primary_address.return_value = '10.5.2.50' + self.resolve_address.return_value = 
'10.5.2.100' + self.unit_get.return_value = '10.5.1.50' + + apache = context.ApacheSSLContext() + apache.external_ports = '8776' + + addresses = apache.get_network_addresses() + expected = [('10.5.2.50', '10.5.2.100')] + + self.assertEqual(addresses, expected) + + self.get_address_in_network.assert_not_called() + self.resolve_address.assert_has_calls([ + call(context.INTERNAL), + call(context.ADMIN), + call(context.PUBLIC) + ]) + + def test_config_flag_parsing_simple(self): + # Standard key=value checks... + flags = context.config_flags_parser('key1=value1, key2=value2') + self.assertEqual(flags, {'key1': 'value1', 'key2': 'value2'}) + + # Check for multiple values to a single key + flags = context.config_flags_parser('key1=value1, ' + 'key2=value2,value3,value4') + self.assertEqual(flags, {'key1': 'value1', + 'key2': 'value2,value3,value4'}) + + # Check for yaml formatted key value pairings for more complex + # assignment options. + flags = context.config_flags_parser('key1: subkey1=value1,' + 'subkey2=value2') + self.assertEqual(flags, {'key1': 'subkey1=value1,subkey2=value2'}) + + # Check for good measure the ldap formats + test_string = ('user_tree_dn: ou=ABC General,' + 'ou=User Accounts,dc=example,dc=com') + flags = context.config_flags_parser(test_string) + self.assertEqual(flags, {'user_tree_dn': ('ou=ABC General,' + 'ou=User Accounts,' + 'dc=example,dc=com')}) + + def _fake_get_hwaddr(self, arg): + return MACHINE_MACS[arg] + + def _fake_get_ipv4(self, arg, fatal=False): + return MACHINE_NICS[arg] + + @patch('charmhelpers.contrib.openstack.context.config') + def test_no_ext_port(self, mock_config): + self.config.side_effect = config = fake_config({}) + mock_config.side_effect = config + self.assertEquals(context.ExternalPortContext()(), {}) + + @patch('charmhelpers.contrib.openstack.context.list_nics') + @patch('charmhelpers.contrib.openstack.context.config') + def test_ext_port_eth(self, mock_config, mock_list_nics): + config = fake_config({'ext-port': 'eth1010'}) + self.config.side_effect = config + mock_config.side_effect = config + mock_list_nics.return_value = ['eth1010'] + self.assertEquals(context.ExternalPortContext()(), + {'ext_port': 'eth1010'}) + + @patch('charmhelpers.contrib.openstack.context.list_nics') + @patch('charmhelpers.contrib.openstack.context.config') + def test_ext_port_eth_non_existent(self, mock_config, mock_list_nics): + config = fake_config({'ext-port': 'eth1010'}) + self.config.side_effect = config + mock_config.side_effect = config + mock_list_nics.return_value = [] + self.assertEquals(context.ExternalPortContext()(), {}) + + @patch('charmhelpers.contrib.openstack.context.is_phy_iface', + lambda arg: True) + @patch('charmhelpers.contrib.openstack.context.get_nic_hwaddr') + @patch('charmhelpers.contrib.openstack.context.list_nics') + @patch('charmhelpers.contrib.openstack.context.get_ipv6_addr') + @patch('charmhelpers.contrib.openstack.context.get_ipv4_addr') + @patch('charmhelpers.contrib.openstack.context.config') + def test_ext_port_mac(self, mock_config, mock_get_ipv4_addr, + mock_get_ipv6_addr, mock_list_nics, + mock_get_nic_hwaddr): + config_macs = ABSENT_MACS + " " + MACHINE_MACS['eth2'] + config = fake_config({'ext-port': config_macs}) + self.config.side_effect = config + mock_config.side_effect = config + + mock_get_ipv4_addr.side_effect = self._fake_get_ipv4 + mock_get_ipv6_addr.return_value = [] + mock_list_nics.return_value = MACHINE_MACS.keys() + mock_get_nic_hwaddr.side_effect = self._fake_get_hwaddr + + 
self.assertEquals(context.ExternalPortContext()(), + {'ext_port': 'eth2'}) + + config = fake_config({'ext-port': ABSENT_MACS}) + self.config.side_effect = config + mock_config.side_effect = config + + self.assertEquals(context.ExternalPortContext()(), {}) + + @patch('charmhelpers.contrib.openstack.context.is_phy_iface', + lambda arg: True) + @patch('charmhelpers.contrib.openstack.context.get_nic_hwaddr') + @patch('charmhelpers.contrib.openstack.context.list_nics') + @patch('charmhelpers.contrib.openstack.context.get_ipv6_addr') + @patch('charmhelpers.contrib.openstack.context.get_ipv4_addr') + @patch('charmhelpers.contrib.openstack.context.config') + def test_ext_port_mac_one_used_nic(self, mock_config, + mock_get_ipv4_addr, + mock_get_ipv6_addr, mock_list_nics, + mock_get_nic_hwaddr): + + self.relation_ids.return_value = ['neutron-plugin-api:1'] + self.related_units.return_value = ['neutron-api/0'] + self.relation_get.return_value = {'network-device-mtu': 1234, + 'l2-population': 'False'} + config_macs = "%s %s" % (MACHINE_MACS['eth1'], + MACHINE_MACS['eth2']) + + mock_get_ipv4_addr.side_effect = self._fake_get_ipv4 + mock_get_ipv6_addr.return_value = [] + mock_list_nics.return_value = MACHINE_MACS.keys() + mock_get_nic_hwaddr.side_effect = self._fake_get_hwaddr + + config = fake_config({'ext-port': config_macs}) + self.config.side_effect = config + mock_config.side_effect = config + self.assertEquals(context.ExternalPortContext()(), + {'ext_port': 'eth2', 'ext_port_mtu': 1234}) + + @patch('charmhelpers.contrib.openstack.context.NeutronPortContext.' + 'resolve_ports') + def test_data_port_eth(self, mock_resolve): + self.config.side_effect = fake_config({'data-port': + 'phybr1:eth1010 ' + 'phybr1:eth1011'}) + mock_resolve.side_effect = lambda ports: ['eth1010'] + self.assertEquals(context.DataPortContext()(), + {'eth1010': 'phybr1'}) + + @patch.object(context, 'get_nic_hwaddr') + @patch.object(context.NeutronPortContext, 'resolve_ports') + def test_data_port_mac(self, mock_resolve, mock_get_nic_hwaddr): + extant_mac = 'cb:23:ae:72:f2:33' + non_extant_mac = 'fa:16:3e:12:97:8e' + self.config.side_effect = fake_config({'data-port': + 'phybr1:%s phybr1:%s' % + (non_extant_mac, extant_mac)}) + + def fake_resolve(ports): + resolved = [] + for port in ports: + if port == extant_mac: + resolved.append('eth1010') + + return resolved + + mock_get_nic_hwaddr.side_effect = lambda nic: extant_mac + mock_resolve.side_effect = fake_resolve + + self.assertEquals(context.DataPortContext()(), + {'eth1010': 'phybr1'}) + + @patch.object(context.NeutronAPIContext, '__call__', lambda *args: + {'network_device_mtu': 5000}) + @patch.object(context, 'get_nic_hwaddr', lambda inst, port: port) + @patch.object(context.NeutronPortContext, 'resolve_ports', + lambda inst, ports: ports) + def test_phy_nic_mtu_context(self): + self.config.side_effect = fake_config({'data-port': + 'phybr1:eth0'}) + ctxt = context.PhyNICMTUContext()() + self.assertEqual(ctxt, {'devs': 'eth0', 'mtu': 5000}) + + @patch.object(context.glob, 'glob') + @patch.object(context.NeutronAPIContext, '__call__', lambda *args: + {'network_device_mtu': 5000}) + @patch.object(context, 'get_nic_hwaddr', lambda inst, port: port) + @patch.object(context.NeutronPortContext, 'resolve_ports', + lambda inst, ports: ports) + def test_phy_nic_mtu_context_vlan(self, mock_glob): + self.config.side_effect = fake_config({'data-port': + 'phybr1:eth0.100'}) + mock_glob.return_value = ['/sys/class/net/eth0.100/lower_eth0'] + ctxt = context.PhyNICMTUContext()() + 
self.assertEqual(ctxt, {'devs': 'eth0\\neth0.100', 'mtu': 5000}) + + @patch.object(context.glob, 'glob') + @patch.object(context.NeutronAPIContext, '__call__', lambda *args: + {'network_device_mtu': 5000}) + @patch.object(context, 'get_nic_hwaddr', lambda inst, port: port) + @patch.object(context.NeutronPortContext, 'resolve_ports', + lambda inst, ports: ports) + def test_phy_nic_mtu_context_vlan_w_duplicate_raw(self, mock_glob): + self.config.side_effect = fake_config({'data-port': + 'phybr1:eth0.100 ' + 'phybr1:eth0.200'}) + + def fake_glob(wcard): + if 'eth0.100' in wcard: + return ['/sys/class/net/eth0.100/lower_eth0'] + elif 'eth0.200' in wcard: + return ['/sys/class/net/eth0.200/lower_eth0'] + + raise Exception("Unexpected key '%s'" % (wcard)) + + mock_glob.side_effect = fake_glob + ctxt = context.PhyNICMTUContext()() + self.assertEqual(ctxt, {'devs': 'eth0\\neth0.100\\neth0.200', + 'mtu': 5000}) + + def test_neutronapicontext_defaults(self): + self.relation_ids.return_value = [] + expected_keys = [ + 'l2_population', 'enable_dvr', 'enable_l3ha', + 'overlay_network_type', 'network_device_mtu', + 'enable_qos', 'enable_nsg_logging', 'global_physnet_mtu', + 'physical_network_mtus' + ] + api_ctxt = context.NeutronAPIContext()() + for key in expected_keys: + self.assertTrue(key in api_ctxt) + self.assertEquals(api_ctxt['polling_interval'], 2) + self.assertEquals(api_ctxt['rpc_response_timeout'], 60) + self.assertEquals(api_ctxt['report_interval'], 30) + self.assertEquals(api_ctxt['enable_nsg_logging'], False) + self.assertEquals(api_ctxt['global_physnet_mtu'], 1500) + self.assertIsNone(api_ctxt['physical_network_mtus']) + + def setup_neutron_api_context_relation(self, cfg): + self.relation_ids.return_value = ['neutron-plugin-api:1'] + self.related_units.return_value = ['neutron-api/0'] + # The l2-population key is used by the context as a way of checking if + # the api service on the other end is sending data in a recent format. 
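+ # Each test below therefore includes 'l2-population' alongside + # the setting actually under test.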
+ self.relation_get.return_value = cfg + + def test_neutronapicontext_extension_drivers_qos_on(self): + self.setup_neutron_api_context_relation({ + 'enable-qos': 'True', + 'l2-population': 'True'}) + api_ctxt = context.NeutronAPIContext()() + self.assertTrue(api_ctxt['enable_qos']) + self.assertEquals(api_ctxt['extension_drivers'], 'qos') + + def test_neutronapicontext_extension_drivers_qos_off(self): + self.setup_neutron_api_context_relation({ + 'enable-qos': 'False', + 'l2-population': 'True'}) + api_ctxt = context.NeutronAPIContext()() + self.assertFalse(api_ctxt['enable_qos']) + self.assertEquals(api_ctxt['extension_drivers'], '') + + def test_neutronapicontext_extension_drivers_qos_absent(self): + self.setup_neutron_api_context_relation({ + 'l2-population': 'True'}) + api_ctxt = context.NeutronAPIContext()() + self.assertFalse(api_ctxt['enable_qos']) + self.assertEquals(api_ctxt['extension_drivers'], '') + + def test_neutronapicontext_extension_drivers_log_off(self): + self.setup_neutron_api_context_relation({ + 'enable-nsg-logging': 'False', + 'l2-population': 'True'}) + api_ctxt = context.NeutronAPIContext()() + self.assertEquals(api_ctxt['extension_drivers'], '') + + def test_neutronapicontext_extension_drivers_log_on(self): + self.setup_neutron_api_context_relation({ + 'enable-nsg-logging': 'True', + 'l2-population': 'True'}) + api_ctxt = context.NeutronAPIContext()() + self.assertEquals(api_ctxt['extension_drivers'], 'log') + + def test_neutronapicontext_extension_drivers_log_qos_on(self): + self.setup_neutron_api_context_relation({ + 'enable-qos': 'True', + 'enable-nsg-logging': 'True', + 'l2-population': 'True'}) + api_ctxt = context.NeutronAPIContext()() + self.assertEquals(api_ctxt['extension_drivers'], 'qos,log') + + def test_neutronapicontext_firewall_group_logging_on(self): + self.setup_neutron_api_context_relation({ + 'enable-nfg-logging': 'True', + 'l2-population': 'True' + }) + api_ctxt = context.NeutronAPIContext()() + self.assertEquals(api_ctxt['enable_nfg_logging'], True) + + def test_neutronapicontext_firewall_group_logging_off(self): + self.setup_neutron_api_context_relation({ + 'enable-nfg-logging': 'False', + 'l2-population': 'True' + }) + api_ctxt = context.NeutronAPIContext()() + self.assertEquals(api_ctxt['enable_nfg_logging'], False) + + def test_neutronapicontext_port_forwarding_on(self): + self.setup_neutron_api_context_relation({ + 'enable-port-forwarding': 'True', + 'l2-population': 'True' + }) + api_ctxt = context.NeutronAPIContext()() + self.assertEquals(api_ctxt['enable_port_forwarding'], True) + + def test_neutronapicontext_port_forwarding_off(self): + self.setup_neutron_api_context_relation({ + 'enable-port-forwarding': 'False', + 'l2-population': 'True' + }) + api_ctxt = context.NeutronAPIContext()() + self.assertEquals(api_ctxt['enable_port_forwarding'], False) + + def test_neutronapicontext_string_converted(self): + self.setup_neutron_api_context_relation({ + 'l2-population': 'True'}) + api_ctxt = context.NeutronAPIContext()() + self.assertEquals(api_ctxt['l2_population'], True) + + def test_neutronapicontext_none(self): + self.relation_ids.return_value = ['neutron-plugin-api:1'] + self.related_units.return_value = ['neutron-api/0'] + self.relation_get.return_value = {'l2-population': 'True'} + api_ctxt = context.NeutronAPIContext()() + self.assertEquals(api_ctxt['network_device_mtu'], None) + + def test_network_service_ctxt_no_units(self): + self.relation_ids.return_value = [] + self.relation_ids.return_value = ['foo'] + 
self.related_units.return_value = [] + self.assertEquals(context.NetworkServiceContext()(), {}) + + @patch.object(context.OSContextGenerator, 'context_complete') + def test_network_service_ctxt_no_data(self, mock_context_complete): + rel = FakeRelation(QUANTUM_NETWORK_SERVICE_RELATION) + self.relation_ids.side_effect = rel.relation_ids + self.related_units.side_effect = rel.relation_units + relation = FakeRelation(relation_data=QUANTUM_NETWORK_SERVICE_RELATION) + self.relation_get.side_effect = relation.get + mock_context_complete.return_value = False + self.assertEquals(context.NetworkServiceContext()(), {}) + + def test_network_service_ctxt_data(self): + data_result = { + 'keystone_host': '10.5.0.1', + 'service_port': '5000', + 'auth_port': '20000', + 'service_tenant': 'tenant', + 'service_username': 'username', + 'service_password': 'password', + 'quantum_host': '10.5.0.2', + 'quantum_port': '9696', + 'quantum_url': 'http://10.5.0.2:9696/v2', + 'region': 'aregion', + 'service_protocol': 'http', + 'auth_protocol': 'http', + 'api_version': '2.0', + } + rel = FakeRelation(QUANTUM_NETWORK_SERVICE_RELATION) + self.relation_ids.side_effect = rel.relation_ids + self.related_units.side_effect = rel.relation_units + relation = FakeRelation(relation_data=QUANTUM_NETWORK_SERVICE_RELATION) + self.relation_get.side_effect = relation.get + self.assertEquals(context.NetworkServiceContext()(), data_result) + + def test_network_service_ctxt_data_api_version(self): + data_result = { + 'keystone_host': '10.5.0.1', + 'service_port': '5000', + 'auth_port': '20000', + 'service_tenant': 'tenant', + 'service_username': 'username', + 'service_password': 'password', + 'quantum_host': '10.5.0.2', + 'quantum_port': '9696', + 'quantum_url': 'http://10.5.0.2:9696/v2', + 'region': 'aregion', + 'service_protocol': 'http', + 'auth_protocol': 'http', + 'api_version': '3', + } + rel = FakeRelation(QUANTUM_NETWORK_SERVICE_RELATION_VERSIONED) + self.relation_ids.side_effect = rel.relation_ids + self.related_units.side_effect = rel.relation_units + relation = FakeRelation( + relation_data=QUANTUM_NETWORK_SERVICE_RELATION_VERSIONED) + self.relation_get.side_effect = relation.get + self.assertEquals(context.NetworkServiceContext()(), data_result) + + def test_internal_endpoint_context(self): + config = {'use-internal-endpoints': False} + self.config.side_effect = fake_config(config) + ctxt = context.InternalEndpointContext() + self.assertFalse(ctxt()['use_internal_endpoints']) + config = {'use-internal-endpoints': True} + self.config.side_effect = fake_config(config) + self.assertTrue(ctxt()['use_internal_endpoints']) + + @patch.object(context, 'os_release') + def test_volume_api_context(self, mock_os_release): + mock_os_release.return_value = 'ocata' + config = {'use-internal-endpoints': False} + self.config.side_effect = fake_config(config) + ctxt = context.VolumeAPIContext('cinder-common') + c = ctxt() + self.assertEqual(c['volume_api_version'], '2') + self.assertEqual(c['volume_catalog_info'], + 'volumev2:cinderv2:publicURL') + + mock_os_release.return_value = 'pike' + config['use-internal-endpoints'] = True + self.config.side_effect = fake_config(config) + ctxt = context.VolumeAPIContext('cinder-common') + c = ctxt() + self.assertEqual(c['volume_api_version'], '3') + self.assertEqual(c['volume_catalog_info'], + 'volumev3:cinderv3:internalURL') + + def test_volume_api_context_no_pkg(self): + self.assertRaises(ValueError, context.VolumeAPIContext, "") + self.assertRaises(ValueError, context.VolumeAPIContext, None) + + def 
test_apparmor_context_call_not_valid(self): + ''' Tests for the apparmor context''' + mock_aa_object = context.AppArmorContext() + # Test with invalid config + self.config.return_value = 'NOTVALID' + self.assertEquals(mock_aa_object.__call__(), None) + + def test_apparmor_context_call_complain(self): + ''' Tests for the apparmor context''' + mock_aa_object = context.AppArmorContext() + # Test complain mode + self.config.return_value = 'complain' + self.assertEquals(mock_aa_object.__call__(), + {'aa_profile_mode': 'complain', + 'ubuntu_release': '16.04'}) + + def test_apparmor_context_call_enforce(self): + ''' Tests for the apparmor context''' + mock_aa_object = context.AppArmorContext() + # Test enforce mode + self.config.return_value = 'enforce' + self.assertEquals(mock_aa_object.__call__(), + {'aa_profile_mode': 'enforce', + 'ubuntu_release': '16.04'}) + + def test_apparmor_context_call_disable(self): + ''' Tests for the apparmor context''' + mock_aa_object = context.AppArmorContext() + # Test disable mode + self.config.return_value = 'disable' + self.assertEquals(mock_aa_object.__call__(), + {'aa_profile_mode': 'disable', + 'ubuntu_release': '16.04'}) + + def test_apparmor_setup_complain(self): + ''' Tests for the apparmor setup''' + AA = context.AppArmorContext(profile_name='fake-aa-profile') + AA.install_aa_utils = MagicMock() + AA.manually_disable_aa_profile = MagicMock() + # Test complain mode + self.config.return_value = 'complain' + AA.setup_aa_profile() + AA.install_aa_utils.assert_called_with() + self.check_call.assert_called_with(['aa-complain', 'fake-aa-profile']) + self.assertFalse(AA.manually_disable_aa_profile.called) + + def test_apparmor_setup_enforce(self): + ''' Tests for the apparmor setup''' + AA = context.AppArmorContext(profile_name='fake-aa-profile') + AA.install_aa_utils = MagicMock() + AA.manually_disable_aa_profile = MagicMock() + # Test enforce mode + self.config.return_value = 'enforce' + AA.setup_aa_profile() + self.check_call.assert_called_with(['aa-enforce', 'fake-aa-profile']) + self.assertFalse(AA.manually_disable_aa_profile.called) + + def test_apparmor_setup_disable(self): + ''' Tests for the apparmor setup''' + AA = context.AppArmorContext(profile_name='fake-aa-profile') + AA.install_aa_utils = MagicMock() + AA.manually_disable_aa_profile = MagicMock() + # Test disable mode + self.config.return_value = 'disable' + AA.setup_aa_profile() + self.check_call.assert_called_with(['aa-disable', 'fake-aa-profile']) + self.assertFalse(AA.manually_disable_aa_profile.called) + # Test failed to disable + from subprocess import CalledProcessError + self.check_call.side_effect = CalledProcessError(0, 0, 0) + AA.setup_aa_profile() + self.check_call.assert_called_with(['aa-disable', 'fake-aa-profile']) + AA.manually_disable_aa_profile.assert_called_with() + + @patch.object(context, 'enable_memcache') + @patch.object(context, 'is_ipv6_disabled') + def test_memcache_context_ipv6(self, _is_ipv6_disabled, _enable_memcache): + self.lsb_release.return_value = {'DISTRIB_CODENAME': 'xenial'} + _enable_memcache.return_value = True + _is_ipv6_disabled.return_value = False + config = { + 'openstack-origin': 'distro', + } + self.config.side_effect = fake_config(config) + ctxt = context.MemcacheContext() + self.assertTrue(ctxt()['use_memcache']) + expect = { + 'memcache_port': '11211', + 'memcache_server': '::1', + 'memcache_server_formatted': '[::1]', + 'memcache_url': 'inet6:[::1]:11211', + 'use_memcache': True} + self.assertEqual(ctxt(), expect) + self.lsb_release.return_value
= {'DISTRIB_CODENAME': 'trusty'} + expect['memcache_server'] = 'ip6-localhost' + ctxt = context.MemcacheContext() + self.assertEqual(ctxt(), expect) + + @patch.object(context, 'enable_memcache') + @patch.object(context, 'is_ipv6_disabled') + def test_memcache_context_ipv4(self, _is_ipv6_disabled, _enable_memcache): + self.lsb_release.return_value = {'DISTRIB_CODENAME': 'xenial'} + _enable_memcache.return_value = True + _is_ipv6_disabled.return_value = True + config = { + 'openstack-origin': 'distro', + } + self.config.side_effect = fake_config(config) + ctxt = context.MemcacheContext() + self.assertTrue(ctxt()['use_memcache']) + expect = { + 'memcache_port': '11211', + 'memcache_server': '127.0.0.1', + 'memcache_server_formatted': '127.0.0.1', + 'memcache_url': '127.0.0.1:11211', + 'use_memcache': True} + self.assertEqual(ctxt(), expect) + self.lsb_release.return_value = {'DISTRIB_CODENAME': 'trusty'} + expect['memcache_server'] = 'localhost' + ctxt = context.MemcacheContext() + self.assertEqual(ctxt(), expect) + + @patch.object(context, 'enable_memcache') + def test_memcache_off_context(self, _enable_memcache): + _enable_memcache.return_value = False + config = {'openstack-origin': 'distro'} + self.config.side_effect = fake_config(config) + ctxt = context.MemcacheContext() + self.assertFalse(ctxt()['use_memcache']) + self.assertEqual(ctxt(), {'use_memcache': False}) + + @patch('charmhelpers.contrib.openstack.context.mkdir') + def test_ensure_dir_ctx(self, mkdir): + dirname = '/etc/keystone/policy.d' + owner = 'someuser' + group = 'somegroup' + perms = 0o555 + force = False + ctxt = context.EnsureDirContext(dirname, owner=owner, + group=group, perms=perms, + force=force) + ctxt() + mkdir.assert_called_with(dirname, owner=owner, group=group, + perms=perms, force=force) + + @patch.object(context, 'os_release') + def test_VersionsContext(self, os_release): + self.lsb_release.return_value = {'DISTRIB_CODENAME': 'xenial'} + os_release.return_value = 'essex' + self.assertEqual( + context.VersionsContext()(), + { + 'openstack_release': 'essex', + 'operating_system_release': 'xenial'}) + os_release.assert_called_once_with('python-keystone') + self.lsb_release.assert_called_once_with() + + def test_logrotate_context_unset(self): + logrotate = context.LogrotateContext(location='nova', + interval='weekly', + count=4) + ctxt = logrotate() + expected_ctxt = { + 'logrotate_logs_location': 'nova', + 'logrotate_interval': 'weekly', + 'logrotate_count': 'rotate 4', + } + self.assertEquals(ctxt, expected_ctxt) + + @patch.object(context, 'os_release') + def test_vendordata_static(self, os_release): + _vdata = '{"good": "json"}' + os_release.return_value = 'rocky' + self.config.side_effect = [_vdata, None] + ctxt = context.NovaVendorMetadataContext('nova-common')() + + self.assertTrue(ctxt['vendor_data']) + self.assertEqual('StaticJSON', ctxt['vendordata_providers']) + self.assertNotIn('vendor_data_url', ctxt) + + @patch.object(context, 'os_release') + def test_vendordata_dynamic(self, os_release): + _vdata_url = 'http://example.org/vdata' + os_release.return_value = 'rocky' + + self.config.side_effect = [None, _vdata_url] + ctxt = context.NovaVendorMetadataContext('nova-common')() + + self.assertEqual(_vdata_url, ctxt['vendor_data_url']) + self.assertEqual('DynamicJSON', ctxt['vendordata_providers']) + self.assertFalse(ctxt['vendor_data']) + + @patch.object(context, 'os_release') + def test_vendordata_static_and_dynamic(self, os_release): + os_release.return_value = 'rocky' + _vdata = '{"good": "json"}' + 
_vdata_url = 'http://example.org/vdata' + + self.config.side_effect = [_vdata, _vdata_url] + ctxt = context.NovaVendorMetadataContext('nova-common')() + + self.assertTrue(ctxt['vendor_data']) + self.assertEqual(_vdata_url, ctxt['vendor_data_url']) + self.assertEqual('StaticJSON,DynamicJSON', + ctxt['vendordata_providers']) + + @patch.object(context, 'log') + @patch.object(context, 'os_release') + def test_vendordata_static_invalid_and_dynamic(self, os_release, log): + os_release.return_value = 'rocky' + _vdata = '{bad: json}' + _vdata_url = 'http://example.org/vdata' + + self.config.side_effect = [_vdata, _vdata_url] + ctxt = context.NovaVendorMetadataContext('nova-common')() + + self.assertFalse(ctxt['vendor_data']) + self.assertEqual(_vdata_url, ctxt['vendor_data_url']) + self.assertEqual('DynamicJSON', ctxt['vendordata_providers']) + self.assertTrue(log.called) + + @patch('charmhelpers.contrib.openstack.context.log') + @patch.object(context, 'os_release') + def test_vendordata_static_and_dynamic_mitaka(self, os_release, log): + os_release.return_value = 'mitaka' + _vdata = '{"good": "json"}' + _vdata_url = 'http://example.org/vdata' + + self.config.side_effect = [_vdata, _vdata_url] + ctxt = context.NovaVendorMetadataContext('nova-common')() + + self.assertTrue(log.called) + self.assertTrue(ctxt['vendor_data']) + self.assertNotIn('vendor_data_url', ctxt) + self.assertNotIn('vendordata_providers', ctxt) + + @patch.object(context, 'log') + def test_vendordata_json_valid(self, log): + _vdata = '{"good": "json"}' + self.config.side_effect = [_vdata] + + ctxt = context.NovaVendorMetadataJSONContext('nova-common')() + + self.assertEqual({'vendor_data_json': _vdata}, ctxt) + self.assertFalse(log.called) + + @patch.object(context, 'log') + def test_vendordata_json_invalid(self, log): + _vdata = '{bad: json}' + self.config.side_effect = [_vdata] + + ctxt = context.NovaVendorMetadataJSONContext('nova-common')() + + self.assertEqual({'vendor_data_json': '{}'}, ctxt) + self.assertTrue(log.called) + + @patch.object(context, 'log') + def test_vendordata_json_empty(self, log): + self.config.side_effect = [None] + + ctxt = context.NovaVendorMetadataJSONContext('nova-common')() + + self.assertEqual({'vendor_data_json': '{}'}, ctxt) + self.assertFalse(log.called) + + @patch.object(context, 'socket') + def test_host_info_context(self, _socket): + _socket.getaddrinfo.return_value = [(None, None, None, 'myhost.mydomain', None)] + _socket.gethostname.return_value = 'myhost' + ctxt = context.HostInfoContext()() + self.assertEqual({ + 'host_fqdn': 'myhost.mydomain', + 'host': 'myhost', + 'use_fqdn_hint': False}, + ctxt) + ctxt = context.HostInfoContext(use_fqdn_hint_cb=lambda: True)() + self.assertEqual({ + 'host_fqdn': 'myhost.mydomain', + 'host': 'myhost', + 'use_fqdn_hint': True}, + ctxt) + # if getaddrinfo is unable to find the canonical name we should return + # the shortname to match the behaviour of the original implementation. 
+ _socket.getaddrinfo.return_value = [(None, None, None, 'localhost', None)] + ctxt = context.HostInfoContext()() + self.assertEqual({ + 'host_fqdn': 'myhost', + 'host': 'myhost', + 'use_fqdn_hint': False}, + ctxt) + if six.PY2: + _socket.error = Exception + _socket.getaddrinfo.side_effect = Exception + else: + _socket.getaddrinfo.side_effect = OSError + _socket.gethostname.return_value = 'myhost' + ctxt = context.HostInfoContext()() + self.assertEqual({ + 'host_fqdn': 'myhost', + 'host': 'myhost', + 'use_fqdn_hint': False}, + ctxt) + + @patch.object(context, "DHCPAgentContext") + def test_validate_ovs_use_veth(self, _context): + # No existing dhcp_agent.ini and no config + _context.get_existing_ovs_use_veth.return_value = None + _context.parse_ovs_use_veth.return_value = None + self.assertEqual((None, None), context.validate_ovs_use_veth()) + + # No existing dhcp_agent.ini and config set + _context.get_existing_ovs_use_veth.return_value = None + _context.parse_ovs_use_veth.return_value = True + self.assertEqual((None, None), context.validate_ovs_use_veth()) + + # Existing dhcp_agent.ini and no config + _context.get_existing_ovs_use_veth.return_value = True + _context.parse_ovs_use_veth.return_value = None + self.assertEqual((None, None), context.validate_ovs_use_veth()) + + # Check for agreement with existing dhcp_agent.ini + _context.get_existing_ovs_use_veth.return_value = False + _context.parse_ovs_use_veth.return_value = False + self.assertEqual((None, None), context.validate_ovs_use_veth()) + + # Check for disagreement with existing dhcp_agent.ini + _context.get_existing_ovs_use_veth.return_value = True + _context.parse_ovs_use_veth.return_value = False + self.assertEqual( + ("blocked", + "Mismatched existing and configured ovs-use-veth. See log."), + context.validate_ovs_use_veth()) + + def test_dhcp_agent_context(self): + # Defaults + _config = { + "debug": False, + "dns-servers": None, + "enable-isolated-metadata": None, + "enable-metadata-network": None, + "instance-mtu": None, + "ovs-use-veth": None} + _expect = { + "debug": False, + "dns_servers": None, + "enable_isolated_metadata": None, + "enable_metadata_network": None, + "instance_mtu": None, + "ovs_use_veth": False} + self.config.side_effect = fake_config(_config) + _get_ovs_use_veth = MagicMock() + _get_ovs_use_veth.return_value = False + ctx_object = context.DHCPAgentContext() + ctx_object.get_ovs_use_veth = _get_ovs_use_veth + ctxt = ctx_object() + self.assertEqual(_expect, ctxt) + + # Non-defaults + _dns = "10.5.0.2" + _mtu = 8950 + _config = { + "debug": True, + "dns-servers": _dns, + "enable-isolated-metadata": True, + "enable-metadata-network": True, + "instance-mtu": _mtu, + "ovs-use-veth": True} + _expect = { + "debug": True, + "dns_servers": _dns, + "enable_isolated_metadata": True, + "enable_metadata_network": True, + "instance_mtu": _mtu, + "ovs_use_veth": True} + self.config.side_effect = fake_config(_config) + _get_ovs_use_veth.return_value = True + ctxt = ctx_object() + self.assertEqual(_expect, ctxt) + + def test_dhcp_agent_context_no_dns_domain(self): + _config = {"dns-servers": '8.8.8.8'} + self.config.side_effect = fake_config(_config) + self.relation_ids.return_value = ['rid1'] + self.related_units.return_value = ['nova-compute/0'] + self.relation_get.return_value = 'nova' + self.assertEqual( + context.DHCPAgentContext()(), + {'instance_mtu': None, + 'dns_servers': '8.8.8.8', + 'ovs_use_veth': False, + "enable_isolated_metadata": None, + "enable_metadata_network": None, + "debug": None} + ) + + def 
test_dhcp_agent_context_dnsmasq_flags(self): + _config = {'dnsmasq-flags': 'dhcp-userclass=set:ipxe,iPXE,' + 'dhcp-match=set:ipxe,175,' + 'server=1.2.3.4'} + self.config.side_effect = fake_config(_config) + self.assertEqual( + context.DHCPAgentContext()(), + { + 'dnsmasq_flags': collections.OrderedDict( + [('dhcp-userclass', 'set:ipxe,iPXE'), + ('dhcp-match', 'set:ipxe,175'), + ('server', '1.2.3.4')]), + 'instance_mtu': None, + 'dns_servers': None, + 'ovs_use_veth': False, + "enable_isolated_metadata": None, + "enable_metadata_network": None, + "debug": None, + } + ) + + def test_get_ovs_use_veth(self): + _get_existing_ovs_use_veth = MagicMock() + _parse_ovs_use_veth = MagicMock() + ctx_object = context.DHCPAgentContext() + ctx_object.get_existing_ovs_use_veth = _get_existing_ovs_use_veth + ctx_object.parse_ovs_use_veth = _parse_ovs_use_veth + + # Default + _get_existing_ovs_use_veth.return_value = None + _parse_ovs_use_veth.return_value = None + self.assertEqual(False, ctx_object.get_ovs_use_veth()) + + # Existing dhcp_agent.ini and no config + _get_existing_ovs_use_veth.return_value = True + _parse_ovs_use_veth.return_value = None + self.assertEqual(True, ctx_object.get_ovs_use_veth()) + + # No existing dhcp_agent.ini and config set + _get_existing_ovs_use_veth.return_value = None + _parse_ovs_use_veth.return_value = False + self.assertEqual(False, ctx_object.get_ovs_use_veth()) + + # Both set matching + _get_existing_ovs_use_veth.return_value = True + _parse_ovs_use_veth.return_value = True + self.assertEqual(True, ctx_object.get_ovs_use_veth()) + + # Both set mismatch: existing overrides + _get_existing_ovs_use_veth.return_value = False + _parse_ovs_use_veth.return_value = True + self.assertEqual(False, ctx_object.get_ovs_use_veth()) + + # Both set mismatch: existing overrides + _get_existing_ovs_use_veth.return_value = True + _parse_ovs_use_veth.return_value = False + self.assertEqual(True, ctx_object.get_ovs_use_veth()) + + @patch.object(context, 'config_ini') + @patch.object(context.os.path, 'isfile') + def test_get_existing_ovs_use_veth(self, _is_file, _config_ini): + _config = {"ovs-use-veth": None} + self.config.side_effect = fake_config(_config) + + ctx_object = context.DHCPAgentContext() + + # Default + _is_file.return_value = False + self.assertEqual(None, ctx_object.get_existing_ovs_use_veth()) + + # Existing + _is_file.return_value = True + _config_ini.return_value = {"DEFAULT": {"ovs_use_veth": True}} + self.assertEqual(True, ctx_object.get_existing_ovs_use_veth()) + + # Existing config_ini returns string + _is_file.return_value = True + _config_ini.return_value = {"DEFAULT": {"ovs_use_veth": "False"}} + self.assertEqual(False, ctx_object.get_existing_ovs_use_veth()) + + @patch.object(context, 'bool_from_string') + def test_parse_ovs_use_veth(self, _bool_from_string): + _config = {"ovs-use-veth": None} + self.config.side_effect = fake_config(_config) + + ctx_object = context.DHCPAgentContext() + + # Unset + self.assertEqual(None, ctx_object.parse_ovs_use_veth()) + _bool_from_string.assert_not_called() + + # Consider empty string unset + _config = {"ovs-use-veth": ""} + self.config.side_effect = fake_config(_config) + self.assertEqual(None, ctx_object.parse_ovs_use_veth()) + _bool_from_string.assert_not_called() + + # Lower true + _bool_from_string.return_value = True + _config = {"ovs-use-veth": "true"} + self.config.side_effect = fake_config(_config) + self.assertEqual(True, ctx_object.parse_ovs_use_veth()) + _bool_from_string.assert_called_with("true") + + # Lower false + 
_bool_from_string.return_value = False + _bool_from_string.reset_mock() + _config = {"ovs-use-veth": "false"} + self.config.side_effect = fake_config(_config) + self.assertEqual(False, ctx_object.parse_ovs_use_veth()) + _bool_from_string.assert_called_with("false") + + # Upper True + _bool_from_string.return_value = True + _bool_from_string.reset_mock() + _config = {"ovs-use-veth": "True"} + self.config.side_effect = fake_config(_config) + self.assertEqual(True, ctx_object.parse_ovs_use_veth()) + _bool_from_string.assert_called_with("True") + + # Invalid + _bool_from_string.reset_mock() + _config = {"ovs-use-veth": "Invalid"} + self.config.side_effect = fake_config(_config) + _bool_from_string.side_effect = ValueError + with self.assertRaises(ValueError): + ctx_object.parse_ovs_use_veth() + _bool_from_string.assert_called_with("Invalid") + + +class MockPCIDevice(object): + """Simple wrapper to mock pci.PCINetDevice class""" + def __init__(self, address): + self.pci_address = address + + +TEST_CPULIST_1 = "0-3" +TEST_CPULIST_2 = "0-7,16-23" +TEST_CPULIST_3 = "0,4,8,12,16,20,24" +DPDK_DATA_PORTS = ( + "br-phynet3:fe:16:41:df:23:fe " + "br-phynet1:fe:16:41:df:23:fd " + "br-phynet2:fe:f2:d0:45:dc:66" +) +BOND_MAPPINGS = ( + "bond0:fe:16:41:df:23:fe " + "bond0:fe:16:41:df:23:fd " + "bond1:fe:f2:d0:45:dc:66" +) +PCI_DEVICE_MAP = { + 'fe:16:41:df:23:fd': MockPCIDevice('0000:00:1c.0'), + 'fe:16:41:df:23:fe': MockPCIDevice('0000:00:1d.0'), +} + + +class TestDPDKUtils(tests.utils.BaseTestCase): + + def test_resolve_pci_from_mapping_config(self): + # FIXME: need to mock out the unit key value store + self.patch_object(context, 'config') + self.config.side_effect = lambda x: { + 'data-port': DPDK_DATA_PORTS, + 'dpdk-bond-mappings': BOND_MAPPINGS, + }.get(x) + _pci_devices = Mock() + _pci_devices.get_device_from_mac.side_effect = PCI_DEVICE_MAP.get + self.patch_object(context, 'pci') + self.pci.PCINetDevices.return_value = _pci_devices + self.assertDictEqual( + context.resolve_pci_from_mapping_config('data-port'), + { + '0000:00:1c.0': context.EntityMac( + 'br-phynet1', 'fe:16:41:df:23:fd'), + '0000:00:1d.0': context.EntityMac( + 'br-phynet3', 'fe:16:41:df:23:fe'), + }) + self.config.assert_called_once_with('data-port') + self.config.reset_mock() + self.assertDictEqual( + context.resolve_pci_from_mapping_config('dpdk-bond-mappings'), + { + '0000:00:1c.0': context.EntityMac( + 'bond0', 'fe:16:41:df:23:fd'), + '0000:00:1d.0': context.EntityMac( + 'bond0', 'fe:16:41:df:23:fe'), + }) + self.config.assert_called_once_with('dpdk-bond-mappings') + + +DPDK_PATCH = [ + 'resolve_pci_from_mapping_config', + 'glob', +] + +NUMA_CORES_SINGLE = { + '0': [0, 1, 2, 3] +} + +NUMA_CORES_MULTI = { + '0': [0, 1, 2, 3], + '1': [4, 5, 6, 7] +} + + +class TestOVSDPDKDeviceContext(tests.utils.BaseTestCase): + + def setUp(self): + super(TestOVSDPDKDeviceContext, self).setUp() + self.patch_object(context, 'config') + self.config.side_effect = lambda x: { + 'enable-dpdk': True, + } + self.target = context.OVSDPDKDeviceContext() + + def patch_target(self, attr, return_value=None): + mocked = mock.patch.object(self.target, attr) + self._patches[attr] = mocked + started = mocked.start() + started.return_value = return_value + self._patches_start[attr] = started + setattr(self, attr, started) + + def test__parse_cpu_list(self): + self.assertEqual(self.target._parse_cpu_list(TEST_CPULIST_1), + [0, 1, 2, 3]) + self.assertEqual(self.target._parse_cpu_list(TEST_CPULIST_2), + [0, 1, 2, 3, 4, 5, 6, 7, + 16, 17, 18, 19, 20, 21, 22, 23]) + 
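+        # A minimal sketch of the expansion rule the assertions above and
+        # below imply (an assumption inferred from the test data, not taken
+        # from the parser's actual implementation): comma-separated tokens
+        # are either single cores ("0") or inclusive ranges ("0-7").
+        def _expand(token):
+            if '-' in token:
+                start, end = token.split('-')
+                return list(range(int(start), int(end) + 1))
+            return [int(token)]
+        self.assertEqual(
+            [c for t in TEST_CPULIST_2.split(',') for c in _expand(t)],
+            [0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23])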
self.assertEqual(self.target._parse_cpu_list(TEST_CPULIST_3), + [0, 4, 8, 12, 16, 20, 24]) + + def test__numa_node_cores(self): + self.patch_target('_parse_cpu_list') + self._parse_cpu_list.return_value = [0, 1, 2, 3] + self.patch_object(context, 'glob') + self.glob.glob.return_value = [ + '/sys/devices/system/node/node0' + ] + with patch_open() as (_, mock_file): + mock_file.read.return_value = TEST_CPULIST_1 + self.target._numa_node_cores() + self.assertEqual(self.target._numa_node_cores(), + {'0': [0, 1, 2, 3]}) + self.glob.glob.assert_called_with('/sys/devices/system/node/node*') + self._parse_cpu_list.assert_called_with(TEST_CPULIST_1) + + def test_device_whitelist(self): + """Test device whitelist generation""" + self.patch_object( + context, 'resolve_pci_from_mapping_config', + return_value=collections.OrderedDict( + sorted({ + '0000:00:1c.0': 'br-data', + '0000:00:1d.0': 'br-data', + }.items()))) + self.assertEqual(self.target.device_whitelist(), + '-w 0000:00:1c.0 -w 0000:00:1d.0') + self.resolve_pci_from_mapping_config.assert_has_calls([ + call('data-port'), + call('dpdk-bond-mappings'), + ]) + + def test_socket_memory(self): + """Test socket memory configuration""" + self.patch_object(context, 'glob') + self.patch_object(context, 'config') + self.config.side_effect = lambda x: { + 'dpdk-socket-memory': 1024, + }.get(x) + self.glob.glob.return_value = ['a'] + self.assertEqual(self.target.socket_memory(), + '1024') + + self.glob.glob.return_value = ['a', 'b'] + self.assertEqual(self.target.socket_memory(), + '1024,1024') + + self.config.side_effect = lambda x: { + 'dpdk-socket-memory': 2048, + }.get(x) + self.assertEqual(self.target.socket_memory(), + '2048,2048') + + def test_cpu_mask(self): + """Test generation of hex CPU masks""" + self.patch_target('_numa_node_cores') + self._numa_node_cores.return_value = NUMA_CORES_SINGLE + self.config.side_effect = lambda x: { + 'dpdk-socket-cores': 1, + }.get(x) + self.assertEqual(self.target.cpu_mask(), '0x01') + + self._numa_node_cores.return_value = NUMA_CORES_MULTI + self.assertEqual(self.target.cpu_mask(), '0x11') + + self.config.side_effect = lambda x: { + 'dpdk-socket-cores': 2, + }.get(x) + self.assertEqual(self.target.cpu_mask(), '0x33') + + def test_context_no_devices(self): + """Ensure that DPDK is disabled when no devices are detected""" + self.patch_object(context, 'resolve_pci_from_mapping_config') + self.resolve_pci_from_mapping_config.return_value = {} + self.assertEqual(self.target(), {}) + self.resolve_pci_from_mapping_config.assert_has_calls([ + call('data-port'), + call('dpdk-bond-mappings'), + ]) + + def test_context_devices(self): + """Ensure DPDK is enabled when devices are detected""" + self.patch_target('_numa_node_cores') + self.patch_target('devices') + self.devices.return_value = collections.OrderedDict(sorted({ + '0000:00:1c.0': 'br-data', + '0000:00:1d.0': 'br-data', + }.items())) + self._numa_node_cores.return_value = NUMA_CORES_SINGLE + self.patch_object(context, 'glob') + self.glob.glob.return_value = ['a'] + self.config.side_effect = lambda x: { + 'dpdk-socket-cores': 1, + 'dpdk-socket-memory': 1024, + 'enable-dpdk': True, + }.get(x) + self.assertEqual(self.target(), { + 'cpu_mask': '0x01', + 'device_whitelist': '-w 0000:00:1c.0 -w 0000:00:1d.0', + 'dpdk_enabled': True, + 'socket_memory': '1024' + }) + + +class TestDPDKDeviceContext(tests.utils.BaseTestCase): + + _dpdk_bridges = { + '0000:00:1c.0': 'br-data', + '0000:00:1d.0': 'br-physnet1', + } + _dpdk_bonds = { + '0000:00:1c.1': 'dpdk-bond0', +
'0000:00:1d.1': 'dpdk-bond0', + } + + def setUp(self): + super(TestDPDKDeviceContext, self).setUp() + self.target = context.DPDKDeviceContext() + self.patch_object(context, 'resolve_pci_from_mapping_config') + self.resolve_pci_from_mapping_config.side_effect = [ + self._dpdk_bridges, + self._dpdk_bonds, + ] + + def test_context(self): + self.patch_object(context, 'config') + self.config.side_effect = lambda x: { + 'dpdk-driver': 'uio_pci_generic', + }.get(x) + devices = copy.deepcopy(self._dpdk_bridges) + devices.update(self._dpdk_bonds) + self.assertEqual(self.target(), { + 'devices': devices, + 'driver': 'uio_pci_generic' + }) + self.config.assert_called_with('dpdk-driver') + + def test_context_none_driver(self): + self.patch_object(context, 'config') + self.config.return_value = None + self.assertEqual(self.target(), {}) + self.config.assert_called_with('dpdk-driver') + + +class TestBridgePortInterfaceMap(tests.utils.BaseTestCase): + + def test__init__(self): + self.maxDiff = None + self.patch_object(context, 'config') + # system with three interfaces (eth0, eth1 and eth2) where + # eth0 and eth1 is part of linux bond bond0. + # Bridge mapping br-ex:eth2, br-provider1:bond0 + self.config.side_effect = lambda x: { + 'data-port': ( + 'br-ex:eth2 ' + 'br-provider1:00:00:5e:00:00:41 ' + 'br-provider1:00:00:5e:00:00:40'), + 'dpdk-bond-mappings': '', + }.get(x) + self.patch_object(context, 'resolve_pci_from_mapping_config') + self.resolve_pci_from_mapping_config.side_effect = [ + { + '0000:00:1c.0': context.EntityMac( + 'br-ex', '00:00:5e:00:00:42'), + }, + {}, + ] + self.patch_object(context, 'list_nics') + self.list_nics.return_value = ['bond0', 'eth0', 'eth1', 'eth2'] + self.patch_object(context, 'is_phy_iface') + self.is_phy_iface.side_effect = lambda x: True if not x.startswith( + 'bond') else False + self.patch_object(context, 'get_bond_master') + self.get_bond_master.side_effect = lambda x: 'bond0' if x in ( + 'eth0', 'eth1') else None + self.patch_object(context, 'get_nic_hwaddr') + self.get_nic_hwaddr.side_effect = lambda x: { + 'bond0': '00:00:5e:00:00:24', + 'eth0': '00:00:5e:00:00:40', + 'eth1': '00:00:5e:00:00:41', + 'eth2': '00:00:5e:00:00:42', + }.get(x) + bpi = context.BridgePortInterfaceMap() + self.maxDiff = None + expect = { + 'br-provider1': { + 'bond0': { + 'bond0': { + 'type': 'system', + }, + }, + }, + 'br-ex': { + 'eth2': { + 'eth2': { + 'type': 'system', + }, + }, + }, + } + self.assertDictEqual(bpi._map, expect) + # do it again but this time use the linux bond name instead of mac + # addresses. + self.config.side_effect = lambda x: { + 'data-port': ( + 'br-ex:eth2 ' + 'br-provider1:bond0'), + 'dpdk-bond-mappings': '', + }.get(x) + bpi = context.BridgePortInterfaceMap() + self.assertDictEqual(bpi._map, expect) + # and if a user asks for a purely virtual interface let's not stop them + expect = { + 'br-provider1': { + 'bond0.1234': { + 'bond0.1234': { + 'type': 'system', + }, + }, + }, + 'br-ex': { + 'eth2': { + 'eth2': { + 'type': 'system', + }, + }, + }, + } + self.config.side_effect = lambda x: { + 'data-port': ( + 'br-ex:eth2 ' + 'br-provider1:bond0.1234'), + 'dpdk-bond-mappings': '', + }.get(x) + bpi = context.BridgePortInterfaceMap() + self.assertDictEqual(bpi._map, expect) + # system with three interfaces (eth0, eth1 and eth2) where we should + # enable DPDK and create OVS bond of eth0 and eth1. 
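+        # (dpdk-bond-mappings reuses the same 'name:mac' pair syntax as
+        # data-port, as the config mock below shows.)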
+ # Bridge mapping br-ex:eth2 br-provider1:dpdk-bond0 + self.config.side_effect = lambda x: { + 'enable-dpdk': True, + 'data-port': ( + 'br-ex:00:00:5e:00:00:42 ' + 'br-provider1:dpdk-bond0'), + 'dpdk-bond-mappings': ( + 'dpdk-bond0:00:00:5e:00:00:40 ' + 'dpdk-bond0:00:00:5e:00:00:41'), + }.get(x) + self.resolve_pci_from_mapping_config.side_effect = [ + { + '0000:00:1c.0': context.EntityMac( + 'br-ex', '00:00:5e:00:00:42'), + }, + { + '0000:00:1d.0': context.EntityMac( + 'dpdk-bond0', '00:00:5e:00:00:40'), + '0000:00:1e.0': context.EntityMac( + 'dpdk-bond0', '00:00:5e:00:00:41'), + }, + ] + # once devices are bound to DPDK they disappear from the system list + # of interfaces + self.list_nics.return_value = [] + bpi = context.BridgePortInterfaceMap(global_mtu=1500) + self.assertDictEqual(bpi._map, { + 'br-provider1': { + 'dpdk-bond0': { + 'dpdk-600a59e': { + 'pci-address': '0000:00:1d.0', + 'type': 'dpdk', + 'mtu-request': '1500', + }, + 'dpdk-5fc1d91': { + 'pci-address': '0000:00:1e.0', + 'type': 'dpdk', + 'mtu-request': '1500', + }, + }, + }, + 'br-ex': { + 'dpdk-6204d33': { + 'dpdk-6204d33': { + 'pci-address': '0000:00:1c.0', + 'type': 'dpdk', + 'mtu-request': '1500', + }, + }, + }, + }) + + def test_add_interface(self): + self.patch_object(context, 'config') + self.config.return_value = '' + ctx = context.BridgePortInterfaceMap() + ctx.add_interface("br1", "bond1", "port1", ctx.interface_type.dpdk, + "00:00:00:00:00:01", 1500) + ctx.add_interface("br1", "bond1", "port2", ctx.interface_type.dpdk, + "00:00:00:00:00:02", 1500) + ctx.add_interface("br1", "bond2", "port3", ctx.interface_type.dpdk, + "00:00:00:00:00:03", 1500) + ctx.add_interface("br1", "bond2", "port4", ctx.interface_type.dpdk, + "00:00:00:00:00:04", 1500) + + expected = ( + 'br1', { + 'bond1': { + 'port1': { + 'type': 'dpdk', + 'pci-address': '00:00:00:00:00:01', + 'mtu-request': '1500', + }, + 'port2': { + 'type': 'dpdk', + 'pci-address': '00:00:00:00:00:02', + 'mtu-request': '1500', + }, + }, + 'bond2': { + 'port3': { + 'type': 'dpdk', + 'pci-address': '00:00:00:00:00:03', + 'mtu-request': '1500', + }, + 'port4': { + 'type': 'dpdk', + 'pci-address': '00:00:00:00:00:04', + 'mtu-request': '1500', + }, + }, + }, + ) + for br, bonds in ctx.items(): + self.maxDiff = None + self.assertEqual(br, expected[0]) + self.assertDictEqual(bonds, expected[1]) + + +class TestBondConfig(tests.utils.BaseTestCase): + + def test_get_bond_config(self): + self.patch_object(context, 'config') + self.config.side_effect = lambda x: { + 'dpdk-bond-config': ':active-backup bond1:balance-slb:off', + }.get(x) + bonds_config = context.BondConfig() + + self.assertEqual(bonds_config.get_bond_config('bond0'), + {'mode': 'active-backup', + 'lacp': 'active', + 'lacp-time': 'fast' + }) + self.assertEqual(bonds_config.get_bond_config('bond1'), + {'mode': 'balance-slb', + 'lacp': 'off', + 'lacp-time': 'fast' + }) + + +class TestSRIOVContext(tests.utils.BaseTestCase): + + class ObjectView(object): + + def __init__(self, _dict): + self.__dict__ = _dict + + def test___init__(self): + self.patch_object(context.pci, 'PCINetDevices') + pci_devices = self.ObjectView({ + 'pci_devices': [ + self.ObjectView({ + 'sriov': True, + 'interface_name': 'eth0', + 'sriov_totalvfs': 16, + }), + self.ObjectView({ + 'sriov': True, + 'interface_name': 'eth1', + 'sriov_totalvfs': 32, + }), + self.ObjectView({ + 'sriov': False, + 'interface_name': 'eth2', + }), + ] + }) + self.PCINetDevices.return_value = pci_devices + self.patch_object(context, 'config') + # auto sets up numvfs = 
totalvfs + self.config.return_value = { + 'sriov-numvfs': 'auto', + } + self.assertDictEqual(context.SRIOVContext()(), { + 'eth0': 16, + 'eth1': 32, + }) + # when sriov-device-mappings is used only listed devices are set up + self.config.return_value = { + 'sriov-numvfs': 'auto', + 'sriov-device-mappings': 'physnet1:eth0', + } + self.assertDictEqual(context.SRIOVContext()(), { + 'eth0': 16, + }) + self.config.return_value = { + 'sriov-numvfs': 'eth0:8', + 'sriov-device-mappings': 'physnet1:eth0', + } + self.assertDictEqual(context.SRIOVContext()(), { + 'eth0': 8, + }) + self.config.return_value = { + 'sriov-numvfs': 'eth1:8', + } + self.assertDictEqual(context.SRIOVContext()(), { + 'eth1': 8, + }) + # setting a numvfs value higher than a nic supports will revert to + # the nics max value + self.config.return_value = { + 'sriov-numvfs': 'eth1:64', + } + self.assertDictEqual(context.SRIOVContext()(), { + 'eth1': 32, + }) + # devices listed in sriov-numvfs have precedence over + # sriov-device-mappings and the limiter still works when both are used + self.config.return_value = { + 'sriov-numvfs': 'eth1:64', + 'sriov-device-mappings': 'physnet:eth0', + } + self.assertDictEqual(context.SRIOVContext()(), { + 'eth1': 32, + }) + # alternate config keys have effect + self.config.return_value = { + 'my-own-sriov-numvfs': 'auto', + 'my-own-sriov-device-mappings': 'physnet1:eth0', + } + self.assertDictEqual( + context.SRIOVContext( + numvfs_key='my-own-sriov-numvfs', + device_mappings_key='my-own-sriov-device-mappings')(), + { + 'eth0': 16, + }) + # blanket configuration works and respects limits + self.config.return_value = { + 'sriov-numvfs': '24', + } + self.assertDictEqual(context.SRIOVContext()(), { + 'eth0': 16, + 'eth1': 24, + }) + + def test___call__(self): + self.patch_object(context.pci, 'PCINetDevices') + pci_devices = self.ObjectView({'pci_devices': []}) + self.PCINetDevices.return_value = pci_devices + self.patch_object(context, 'config') + self.config.return_value = {'sriov-numvfs': 'auto'} + ctxt_obj = context.SRIOVContext() + ctxt_obj._map = {} + self.assertDictEqual(ctxt_obj(), {}) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_os_templating.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_os_templating.py new file mode 100644 index 0000000000000000000000000000000000000000..b30dc31f69e12d8687974d8af6346fb745a04e9d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_os_templating.py @@ -0,0 +1,374 @@ + +import os +import unittest + +from mock import patch, call, MagicMock + +import charmhelpers.contrib.openstack.templating as templating + +from jinja2.exceptions import TemplateNotFound + +import six +if not six.PY3: + builtin_open = '__builtin__.open' +else: + builtin_open = 'builtins.open' + + +class FakeContextGenerator(object): + interfaces = None + + def set(self, interfaces, context): + self.interfaces = interfaces + self.context = context + + def __call__(self): + return self.context + + +class FakeLoader(object): + def set(self, template): + self.template = template + + def get(self, name): + return self.template + + +class MockFSLoader(object): + def __init__(self, dirs): + self.searchpath = [dirs] + + +class MockChoiceLoader(object): + def __init__(self, loaders): + self.loaders = loaders + + +def MockTemplate(): + templ = MagicMock() + templ.render = MagicMock() + return templ + + +class TemplatingTests(unittest.TestCase): + 
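+    # These tests drive OSConfigRenderer entirely through fakes: setUp swaps
+    # the Jinja2 FileSystemLoader/ChoiceLoader/Environment classes for the
+    # Mock* stand-ins defined above, so no real template files are ever read
+    # from disk.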
def setUp(self): + path = os.path.dirname(__file__) + self.loader = FakeLoader() + self.context = FakeContextGenerator() + + self.addCleanup(patch.object(templating, 'apt_install').start().stop()) + self.addCleanup(patch.object(templating, 'log').start().stop()) + + templating.FileSystemLoader = MockFSLoader + templating.ChoiceLoader = MockChoiceLoader + templating.Environment = MagicMock + + self.renderer = templating.OSConfigRenderer(templates_dir=path, + openstack_release='folsom') + + @patch.object(templating, 'apt_install') + def test_initializing_a_render_ensures_jinja2_present(self, apt): + '''Creating a new renderer object installs jinja2 if needed''' + # temp. undo the patching from setUp + templating.FileSystemLoader = None + templating.ChoiceLoader = None + templating.Environment = None + templating.OSConfigRenderer(templates_dir='/tmp', + openstack_release='foo') + templating.FileSystemLoader = MockFSLoader + templating.ChoiceLoader = MockChoiceLoader + templating.Environment = MagicMock + if six.PY2: + apt.assert_called_with('python-jinja2') + else: + apt.assert_called_with('python3-jinja2') + + def test_create_renderer_invalid_templates_dir(self): + '''Ensure OSConfigRenderer checks templates_dir''' + self.assertRaises(templating.OSConfigException, + templating.OSConfigRenderer, + templates_dir='/tmp/foooo0', + openstack_release='grizzly') + + def test_render_unregistered_config(self): + '''Ensure cannot render an unregistered config file''' + self.assertRaises(templating.OSConfigException, + self.renderer.render, + config_file='/tmp/foo') + + def test_write_unregistered_config(self): + '''Ensure cannot write an unregistered config file''' + self.assertRaises(templating.OSConfigException, + self.renderer.write, + config_file='/tmp/foo') + + def test_render_complete_context(self): + '''It renders a template when provided a complete context''' + self.loader.set('{{ foo }}') + self.context.set(interfaces=['fooservice'], context={'foo': 'bar'}) + self.renderer.register('/tmp/foo', [self.context]) + with patch.object(self.renderer, '_get_template') as _get_t: + fake_tmpl = MockTemplate() + _get_t.return_value = fake_tmpl + self.renderer.render('/tmp/foo') + fake_tmpl.render.assert_called_with(self.context()) + self.assertIn('fooservice', self.renderer.complete_contexts()) + + def test_render_incomplete_context_with_template(self): + '''It renders a template when provided an incomplete context''' + self.context.set(interfaces=['fooservice'], context={}) + self.renderer.register('/tmp/foo', [self.context]) + with patch.object(self.renderer, '_get_template') as _get_t: + fake_tmpl = MockTemplate() + _get_t.return_value = fake_tmpl + self.renderer.render('/tmp/foo') + fake_tmpl.render.assert_called_with({}) + self.assertNotIn('fooservice', self.renderer.complete_contexts()) + + def test_render_template_registered_but_not_found(self): + '''It raises TemplateNotFound if the template cannot be located''' + path = os.path.dirname(__file__) + renderer = templating.OSConfigRenderer(templates_dir=path, + openstack_release='folsom') + e = TemplateNotFound('') + renderer._get_template = MagicMock() + renderer._get_template.side_effect = e + renderer.register('/etc/nova/nova.conf', contexts=[]) + self.assertRaises( + TemplateNotFound, renderer.render, '/etc/nova/nova.conf') + + def test_render_template_by_basename_first(self): + '''It loads a template by basename of config file first''' + path = os.path.dirname(__file__) + renderer = templating.OSConfigRenderer(templates_dir=path, +
openstack_release='folsom') + renderer._get_template = MagicMock() + renderer.register('/etc/nova/nova.conf', contexts=[]) + renderer.render('/etc/nova/nova.conf') + self.assertEquals(1, len(renderer._get_template.call_args_list)) + self.assertEquals( + [call('nova.conf')], renderer._get_template.call_args_list) + + def test_render_template_by_munged_full_path_last(self): + '''It loads a template by full path of config file second''' + path = os.path.dirname(__file__) + renderer = templating.OSConfigRenderer(templates_dir=path, + openstack_release='folsom') + tmp = MagicMock() + tmp.render = MagicMock() + e = TemplateNotFound('') + renderer._get_template = MagicMock() + renderer._get_template.side_effect = [e, tmp] + renderer.register('/etc/nova/nova.conf', contexts=[]) + renderer.render('/etc/nova/nova.conf') + self.assertEquals(2, len(renderer._get_template.call_args_list)) + self.assertEquals( + [call('nova.conf'), call('etc_nova_nova.conf')], + renderer._get_template.call_args_list) + + def test_render_template_by_basename(self): + '''It renders template if it finds it by config file basename''' + + @patch(builtin_open) + @patch.object(templating, 'get_loader') + def test_write_out_config(self, loader, _open): + '''It writes a templated config when provided a complete context''' + self.context.set(interfaces=['fooservice'], context={'foo': 'bar'}) + self.renderer.register('/tmp/foo', [self.context]) + with patch.object(self.renderer, '_get_template') as _get_t: + fake_tmpl = MockTemplate() + _get_t.return_value = fake_tmpl + self.renderer.write('/tmp/foo') + _open.assert_called_with('/tmp/foo', 'wb') + + def test_write_all(self): + '''It writes out all configuration files at once''' + self.context.set(interfaces=['fooservice'], context={'foo': 'bar'}) + self.renderer.register('/tmp/foo', [self.context]) + self.renderer.register('/tmp/bar', [self.context]) + ex_calls = [ + call('/tmp/bar'), + call('/tmp/foo'), + ] + with patch.object(self.renderer, 'write') as _write: + self.renderer.write_all() + self.assertEquals(sorted(ex_calls), sorted(_write.call_args_list)) + pass + + @patch.object(templating, 'get_loader') + def test_reset_template_loader_for_new_os_release(self, loader): + self.loader.set('') + self.context.set(interfaces=['fooservice'], context={}) + loader.return_value = MockFSLoader('/tmp/foo') + self.renderer.register('/tmp/foo', [self.context]) + self.renderer.render('/tmp/foo') + loader.assert_called_with(os.path.dirname(__file__), 'folsom') + self.renderer.set_release(openstack_release='grizzly') + self.renderer.render('/tmp/foo') + loader.assert_called_with(os.path.dirname(__file__), 'grizzly') + + @patch.object(templating, 'get_loader') + def test_incomplete_context_not_reported_complete(self, loader): + '''It does not recognize an incomplete context as a complete context''' + self.context.set(interfaces=['fooservice'], context={}) + self.renderer.register('/tmp/foo', [self.context]) + self.assertNotIn('fooservice', self.renderer.complete_contexts()) + + @patch.object(templating, 'get_loader') + def test_complete_context_reported_complete(self, loader): + '''It recognizes a complete context as a complete context''' + self.context.set(interfaces=['fooservice'], context={'foo': 'bar'}) + self.renderer.register('/tmp/foo', [self.context]) + self.assertIn('fooservice', self.renderer.complete_contexts()) + + @patch('os.path.isdir') + def test_get_loader_no_templates_dir(self, isdir): + '''Ensure getting loader fails with no template dir''' + isdir.return_value = False + 
self.assertRaises(templating.OSConfigException, + templating.get_loader, + templates_dir='/tmp/foo', os_release='foo') + + @patch('os.path.isdir') + def test_get_loader_all_search_paths(self, isdir): + '''Ensure loader reverse searches of all release template dirs''' + isdir.return_value = True + choice_loader = templating.get_loader('/tmp/foo', + os_release='icehouse') + dirs = [l.searchpath for l in choice_loader.loaders] + + common_tmplts = os.path.join(os.path.dirname(templating.__file__), + 'templates') + expected = [['/tmp/foo/icehouse'], + ['/tmp/foo/havana'], + ['/tmp/foo/grizzly'], + ['/tmp/foo/folsom'], + ['/tmp/foo/essex'], + ['/tmp/foo/diablo'], + ['/tmp/foo'], + [common_tmplts]] + self.assertEquals(dirs, expected) + + @patch('os.path.isdir') + def test_get_loader_some_search_paths(self, isdir): + '''Ensure loader reverse searches of some release template dirs''' + isdir.return_value = True + choice_loader = templating.get_loader('/tmp/foo', os_release='grizzly') + dirs = [l.searchpath for l in choice_loader.loaders] + + common_tmplts = os.path.join(os.path.dirname(templating.__file__), + 'templates') + + expected = [['/tmp/foo/grizzly'], + ['/tmp/foo/folsom'], + ['/tmp/foo/essex'], + ['/tmp/foo/diablo'], + ['/tmp/foo'], + [common_tmplts]] + self.assertEquals(dirs, expected) + + def test_register_template_with_list_of_contexts(self): + '''Ensure registering a template with a list of context generators''' + def _c1(): + pass + + def _c2(): + pass + tmpl = templating.OSConfigTemplate(config_file='/tmp/foo', + contexts=[_c1, _c2]) + self.assertEquals(tmpl.contexts, [_c1, _c2]) + + def test_register_template_with_single_context(self): + '''Ensure registering a template with a single non-list context''' + def _c1(): + pass + tmpl = templating.OSConfigTemplate(config_file='/tmp/foo', + contexts=_c1) + self.assertEquals(tmpl.contexts, [_c1]) + + +class TemplatingStringTests(unittest.TestCase): + def setUp(self): + path = os.path.dirname(__file__) + self.loader = FakeLoader() + self.context = FakeContextGenerator() + + self.addCleanup(patch.object(templating, + 'apt_install').start().stop()) + self.addCleanup(patch.object(templating, 'log').start().stop()) + + templating.FileSystemLoader = MockFSLoader + templating.ChoiceLoader = MockChoiceLoader + + self.config_file = '/etc/confd/extensible.d/drop-in.conf' + self.config_template = 'use: {{ fake_key }}' + self.renderer = templating.OSConfigRenderer(templates_dir=path, + openstack_release='folsom') + + def test_render_template_from_string_full_context(self): + ''' + Test rendering a specified config file with a string template + and a context. + ''' + + context = {'fake_key': 'fake_val'} + self.context.set( + interfaces=['fooservice'], + context=context + ) + + expected_output = 'use: {}'.format(context['fake_key']) + + self.renderer.register( + config_file=self.config_file, + contexts=[self.context], + config_template=self.config_template + ) + + # should return a string given we render from an in-memory + # template source + output = self.renderer.render(self.config_file) + + self.assertEquals(output, expected_output) + + def test_render_template_from_string_incomplete_context(self): + ''' + Test rendering a specified config file with a string template + and a context. 
+ ''' + + self.context.set( + interfaces=['fooservice'], + context={} + ) + + expected_output = 'use: ' + + self.renderer.register( + config_file=self.config_file, + contexts=[self.context], + config_template=self.config_template + ) + + # should return a string given we render from an in-memory + # template source + output = self.renderer.render(self.config_file) + + self.assertEquals(output, expected_output) + + def test_register_string_template_with_single_context(self): + '''Template rendering from a provided string with a context''' + def _c1(): + pass + + config_file = '/etc/confdir/custom-drop-in.conf' + config_template = 'use: {{ key_available_in_c1 }}' + tmpl = templating.OSConfigTemplate( + config_file=config_file, + contexts=_c1, + config_template=config_template + ) + + self.assertEquals(tmpl.contexts, [_c1]) + self.assertEquals(tmpl.config_file, config_file) + self.assertEquals(tmpl.config_template, config_template) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_os_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_os_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..99ed17d9f91d8123740b5ca7f2efd729901deb82 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_os_utils.py @@ -0,0 +1,277 @@ +import collections +import json +import mock +import six +import unittest + +from charmhelpers.contrib.openstack import utils + +if not six.PY3: + builtin_open = '__builtin__.open' +else: + builtin_open = 'builtins.open' + + +class UtilsTests(unittest.TestCase): + def setUp(self): + super(UtilsTests, self).setUp() + + def test_compare_openstack_comparator(self): + self.assertTrue(utils.CompareOpenStackReleases('mitaka') < 'newton') + self.assertTrue(utils.CompareOpenStackReleases('pike') > 'essex') + + @mock.patch.object(utils, 'config') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_set') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_ids') + @mock.patch('charmhelpers.contrib.openstack.utils.get_ipv6_addr') + def test_sync_db_with_multi_ipv6_addresses(self, mock_get_ipv6_addr, + mock_relation_ids, + mock_relation_set, + mock_config): + mock_config.return_value = None + addr1 = '2001:db8:1:0:f816:3eff:fe45:7c/64' + addr2 = '2001:db8:1:0:d0cf:528c:23eb:5000/64' + mock_get_ipv6_addr.return_value = [addr1, addr2] + mock_relation_ids.return_value = ['shared-db'] + + utils.sync_db_with_multi_ipv6_addresses('testdb', 'testdbuser') + hosts = json.dumps([addr1, addr2]) + mock_relation_set.assert_called_with(relation_id='shared-db', + database='testdb', + username='testdbuser', + hostname=hosts) + + @mock.patch.object(utils, 'config') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_set') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_ids') + @mock.patch('charmhelpers.contrib.openstack.utils.get_ipv6_addr') + def test_sync_db_with_multi_ipv6_addresses_single(self, mock_get_ipv6_addr, + mock_relation_ids, + mock_relation_set, + mock_config): + mock_config.return_value = None + addr1 = '2001:db8:1:0:f816:3eff:fe45:7c/64' + mock_get_ipv6_addr.return_value = [addr1] + mock_relation_ids.return_value = ['shared-db'] + + utils.sync_db_with_multi_ipv6_addresses('testdb', 'testdbuser') + hosts = json.dumps([addr1]) + mock_relation_set.assert_called_with(relation_id='shared-db', + database='testdb', + username='testdbuser', + hostname=hosts) + + @mock.patch.object(utils, 
'config') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_set') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_ids') + @mock.patch('charmhelpers.contrib.openstack.utils.get_ipv6_addr') + def test_sync_db_with_multi_ipv6_addresses_w_prefix(self, + mock_get_ipv6_addr, + mock_relation_ids, + mock_relation_set, + mock_config): + mock_config.return_value = None + addr1 = '2001:db8:1:0:f816:3eff:fe45:7c/64' + mock_get_ipv6_addr.return_value = [addr1] + mock_relation_ids.return_value = ['shared-db'] + + utils.sync_db_with_multi_ipv6_addresses('testdb', 'testdbuser', + relation_prefix='bungabunga') + hosts = json.dumps([addr1]) + mock_relation_set.assert_called_with(relation_id='shared-db', + bungabunga_database='testdb', + bungabunga_username='testdbuser', + bungabunga_hostname=hosts) + + @mock.patch.object(utils, 'config') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_set') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_ids') + @mock.patch('charmhelpers.contrib.openstack.utils.get_ipv6_addr') + def test_sync_db_with_multi_ipv6_addresses_vips(self, mock_get_ipv6_addr, + mock_relation_ids, + mock_relation_set, + mock_config): + addr1 = '2001:db8:1:0:f816:3eff:fe45:7c/64' + addr2 = '2001:db8:1:0:d0cf:528c:23eb:5000/64' + vip1 = '2001:db8:1:0:f816:3eff:32b3:7c' + vip2 = '2001:db8:1:0:f816:3eff:32b3:7d' + mock_config.return_value = '%s 10.0.0.1 %s' % (vip1, vip2) + + mock_get_ipv6_addr.return_value = [addr1, addr2] + mock_relation_ids.return_value = ['shared-db'] + + utils.sync_db_with_multi_ipv6_addresses('testdb', 'testdbuser') + hosts = json.dumps([addr1, addr2, vip1, vip2]) + mock_relation_set.assert_called_with(relation_id='shared-db', + database='testdb', + username='testdbuser', + hostname=hosts) + + @mock.patch('uuid.uuid4') + @mock.patch('charmhelpers.contrib.openstack.utils.related_units') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_set') + @mock.patch('charmhelpers.contrib.openstack.utils.relation_ids') + def test_remote_restart(self, mock_relation_ids, mock_relation_set, + mock_related_units, mock_uuid4): + mock_relation_ids.return_value = ['neutron-plugin-api-subordinate:8'] + mock_related_units.return_value = ['neutron-api/0'] + mock_uuid4.return_value = 'uuid4' + utils.remote_restart('neutron-plugin-api-subordinate') + mock_relation_set.assert_called_with( + relation_id='neutron-plugin-api-subordinate:8', + relation_settings={'restart-trigger': 'uuid4'} + ) + + @mock.patch.object(utils, 'lsb_release') + @mock.patch.object(utils, 'config') + @mock.patch('charmhelpers.contrib.openstack.utils.get_os_codename_package') + @mock.patch('charmhelpers.contrib.openstack.utils.' 
+ 'get_os_codename_install_source') + def test_os_release(self, mock_get_os_codename_install_source, + mock_get_os_codename_package, + mock_config, mock_lsb_release): + # Wipe the modules cached os_rel + utils._os_rel = None + mock_lsb_release.return_value = {"DISTRIB_CODENAME": "trusty"} + mock_get_os_codename_install_source.return_value = None + mock_get_os_codename_package.return_value = None + mock_config.return_value = 'cloud-pocket' + self.assertEqual(utils.os_release('my-pkg'), 'icehouse') + mock_get_os_codename_install_source.assert_called_once_with( + 'cloud-pocket') + mock_get_os_codename_package.assert_called_once_with( + 'my-pkg', fatal=False) + mock_config.assert_called_once_with('openstack-origin') + # Next call to os_release should pickup cached version + mock_get_os_codename_install_source.reset_mock() + mock_get_os_codename_package.reset_mock() + self.assertEqual(utils.os_release('my-pkg'), 'icehouse') + self.assertFalse(mock_get_os_codename_install_source.called) + self.assertFalse(mock_get_os_codename_package.called) + # Call os_release and bypass cache + mock_lsb_release.return_value = {"DISTRIB_CODENAME": "xenial"} + mock_get_os_codename_install_source.reset_mock() + mock_get_os_codename_package.reset_mock() + self.assertEqual(utils.os_release('my-pkg', reset_cache=True), + 'mitaka') + mock_get_os_codename_install_source.assert_called_once_with( + 'cloud-pocket') + mock_get_os_codename_package.assert_called_once_with( + 'my-pkg', fatal=False) + # Override base + mock_lsb_release.return_value = {"DISTRIB_CODENAME": "xenial"} + mock_get_os_codename_install_source.reset_mock() + mock_get_os_codename_package.reset_mock() + self.assertEqual(utils.os_release('my-pkg', reset_cache=True, base="ocata"), + 'ocata') + mock_get_os_codename_install_source.assert_called_once_with( + 'cloud-pocket') + mock_get_os_codename_package.assert_called_once_with( + 'my-pkg', fatal=False) + # Override source key + mock_config.reset_mock() + mock_get_os_codename_install_source.reset_mock() + mock_get_os_codename_package.reset_mock() + mock_get_os_codename_package.return_value = None + utils.os_release('my-pkg', reset_cache=True, source_key='source') + mock_config.assert_called_once_with('source') + mock_get_os_codename_install_source.assert_called_once_with( + 'cloud-pocket') + + @mock.patch.object(utils, 'os_release') + @mock.patch.object(utils, 'get_os_codename_install_source') + def test_enable_memcache(self, _get_os_codename_install_source, + _os_release): + # Check call with 'release' + self.assertFalse(utils.enable_memcache(release='icehouse')) + self.assertTrue(utils.enable_memcache(release='ocata')) + # Check call with 'source' + _os_release.return_value = None + _get_os_codename_install_source.return_value = 'icehouse' + self.assertFalse(utils.enable_memcache(source='distro')) + _os_release.return_value = None + _get_os_codename_install_source.return_value = 'ocata' + self.assertTrue(utils.enable_memcache(source='distro')) + # Check call with 'package' + _os_release.return_value = 'icehouse' + _get_os_codename_install_source.return_value = None + self.assertFalse(utils.enable_memcache(package='pkg1')) + _os_release.return_value = 'ocata' + _get_os_codename_install_source.return_value = None + self.assertTrue(utils.enable_memcache(package='pkg1')) + + @mock.patch.object(utils, 'enable_memcache') + def test_enable_token_cache_pkgs(self, _enable_memcache): + _enable_memcache.return_value = False + self.assertEqual(utils.token_cache_pkgs(source='distro'), []) + 
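+ # With memcache enabled, the memcached host package and its Python
+ # client are expected in the token cache package list.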
_enable_memcache.return_value = True + self.assertEqual(utils.token_cache_pkgs(source='distro'), + ['memcached', 'python-memcache']) + + def test_update_json_file(self): + TEST_POLICY = """{ + "delete_image_location": "", + "get_image_location": "", + "set_image_location": "", + "extra_property": "False" + }""" + + TEST_POLICY_FILE = "/etc/glance/policy.json" + + items_to_update = { + "get_image_location": "role:admin", + "extra_policy": "extra", + } + + mock_open = mock.mock_open(read_data=TEST_POLICY) + with mock.patch(builtin_open, mock_open) as mock_file: + utils.update_json_file(TEST_POLICY_FILE, {}) + self.assertFalse(mock_file.called) + + utils.update_json_file(TEST_POLICY_FILE, items_to_update) + mock_file.assert_has_calls([ + mock.call(TEST_POLICY_FILE), + mock.call(TEST_POLICY_FILE, 'w'), + ], any_order=True) + + modified_policy = json.loads(TEST_POLICY) + modified_policy.update(items_to_update) + mock_open().write.assert_called_with( + json.dumps(modified_policy, indent=4, sort_keys=True)) + + tmp = json.loads(TEST_POLICY) + tmp.update(items_to_update) + TEST_POLICY = json.dumps(tmp) + mock_open = mock.mock_open(read_data=TEST_POLICY) + with mock.patch(builtin_open, mock_open) as mock_file: + utils.update_json_file(TEST_POLICY_FILE, items_to_update) + mock_file.assert_has_calls([ + mock.call(TEST_POLICY_FILE), + ], any_order=True) + + def test_ordered(self): + data = {'one': 1, 'two': 2, 'three': 3} + expected = [('one', 1), ('three', 3), ('two', 2)] + self.assertSequenceEqual(expected, + [x for x in utils.ordered(data).items()]) + + data = { + 'one': 1, + 'two': 2, + 'three': { + 'uno': 1, + 'dos': 2, + 'tres': 3 + } + } + expected = collections.OrderedDict() + expected['one'] = 1 + nested = collections.OrderedDict() + nested['dos'] = 2 + nested['tres'] = 3 + nested['uno'] = 1 + expected['three'] = nested + expected['two'] = 2 + self.assertEqual(expected, utils.ordered(data)) + + self.assertRaises(ValueError, utils.ordered, "foo") diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_policyd.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_policyd.py new file mode 100644 index 0000000000000000000000000000000000000000..378e58d4ab7ee037efbd9e12e94dcab8b9dbd4d8 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_policyd.py @@ -0,0 +1,467 @@ +import contextlib +import copy +import io +import os +import mock +import six +import unittest + +from charmhelpers.contrib.openstack import policyd + + +if not six.PY3: + builtin_open = '__builtin__.open' +else: + builtin_open = 'builtins.open' + + +class PolicydTests(unittest.TestCase): + def setUp(self): + super(PolicydTests, self).setUp() + + def test_is_policyd_override_valid_on_this_release(self): + self.assertTrue( + policyd.is_policyd_override_valid_on_this_release("queens")) + self.assertTrue( + policyd.is_policyd_override_valid_on_this_release("rocky")) + self.assertFalse( + policyd.is_policyd_override_valid_on_this_release("pike")) + + @mock.patch.object(policyd, "clean_policyd_dir_for") + @mock.patch.object(policyd, "remove_policy_success_file") + @mock.patch.object(policyd, "process_policy_resource_file") + @mock.patch.object(policyd, "get_policy_resource_filename") + @mock.patch.object(policyd, "is_policyd_override_valid_on_this_release") + @mock.patch.object(policyd, "_policy_success_file") + @mock.patch("os.path.isfile") + @mock.patch.object(policyd.hookenv, "config") + 
@mock.patch("charmhelpers.core.hookenv.log") + def test_maybe_do_policyd_overrides( + self, + mock_log, + mock_config, + mock_isfile, + mock__policy_success_file, + mock_is_policyd_override_valid_on_this_release, + mock_get_policy_resource_filename, + mock_process_policy_resource_file, + mock_remove_policy_success_file, + mock_clean_policyd_dir_for, + ): + mock_isfile.return_value = False + mock__policy_success_file.return_value = "s-return" + # test success condition + mock_config.return_value = {policyd.POLICYD_CONFIG_NAME: True} + mock_is_policyd_override_valid_on_this_release.return_value = True + mock_get_policy_resource_filename.return_value = "resource.zip" + mock_process_policy_resource_file.return_value = True + mod_fn = mock.Mock() + restart_handler = mock.Mock() + policyd.maybe_do_policyd_overrides( + "arelease", "aservice", ["a"], ["b"], mod_fn, restart_handler) + mock_is_policyd_override_valid_on_this_release.assert_called_once_with( + "arelease") + mock_get_policy_resource_filename.assert_called_once_with() + mock_process_policy_resource_file.assert_called_once_with( + "resource.zip", "aservice", ["a"], ["b"], mod_fn) + restart_handler.assert_called_once_with() + # test process_policy_resource_file is not called if not valid on the + # release. + mock_process_policy_resource_file.reset_mock() + restart_handler.reset_mock() + mock_is_policyd_override_valid_on_this_release.return_value = False + policyd.maybe_do_policyd_overrides( + "arelease", "aservice", ["a"], ["b"], mod_fn, restart_handler) + mock_process_policy_resource_file.assert_not_called() + restart_handler.assert_not_called() + # test restart_handler is not called if not needed. + mock_is_policyd_override_valid_on_this_release.return_value = True + mock_process_policy_resource_file.return_value = False + policyd.maybe_do_policyd_overrides( + "arelease", "aservice", ["a"], ["b"], mod_fn, restart_handler) + mock_process_policy_resource_file.assert_called_once_with( + "resource.zip", "aservice", ["a"], ["b"], mod_fn) + restart_handler.assert_not_called() + # test that directory gets cleaned if the config is not set + mock_config.return_value = {policyd.POLICYD_CONFIG_NAME: False} + mock_process_policy_resource_file.reset_mock() + policyd.maybe_do_policyd_overrides( + "arelease", "aservice", ["a"], ["b"], mod_fn, restart_handler) + mock_process_policy_resource_file.assert_not_called() + mock_remove_policy_success_file.assert_called_once_with() + mock_clean_policyd_dir_for.assert_called_once_with( + "aservice", ["a"], user='aservice', group='aservice') + + @mock.patch.object(policyd, "maybe_do_policyd_overrides") + def test_maybe_do_policyd_overrides_with_config_changed( + self, + mock_maybe_do_policyd_overrides, + ): + mod_fn = mock.Mock() + restart_handler = mock.Mock() + policyd.maybe_do_policyd_overrides_on_config_changed( + "arelease", "aservice", ["a"], ["b"], mod_fn, restart_handler) + mock_maybe_do_policyd_overrides.assert_called_once_with( + "arelease", "aservice", ["a"], ["b"], mod_fn, restart_handler, + config_changed=True) + + @mock.patch("charmhelpers.core.hookenv.resource_get") + def test_get_policy_resource_filename(self, mock_resource_get): + mock_resource_get.return_value = "test-file" + self.assertEqual(policyd.get_policy_resource_filename(), + "test-file") + mock_resource_get.assert_called_once_with( + policyd.POLICYD_RESOURCE_NAME) + + # check that if an error is raised, that None is returned. 
+ def go_bang(*args): + raise Exception("bang") + + mock_resource_get.side_effect = go_bang + self.assertEqual(policyd.get_policy_resource_filename(), None) + + @mock.patch.object(policyd, "_yamlfiles") + @mock.patch.object(policyd.zipfile, "ZipFile") + def test_open_and_filter_yaml_files(self, mock_ZipFile, mock__yamlfiles): + mock__yamlfiles.return_value = [ + ("file1", ".yaml", "file1.yaml", None), + ("file2", ".yml", "file2.YML", None)] + mock_ZipFile.return_value.__enter__.return_value = "zfp" + # test a valid zip file + with policyd.open_and_filter_yaml_files("some-file") as (zfp, files): + self.assertEqual(zfp, "zfp") + mock_ZipFile.assert_called_once_with("some-file", "r") + self.assertEqual(files, [ + ("file1", ".yaml", "file1.yaml", None), + ("file2", ".yml", "file2.YML", None)]) + # ensure that there must be at least one file. + mock__yamlfiles.return_value = [] + with self.assertRaises(policyd.BadPolicyZipFile): + with policyd.open_and_filter_yaml_files("some-file"): + pass + # ensure that it picks up duplicates + mock__yamlfiles.return_value = [ + ("file1", ".yaml", "file1.yaml", None), + ("file2", ".yml", "file2.yml", None), + ("file1", ".yml", "file1.yml", None)] + with self.assertRaises(policyd.BadPolicyZipFile): + with policyd.open_and_filter_yaml_files("some-file"): + pass + + def test__yamlfiles(self): + class MockZipFile(object): + def __init__(self, infolist): + self._infolist = infolist + + def infolist(self): + return self._infolist + + class MockInfoListItem(object): + def __init__(self, is_dir, filename): + self.filename = filename + self._is_dir = is_dir + + def is_dir(self): + return self._is_dir + + def __repr__(self): + return "MockInfoListItem({}, {})".format(self._is_dir, + self.filename) + + zipfile = MockZipFile([ + MockInfoListItem(False, "file1.yaml"), + MockInfoListItem(False, "file2.md"), + MockInfoListItem(False, "file3.YML"), + MockInfoListItem(False, "file4.Yaml"), + MockInfoListItem(True, "file5"), + MockInfoListItem(True, "file6.yaml"), + MockInfoListItem(False, "file7"), + MockInfoListItem(False, "file8.j2")]) + + self.assertEqual(list(policyd._yamlfiles(zipfile)), + [("file1", ".yaml", "file1.yaml", mock.ANY), + ("file3", ".yml", "file3.YML", mock.ANY), + ("file4", ".yaml", "file4.Yaml", mock.ANY), + ("file8", ".j2", "file8.j2", mock.ANY)]) + + @mock.patch.object(policyd.yaml, "safe_load") + def test_read_and_validate_yaml(self, mock_safe_load): + # test a valid document + good_doc = { + "key1": "rule1", + "key2": "rule2", + } + mock_safe_load.return_value = copy.deepcopy(good_doc) + doc = policyd.read_and_validate_yaml("test-stream") + self.assertEqual(doc, good_doc) + mock_safe_load.assert_called_once_with("test-stream") + # test an invalid document - return a string + mock_safe_load.return_value = "wrong" + with self.assertRaises(policyd.BadPolicyYamlFile): + policyd.read_and_validate_yaml("test-stream") + # test for black-listed keys + with self.assertRaises(policyd.BadPolicyYamlFile): + mock_safe_load.return_value = copy.deepcopy(good_doc) + policyd.read_and_validate_yaml("test-stream", ["key1"]) + # test for non string keys + bad_key_doc = { + (1,): "rule1", + "key2": "rule2", + } + with self.assertRaises(policyd.BadPolicyYamlFile): + mock_safe_load.return_value = copy.deepcopy(bad_key_doc) + policyd.read_and_validate_yaml("test-stream", ["key1"]) + # test for non string values (i.e. 
no nested keys) + bad_key_doc2 = { + "key1": "rule1", + "key2": {"sub_key": "rule2"}, + } + with self.assertRaises(policyd.BadPolicyYamlFile): + mock_safe_load.return_value = copy.deepcopy(bad_key_doc2) + policyd.read_and_validate_yaml("test-stream", ["key1"]) + + def test_policyd_dir_for(self): + self.assertEqual(policyd.policyd_dir_for('thing'), + "/etc/thing/policy.d") + + @mock.patch.object(policyd.hookenv, 'log') + @mock.patch("os.remove") + @mock.patch("shutil.rmtree") + @mock.patch("charmhelpers.core.host.mkdir") + @mock.patch("os.path.exists") + @mock.patch.object(policyd, "policyd_dir_for") + def test_clean_policyd_dir_for(self, + mock_policyd_dir_for, + mock_os_path_exists, + mock_mkdir, + mock_shutil_rmtree, + mock_os_remove, + mock_log): + if hasattr(os, 'scandir'): + mock_scan_dir_parts = (mock.patch, ["os.scandir"]) + else: + mock_scan_dir_parts = (mock.patch.object, + [policyd, "_fallback_scandir"]) + + class MockDirEntry(object): + def __init__(self, path, is_dir): + self.path = path + self._is_dir = is_dir + + def is_dir(self): + return self._is_dir + + # list of scanned objects + directory_contents = [ + MockDirEntry("one", False), + MockDirEntry("two", False), + MockDirEntry("three", True), + MockDirEntry("four", False)] + + mock_policyd_dir_for.return_value = "the-path" + + # Initial conditions + mock_os_path_exists.return_value = False + + # call the function + with mock_scan_dir_parts[0](*mock_scan_dir_parts[1]) as \ + mock_os_scandir: + mock_os_scandir.return_value = directory_contents + policyd.clean_policyd_dir_for("aservice") + + # check it did the right thing + mock_policyd_dir_for.assert_called_once_with("aservice") + mock_os_path_exists.assert_called_once_with("the-path") + mock_mkdir.assert_called_once_with("the-path", + owner="aservice", + group="aservice", + perms=0o775) + mock_shutil_rmtree.assert_called_once_with("three") + mock_os_remove.assert_has_calls([ + mock.call("one"), mock.call("two"), mock.call("four")]) + + # check also that we can omit paths ... 
reset everything + mock_os_remove.reset_mock() + mock_shutil_rmtree.reset_mock() + mock_os_path_exists.reset_mock() + mock_os_path_exists.return_value = True + mock_mkdir.reset_mock() + + with mock_scan_dir_parts[0](*mock_scan_dir_parts[1]) as \ + mock_os_scandir: + mock_os_scandir.return_value = directory_contents + policyd.clean_policyd_dir_for("aservice", + keep_paths=["one", "three"]) + + # verify all worked as we expected + mock_mkdir.assert_not_called() + mock_shutil_rmtree.assert_not_called() + mock_os_remove.assert_has_calls([mock.call("two"), mock.call("four")]) + + def test_path_for_policy_file(self): + self.assertEqual(policyd.path_for_policy_file('this', 'that'), + "/etc/this/policy.d/that.yaml") + + @mock.patch("charmhelpers.core.hookenv.charm_dir") + def test__policy_success_file(self, mock_charm_dir): + mock_charm_dir.return_value = "/this" + self.assertEqual(policyd._policy_success_file(), + "/this/{}".format(policyd.POLICYD_SUCCESS_FILENAME)) + + @mock.patch("os.remove") + @mock.patch.object(policyd, "_policy_success_file") + def test_remove_policy_success_file(self, mock_file, mock_os_remove): + mock_file.return_value = "the-path" + policyd.remove_policy_success_file() + mock_os_remove.assert_called_once_with("the-path") + + # now test that failure doesn't fail the function + def go_bang(*args): + raise Exception("bang") + + mock_os_remove.side_effect = go_bang + policyd.remove_policy_success_file() + + @mock.patch("os.path.isfile") + @mock.patch.object(policyd, "_policy_success_file") + def test_policyd_status_message_prefix(self, mock_file, mock_is_file): + mock_file.return_value = "the-path" + mock_is_file.return_value = True + self.assertEqual(policyd.policyd_status_message_prefix(), "PO:") + mock_is_file.return_value = False + self.assertEqual( + policyd.policyd_status_message_prefix(), "PO (broken):") + + @mock.patch("yaml.dump") + @mock.patch.object(policyd, "_policy_success_file") + @mock.patch.object(policyd.hookenv, "log") + @mock.patch.object(policyd, "read_and_validate_yaml") + @mock.patch.object(policyd, "path_for_policy_file") + @mock.patch.object(policyd, "clean_policyd_dir_for") + @mock.patch.object(policyd, "remove_policy_success_file") + @mock.patch.object(policyd, "open_and_filter_yaml_files") + @mock.patch.object(policyd.ch_host, 'write_file') + @mock.patch.object(policyd, "maybe_create_directory_for") + def test_process_policy_resource_file( + self, + mock_maybe_create_directory_for, + mock_write_file, + mock_open_and_filter_yaml_files, + mock_remove_policy_success_file, + mock_clean_policyd_dir_for, + mock_path_for_policy_file, + mock_read_and_validate_yaml, + mock_log, + mock__policy_success_file, + mock_yaml_dump, + ): + mock_zfp = mock.MagicMock() + mod_fn = mock.Mock() + mock_path_for_policy_file.side_effect = lambda s, n: s + "/" + n + gen = [ + ("file1", ".yaml", "file1.yaml", "file1-zipinfo"), + ("file2", ".yml", "file2.yml", "file2-zipinfo")] + mock_open_and_filter_yaml_files.return_value.__enter__.return_value = \ + (mock_zfp, gen) + # first verify that we can blacklist a file + res = policyd.process_policy_resource_file( + "resource.zip", "aservice", ["aservice/file1"], [], mod_fn) + self.assertFalse(res) + mock_remove_policy_success_file.assert_called_once_with() + mock_clean_policyd_dir_for.assert_has_calls([ + mock.call("aservice", + ["aservice/file1"], + user='aservice', + group='aservice'), + mock.call("aservice", + ["aservice/file1"], + user='aservice', + group='aservice')]) + mock_zfp.open.assert_not_called() + 
mod_fn.assert_not_called() + mock_log.assert_any_call("Processing resource.zip failed: policy.d" + " name aservice/file1 is blacklisted", + level=policyd.POLICYD_LOG_LEVEL_DEFAULT) + + # now test for success + @contextlib.contextmanager + def _patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. + + Yields the mock for "open" and "file", respectively.''' + mock_open = mock.MagicMock(spec=open) + mock_file = mock.MagicMock(spec=io.FileIO) + + with mock.patch(builtin_open, mock_open): + yield mock_open, mock_file + + mock_clean_policyd_dir_for.reset_mock() + mock_zfp.reset_mock() + mock_fp = mock.MagicMock() + mock_fp.read.return_value = '{"rule1": "value1"}' + mock_zfp.open.return_value.__enter__.return_value = mock_fp + gen = [("file1", ".j2", "file1.j2", "file1-zipinfo")] + mock_open_and_filter_yaml_files.return_value.__enter__.return_value = \ + (mock_zfp, gen) + mock_read_and_validate_yaml.return_value = {"rule1": "modded_value1"} + mod_fn.return_value = '{"rule1": "modded_value1"}' + mock__policy_success_file.return_value = "policy-success-file" + mock_yaml_dump.return_value = "dumped-file" + with _patch_open() as (mock_open, mock_file): + res = policyd.process_policy_resource_file( + "resource.zip", "aservice", [], ["key"], mod_fn) + self.assertTrue(res) + # mock_open.assert_any_call("aservice/file1", "wt") + mock_write_file.assert_called_once_with( + "aservice/file1", + b'dumped-file', + "aservice", + "aservice") + mock_open.assert_any_call("policy-success-file", "w") + mock_yaml_dump.assert_called_once_with({"rule1": "modded_value1"}) + mock_zfp.open.assert_called_once_with("file1-zipinfo") + mock_read_and_validate_yaml.assert_called_once_with( + '{"rule1": "modded_value1"}', ["key"]) + mod_fn.assert_called_once_with('{"rule1": "value1"}') + + # raise a BadPolicyZipFile if we have a template, but there is no + # template function + mock_log.reset_mock() + with _patch_open() as (mock_open, mock_file): + res = policyd.process_policy_resource_file( + "resource.zip", "aservice", [], ["key"], + template_function=None) + self.assertFalse(res) + mock_log.assert_any_call( + "Processing resource.zip failed: Template file1.j2 " + "but no template_function is available", + level=policyd.POLICYD_LOG_LEVEL_DEFAULT) + + # raise the IOError to validate that code path + def raise_ioerror(*args): + raise IOError("bang") + + mock_open_and_filter_yaml_files.side_effect = raise_ioerror + mock_log.reset_mock() + res = policyd.process_policy_resource_file( + "resource.zip", "aservice", [], ["key"], mod_fn) + self.assertFalse(res, False) + mock_log.assert_any_call( + "File resource.zip failed with IOError. " + "This really shouldn't happen -- error: bang", + level=policyd.POLICYD_LOG_LEVEL_DEFAULT) + # raise a general exception, so that is caught and logged too. 
+ + def raise_exception(*args): + raise Exception("bang2") + + mock_open_and_filter_yaml_files.reset_mock() + mock_open_and_filter_yaml_files.side_effect = raise_exception + mock_log.reset_mock() + res = policyd.process_policy_resource_file( + "resource.zip", "aservice", [], ["key"], mod_fn) + self.assertFalse(res, False) + mock_log.assert_any_call( + "General Exception(bang2) during policyd processing", + level=policyd.POLICYD_LOG_LEVEL_DEFAULT) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_ssh_migrations.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_ssh_migrations.py new file mode 100644 index 0000000000000000000000000000000000000000..5a3244228d96199f4d3661da18bec83777a32b52 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_ssh_migrations.py @@ -0,0 +1,582 @@ +import mock +import six +import subprocess +import unittest + +from tests.helpers import patch_open, mock_open + +import charmhelpers.contrib.openstack.ssh_migrations as ssh_migrations + +if not six.PY3: + builtin_open = '__builtin__.open' + builtin_import = '__builtin__.__import__' +else: + builtin_open = 'builtins.open' + builtin_import = 'builtins.__import__' + + +UNIT1_HOST_KEY_1 = """|1|EaIiWNsBsaSke5T5bdDlaV5xKPU=|WKMu3Va+oNwRjXmPGOZ+mrpWbM8= ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZdZdR7I35ymFdspruN1CIez/0m62sJeld2nLuOGaNbdl/rk5bGrWUAZh6c9p9H53FAqGAXBD/1C8dZ5dgIAGdTs7PAZq7owXCpgUPQcGOYVAtBwv8qfnWyI1W+Vpi6vnb2sgYr6XGbB9b84i4vrd98IIpXIleC9qd0VUTSYgd7+NPaFNoK0HZmqcNEf5leaa8sgSf4t5F+BTWEXzU3ql/3isFT8lEpJ9N8wOvNzAoFEQcxqauvOJn72QQ6kUrQT3NdQFUMHquS/s+nBrQNPbUmzqrvSOed75Qk8359zqU1Rce7U39cqc0scYi1ak3oJdojwfLFKJw4TMPn/Pq7JnT""" +UNIT1_HOST_KEY_2 = """|1|mCyYWqJl8loqV6LCY84lu2rpqLA=|51m+M+0ES3jYVzr3Kco3CDg8hEY= ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZdZdR7I35ymFdspruN1CIez/0m62sJeld2nLuOGaNbdl/rk5bGrWUAZh6c9p9H53FAqGAXBD/1C8dZ5dgIAGdTs7PAZq7owXCpgUPQcGOYVAtBwv8qfnWyI1W+Vpi6vnb2sgYr6XGbB9b84i4vrd98IIpXIleC9qd0VUTSYgd7+NPaFNoK0HZmqcNEf5leaa8sgSf4t5F+BTWEXzU3ql/3isFT8lEpJ9N8wOvNzAoFEQcxqauvOJn72QQ6kUrQT3NdQFUMHquS/s+nBrQNPbUmzqrvSOed75Qk8359zqU1Rce7U39cqc0scYi1ak3oJdojwfLFKJw4TMPn/Pq7JnT""" +UNIT2_HOST_KEY_1 = """|1|eWagMqrN7XmX7NdVpZbqMZ2cb4Q=|3jgGiFEU9SMhXwdX0w0kkG54CZc= ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC27Lv4wtAiPIOTsrUCFOU4qaNsov+LZSVHtxlu0aERuD+oU3ZILXOITXJlDohweXN6YuP4hFg49FF119gmMag+hDiA8/BztQmsplkwHrWEPuWKpReLRNLBU+Nt78jrJkjTK8Egwxbaxu8fAPZkCgGyeLkIH4ghrdWlOaWYXzwuxXkYWSpQOgF6E/T+19JKVKNpt2i6w7q9vVwZEjwVr30ubs1bNdPzE9ylNLQRrGa7c38SKsEos5RtZJjEuZGTC9KI0QdEgwnxxNMlT/CIgwWA1V38vLsosF2pHKxbmtCNvBuPNtrBDgXhukVqyEh825RhTAyQYGshMCbAbxl/M9c3""" +UNIT2_HOST_KEY_2 = """|1|zRH8troNwhVzrkMx86E5Ibevw5s=|gESlgkwUumP8q0A6l+CoRlFRpTw= ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC27Lv4wtAiPIOTsrUCFOU4qaNsov+LZSVHtxlu0aERuD+oU3ZILXOITXJlDohweXN6YuP4hFg49FF119gmMag+hDiA8/BztQmsplkwHrWEPuWKpReLRNLBU+Nt78jrJkjTK8Egwxbaxu8fAPZkCgGyeLkIH4ghrdWlOaWYXzwuxXkYWSpQOgF6E/T+19JKVKNpt2i6w7q9vVwZEjwVr30ubs1bNdPzE9ylNLQRrGa7c38SKsEos5RtZJjEuZGTC9KI0QdEgwnxxNMlT/CIgwWA1V38vLsosF2pHKxbmtCNvBuPNtrBDgXhukVqyEh825RhTAyQYGshMCbAbxl/M9c3""" +HOST_KEYS = [UNIT1_HOST_KEY_1, UNIT1_HOST_KEY_2, + UNIT2_HOST_KEY_1, UNIT2_HOST_KEY_2] +UNIT1_PUBKEY_1 = """ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDqtnB3zh3sMufZd1khi44su0hTg/LqLb3ma2iyueTULZikDYa65UidVxzsa6r0Y9jkHwknGlh7fNnGdmc3S8EE+rVUNF4r3JF2Zd/pdfCBia/BmKJcO7+NyRWc8ihlrA3xYUSm+Yg8ZIpqoSb1LKjgAdYISh9HQQaXut2sXtHESdpilNpDf42AZfuQM+B0op0v7bq86ZXOM1rvdJriI6BduHaAOux+d9HDNvV5AxYTICrUkXqIvdHnoRyOFfhTcKun0EtuUxpDiAi0im9C+i+MPwMvA6AmRbot6Tqt2xZPRBYY8+WF7I5cBoovES/dWKP5TZwaGBr+WNv+z2JJhvlN root@juju-4665be-20180716142533-8""" +UNIT2_PUBKEY_1 = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkWfkVrG7wTnfifvL0GkmDj6L33PKrWjzN2hOwZb9EoxwNzFGTMTBIpepTAnO6hdFBwtus1Ej/L12K6L/0YRDZAKjE7yTWOsh1kUxPZ1INRCqLILiefE5A/LPNx8NDb+d/2ryc5QmOQXUALs6mC5VDNchImUp9L7l0RIzPOgPXZCqMC1nZLqqX+eI9EUaf29/+NztYw59rFAa3hWNe8RJCSFeU+iWirWP8rfX9jsLzD9hO3nuZjP23M6tv1jX9LQD+8qkx0WSMa2WrIjkMiclP6tkyCJOZogyoPzZm/+dUhLeY9bIizbZCQKH/b4gOl5m/PkWoqEFshfqGzUIPkAJp root@juju-4665be-20180716142533-9""" +PUB_KEYS = [UNIT1_PUBKEY_1, UNIT2_PUBKEY_1] + + +class SSHMigrationsTests(unittest.TestCase): + + def setUp(self): + self._patches = {} + self._patches_start = {} + + def tearDown(self): + """Run teardown of patches.""" + for k, v in self._patches.items(): + v.stop() + setattr(self, k, None) + self._patches = None + self._patches_start = None + + def patch_object(self, obj, attr, return_value=None, name=None, new=None, + **kwargs): + """Patch the given object.""" + if name is None: + name = attr + if new is not None: + mocked = mock.patch.object(obj, attr, new=new, **kwargs) + else: + mocked = mock.patch.object(obj, attr, **kwargs) + self._patches[name] = mocked + started = mocked.start() + if new is None: + started.return_value = return_value + self._patches_start[name] = started + setattr(self, name, started) + + def setup_mocks_ssh_directory_for_unit(self, app_name, ssh_dir_exists, + app_dir_exists, auth_keys_exists, + known_hosts_exists, user=None): + def _isdir(x): + return { + ssh_dir + '/': ssh_dir_exists, + app_dir: app_dir_exists}[x] + + def _isfile(x): + return { + '{}/authorized_keys'.format(app_dir): auth_keys_exists, + '{}/known_hosts'.format(app_dir): known_hosts_exists}[x] + + if user: + app_name = "{}_{}".format(app_name, user) + ssh_dir = '/etc/nova/compute_ssh' + app_dir = '{}/{}'.format(ssh_dir, app_name) + + self.patch_object(ssh_migrations.os, 'mkdir') + self.patch_object(ssh_migrations.os.path, 'isdir', side_effect=_isdir) + self.patch_object(ssh_migrations.os.path, 'isfile', + side_effect=_isfile) + + def test_ssh_directory_for_unit(self): + self.setup_mocks_ssh_directory_for_unit( + 'nova-compute-lxd', + ssh_dir_exists=True, + app_dir_exists=True, + auth_keys_exists=True, + known_hosts_exists=True) + self.assertEqual( + ssh_migrations.ssh_directory_for_unit('nova-compute-lxd'), + '/etc/nova/compute_ssh/nova-compute-lxd') + self.assertFalse(self.mkdir.called) + + def test_ssh_directory_for_unit_user(self): + self.setup_mocks_ssh_directory_for_unit( + 'nova-compute-lxd', + ssh_dir_exists=True, + app_dir_exists=True, + auth_keys_exists=True, + known_hosts_exists=True, + user='nova') + self.assertEqual( + ssh_migrations.ssh_directory_for_unit( + 'nova-compute-lxd', + user='nova'), + '/etc/nova/compute_ssh/nova-compute-lxd_nova') + self.assertFalse(self.mkdir.called) + + def test_ssh_directory_missing_dir(self): + self.setup_mocks_ssh_directory_for_unit( + 'nova-compute-lxd', + ssh_dir_exists=False, + app_dir_exists=True, + auth_keys_exists=True, + known_hosts_exists=True) + self.assertEqual( + ssh_migrations.ssh_directory_for_unit('nova-compute-lxd'), + '/etc/nova/compute_ssh/nova-compute-lxd') + 
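+ # Only the top-level ssh directory was missing, so it is created exactly once.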
self.mkdir.assert_called_once_with('/etc/nova/compute_ssh/') + + def test_ssh_directory_missing_dirs(self): + self.setup_mocks_ssh_directory_for_unit( + 'nova-compute-lxd', + ssh_dir_exists=False, + app_dir_exists=False, + auth_keys_exists=True, + known_hosts_exists=True) + self.assertEqual( + ssh_migrations.ssh_directory_for_unit('nova-compute-lxd'), + '/etc/nova/compute_ssh/nova-compute-lxd') + mkdir_calls = [ + mock.call('/etc/nova/compute_ssh/'), + mock.call('/etc/nova/compute_ssh/nova-compute-lxd')] + self.mkdir.assert_has_calls(mkdir_calls) + + @mock.patch(builtin_open) + def test_ssh_directory_missing_file(self, _open): + self.setup_mocks_ssh_directory_for_unit( + 'nova-compute-lxd', + ssh_dir_exists=True, + app_dir_exists=True, + auth_keys_exists=False, + known_hosts_exists=True) + self.assertEqual( + ssh_migrations.ssh_directory_for_unit('nova-compute-lxd'), + '/etc/nova/compute_ssh/nova-compute-lxd') + _open.assert_called_once_with( + '/etc/nova/compute_ssh/nova-compute-lxd/authorized_keys', + 'w') + self.assertFalse(self.mkdir.called) + + @mock.patch(builtin_open) + def test_ssh_directory_missing_files(self, _open): + self.setup_mocks_ssh_directory_for_unit( + 'nova-compute-lxd', + ssh_dir_exists=True, + app_dir_exists=True, + auth_keys_exists=False, + known_hosts_exists=False) + self.assertEqual( + ssh_migrations.ssh_directory_for_unit('nova-compute-lxd'), + '/etc/nova/compute_ssh/nova-compute-lxd') + open_calls = [ + mock.call( + '/etc/nova/compute_ssh/nova-compute-lxd/authorized_keys', + 'w'), + mock.call().close(), + mock.call( + '/etc/nova/compute_ssh/nova-compute-lxd/known_hosts', + 'w'), + mock.call().close()] + _open.assert_has_calls(open_calls) + self.assertFalse(self.mkdir.called) + + def setup_ssh_directory_for_unit_mocks(self): + self.patch_object( + ssh_migrations, + 'ssh_directory_for_unit', + return_value='/somedir') + + def test_known_hosts(self): + self.setup_ssh_directory_for_unit_mocks() + self.assertEqual( + ssh_migrations.known_hosts('nova-compute-lxd'), + '/somedir/known_hosts') + + def test_authorized_keys(self): + self.setup_ssh_directory_for_unit_mocks() + self.assertEqual( + ssh_migrations.authorized_keys('nova-compute-lxd'), + '/somedir/authorized_keys') + + @mock.patch('subprocess.check_output') + def test_ssh_known_host_key(self, _check_output): + self.setup_ssh_directory_for_unit_mocks() + _check_output.return_value = UNIT1_HOST_KEY_1 + self.assertEqual( + ssh_migrations.ssh_known_host_key( + 'juju-4665be-20180716142533-8', + 'nova-compute-lxd'), + UNIT1_HOST_KEY_1) + + @mock.patch('subprocess.check_output') + def test_ssh_known_host_key_multi_match(self, _check_output): + self.setup_ssh_directory_for_unit_mocks() + _check_output.return_value = '{}\n{}\n'.format(UNIT1_HOST_KEY_1, + UNIT1_HOST_KEY_2) + self.assertEqual( + ssh_migrations.ssh_known_host_key( + 'juju-4665be-20180716142533-8', + 'nova-compute-lxd'), + UNIT1_HOST_KEY_1) + + @mock.patch('subprocess.check_output') + def test_ssh_known_host_key_rc1(self, _check_output): + self.setup_ssh_directory_for_unit_mocks() + _check_output.side_effect = subprocess.CalledProcessError( + cmd=['anything'], + returncode=1, + output=UNIT1_HOST_KEY_1) + self.assertEqual( + ssh_migrations.ssh_known_host_key( + 'juju-4665be-20180716142533-8', + 'nova-compute-lxd'), + UNIT1_HOST_KEY_1) + + @mock.patch('subprocess.check_output') + def test_ssh_known_host_key_rc2(self, _check_output): + self.setup_ssh_directory_for_unit_mocks() + _check_output.side_effect = subprocess.CalledProcessError( + cmd=['anything'], + 
returncode=2,
+ output='')
+ with self.assertRaises(subprocess.CalledProcessError):
+ ssh_migrations.ssh_known_host_key(
+ 'juju-4665be-20180716142533-8',
+ 'nova-compute-lxd')
+
+ @mock.patch('subprocess.check_output')
+ def test_ssh_known_host_key_no_match(self, _check_output):
+ self.setup_ssh_directory_for_unit_mocks()
+ _check_output.return_value = ''
+ self.assertIsNone(
+ ssh_migrations.ssh_known_host_key(
+ 'juju-4665be-20180716142533-8',
+ 'nova-compute-lxd'))
+
+ @mock.patch('subprocess.check_call')
+ def test_remove_known_host(self, _check_call):
+ self.patch_object(ssh_migrations, 'log')
+ self.setup_ssh_directory_for_unit_mocks()
+ ssh_migrations.remove_known_host(
+ 'juju-4665be-20180716142533-8',
+ 'nova-compute-lxd')
+ _check_call.assert_called_once_with([
+ 'ssh-keygen',
+ '-f',
+ '/somedir/known_hosts',
+ '-R',
+ 'juju-4665be-20180716142533-8'])
+
+ def test_is_same_key(self):
+ self.assertTrue(
+ ssh_migrations.is_same_key(UNIT1_HOST_KEY_1, UNIT1_HOST_KEY_2))
+
+ def test_is_same_key_false(self):
+ self.assertFalse(
+ ssh_migrations.is_same_key(UNIT1_HOST_KEY_1, UNIT2_HOST_KEY_1))
+
+ def setup_mocks_add_known_host(self):
+ self.setup_ssh_directory_for_unit_mocks()
+ self.patch_object(ssh_migrations.subprocess, 'check_output')
+ self.patch_object(ssh_migrations, 'log')
+ self.patch_object(ssh_migrations, 'ssh_known_host_key')
+ self.patch_object(ssh_migrations, 'remove_known_host')
+
+ def test_add_known_host(self):
+ self.setup_mocks_add_known_host()
+ self.check_output.return_value = UNIT1_HOST_KEY_1
+ self.ssh_known_host_key.return_value = ''
+ with patch_open() as (mock_open, mock_file):
+ ssh_migrations.add_known_host(
+ 'juju-4665be-20180716142533-8',
+ 'nova-compute-lxd')
+ mock_file.write.assert_called_with(UNIT1_HOST_KEY_1 + '\n')
+ mock_open.assert_called_with('/somedir/known_hosts', 'a')
+ self.assertFalse(self.remove_known_host.called)
+
+ def test_add_known_host_existing_invalid_key(self):
+ self.setup_mocks_add_known_host()
+ self.check_output.return_value = UNIT1_HOST_KEY_1
+ self.ssh_known_host_key.return_value = UNIT2_HOST_KEY_1
+ with patch_open() as (mock_open, mock_file):
+ ssh_migrations.add_known_host(
+ 'juju-4665be-20180716142533-8',
+ 'nova-compute-lxd')
+ mock_file.write.assert_called_with(UNIT1_HOST_KEY_1 + '\n')
+ mock_open.assert_called_with('/somedir/known_hosts', 'a')
+ self.remove_known_host.assert_called_once_with(
+ 'juju-4665be-20180716142533-8',
+ 'nova-compute-lxd',
+ None)
+
+ def test_add_known_host_existing_valid_key(self):
+ self.setup_mocks_add_known_host()
+ self.check_output.return_value = UNIT2_HOST_KEY_1
+ self.ssh_known_host_key.return_value = UNIT2_HOST_KEY_1
+ with patch_open() as (mock_open, mock_file):
+ ssh_migrations.add_known_host(
+ 'juju-4665be-20180716142533-8',
+ 'nova-compute-lxd')
+ self.assertFalse(mock_open.called)
+ self.assertFalse(self.remove_known_host.called)
+
+ def test_ssh_authorized_key_exists(self):
+ self.setup_mocks_add_known_host()
+ contents = '{}\n{}\n'.format(UNIT1_PUBKEY_1, UNIT2_PUBKEY_1)
+ with mock_open('/somedir/authorized_keys', contents=contents):
+ self.assertTrue(
+ ssh_migrations.ssh_authorized_key_exists(
+ UNIT1_PUBKEY_1,
+ 'nova-compute-lxd'))
+
+ def test_ssh_authorized_key_exists_false(self):
+ self.setup_mocks_add_known_host()
+ contents = '{}\n'.format(UNIT1_PUBKEY_1)
+ with mock_open('/somedir/authorized_keys', contents=contents):
+ self.assertFalse(
+ ssh_migrations.ssh_authorized_key_exists(
+ UNIT2_PUBKEY_1,
+ 'nova-compute-lxd'))
+
+ def test_add_authorized_key(self):
+
self.setup_mocks_add_known_host() + with patch_open() as (mock_open, mock_file): + ssh_migrations.add_authorized_key( + UNIT1_PUBKEY_1, + 'nova-compute-lxd') + mock_file.write.assert_called_with(UNIT1_PUBKEY_1 + '\n') + mock_open.assert_called_with('/somedir/authorized_keys', 'a') + + def setup_mocks_ssh_compute_add_host_and_key(self): + self.setup_ssh_directory_for_unit_mocks() + self.patch_object(ssh_migrations, 'log') + self.patch_object(ssh_migrations, 'get_hostname') + self.patch_object(ssh_migrations, 'get_host_ip') + self.patch_object(ssh_migrations, 'ns_query') + self.patch_object(ssh_migrations, 'add_known_host') + self.patch_object(ssh_migrations, 'ssh_authorized_key_exists') + self.patch_object(ssh_migrations, 'add_authorized_key') + + def test_ssh_compute_add_host_and_key(self): + self.setup_mocks_ssh_compute_add_host_and_key() + self.get_hostname.return_value = 'alt-hostname.project.serverstack' + self.ns_query.return_value = '10.6.0.17' + ssh_migrations.ssh_compute_add_host_and_key( + UNIT1_PUBKEY_1, + 'juju-4665be-20180716142533-8.project.serverstack', + '10.5.0.17', + 'nova-compute-lxd') + expect_hosts = [ + 'juju-4665be-20180716142533-8.project.serverstack', + 'alt-hostname.project.serverstack', + 'alt-hostname'] + add_known_host_calls = [] + for host in expect_hosts: + add_known_host_calls.append( + mock.call(host, 'nova-compute-lxd', None)) + self.add_known_host.assert_has_calls( + add_known_host_calls, + any_order=True) + self.add_authorized_key.assert_called_once_with( + UNIT1_PUBKEY_1, + 'nova-compute-lxd', + None) + + def test_ssh_compute_add_host_and_key_priv_addr_not_ip(self): + self.setup_mocks_ssh_compute_add_host_and_key() + self.get_hostname.return_value = 'alt-hostname.project.serverstack' + self.ns_query.return_value = '10.6.0.17' + self.get_host_ip.return_value = '10.6.0.17' + ssh_migrations.ssh_compute_add_host_and_key( + UNIT1_PUBKEY_1, + 'juju-4665be-20180716142533-8.project.serverstack', + 'bob.maas', + 'nova-compute-lxd') + expect_hosts = [ + 'bob.maas', + 'juju-4665be-20180716142533-8.project.serverstack', + '10.6.0.17', + 'bob'] + add_known_host_calls = [] + for host in expect_hosts: + add_known_host_calls.append( + mock.call(host, 'nova-compute-lxd', None)) + self.add_known_host.assert_has_calls( + add_known_host_calls, + any_order=True) + self.add_authorized_key.assert_called_once_with( + UNIT1_PUBKEY_1, + 'nova-compute-lxd', + None) + + def test_ssh_compute_add_host_and_key_ipv6(self): + self.setup_mocks_ssh_compute_add_host_and_key() + ssh_migrations.ssh_compute_add_host_and_key( + UNIT1_PUBKEY_1, + 'juju-4665be-20180716142533-8.project.serverstack', + 'fe80::8842:a9ff:fe53:72e4', + 'nova-compute-lxd') + self.add_known_host.assert_called_once_with( + 'fe80::8842:a9ff:fe53:72e4', + 'nova-compute-lxd', + None) + self.add_authorized_key.assert_called_once_with( + UNIT1_PUBKEY_1, + 'nova-compute-lxd', + None) + + @mock.patch.object(ssh_migrations, 'ssh_compute_add_host_and_key') + @mock.patch.object(ssh_migrations, 'relation_get') + def test_ssh_compute_add(self, _relation_get, + _ssh_compute_add_host_and_key): + _relation_get.return_value = { + 'hostname': 'juju-4665be-20180716142533-8.project.serverstack', + 'private-address': '10.5.0.17', + } + ssh_migrations.ssh_compute_add( + UNIT1_PUBKEY_1, + 'nova-compute-lxd', + rid='cloud-compute:23', + unit='nova-compute-lxd/2') + _ssh_compute_add_host_and_key.assert_called_once_with( + UNIT1_PUBKEY_1, + 'juju-4665be-20180716142533-8.project.serverstack', + '10.5.0.17', + 'nova-compute-lxd', + user=None) + + 
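+ # The *_lines helpers should return the fixture keys exactly as stored
+ # in the known_hosts and authorized_keys files.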
@mock.patch.object(ssh_migrations, 'known_hosts') + def test_ssh_known_hosts_lines(self, _known_hosts): + _known_hosts.return_value = '/somedir/known_hosts' + contents = '\n'.join(HOST_KEYS) + with mock_open('/somedir/known_hosts', contents=contents): + self.assertEqual( + ssh_migrations.ssh_known_hosts_lines('nova-compute-lxd'), + HOST_KEYS) + + @mock.patch.object(ssh_migrations, 'authorized_keys') + def test_ssh_authorized_keys_lines(self, _authorized_keys): + _authorized_keys.return_value = '/somedir/authorized_keys' + contents = '\n'.join(PUB_KEYS) + with mock_open('/somedir/authorized_keys', contents=contents): + self.assertEqual( + ssh_migrations.ssh_authorized_keys_lines('nova-compute-lxd'), + PUB_KEYS) + + def setup_mocks_ssh_compute_remove(self, isfile, authorized_keys_lines): + self.patch_object( + ssh_migrations, + 'ssh_authorized_keys_lines', + return_value=authorized_keys_lines) + self.patch_object(ssh_migrations, 'known_hosts') + self.patch_object( + ssh_migrations, + 'authorized_keys', + return_value='/somedir/authorized_keys') + self.patch_object( + ssh_migrations.os.path, + 'isfile', + return_value=isfile) + + def test_ssh_compute_remove(self): + self.setup_mocks_ssh_compute_remove( + isfile=True, + authorized_keys_lines=PUB_KEYS) + with patch_open() as (mock_open, mock_file): + ssh_migrations.ssh_compute_remove( + UNIT1_PUBKEY_1, + 'nova-compute-lxd') + mock_file.write.assert_called_with(UNIT2_PUBKEY_1 + '\n') + mock_open.assert_called_with('/somedir/authorized_keys', 'w') + + def test_ssh_compute_remove_missing_file(self): + self.setup_mocks_ssh_compute_remove( + isfile=False, + authorized_keys_lines=PUB_KEYS) + with patch_open() as (mock_open, mock_file): + ssh_migrations.ssh_compute_remove( + UNIT1_PUBKEY_1, + 'nova-compute-lxd') + self.assertFalse(mock_file.write.called) + + def test_ssh_compute_remove_missing_key(self): + self.setup_mocks_ssh_compute_remove( + isfile=False, + authorized_keys_lines=[UNIT2_PUBKEY_1]) + with patch_open() as (mock_open, mock_file): + ssh_migrations.ssh_compute_remove( + UNIT1_PUBKEY_1, + 'nova-compute-lxd') + self.assertFalse(mock_file.write.called) + + @mock.patch.object(ssh_migrations, 'ssh_known_hosts_lines') + @mock.patch.object(ssh_migrations, 'ssh_authorized_keys_lines') + def test_get_ssh_settings(self, _ssh_authorized_keys_lines, + _ssh_known_hosts_lines): + _ssh_authorized_keys_lines.return_value = PUB_KEYS + _ssh_known_hosts_lines.return_value = HOST_KEYS + expect = { + 'known_hosts_0': UNIT1_HOST_KEY_1, + 'known_hosts_1': UNIT1_HOST_KEY_2, + 'known_hosts_2': UNIT2_HOST_KEY_1, + 'known_hosts_3': UNIT2_HOST_KEY_2, + 'known_hosts_max_index': 4, + 'authorized_keys_0': UNIT1_PUBKEY_1, + 'authorized_keys_1': UNIT2_PUBKEY_1, + 'authorized_keys_max_index': 2, + } + self.assertEqual( + ssh_migrations.get_ssh_settings('nova-compute-lxd'), + expect) + + @mock.patch.object(ssh_migrations, 'ssh_known_hosts_lines') + @mock.patch.object(ssh_migrations, 'ssh_authorized_keys_lines') + def test_get_ssh_settings_user(self, _ssh_authorized_keys_lines, + _ssh_known_hosts_lines): + _ssh_authorized_keys_lines.return_value = PUB_KEYS + _ssh_known_hosts_lines.return_value = HOST_KEYS + expect = { + 'nova_known_hosts_0': UNIT1_HOST_KEY_1, + 'nova_known_hosts_1': UNIT1_HOST_KEY_2, + 'nova_known_hosts_2': UNIT2_HOST_KEY_1, + 'nova_known_hosts_3': UNIT2_HOST_KEY_2, + 'nova_known_hosts_max_index': 4, + 'nova_authorized_keys_0': UNIT1_PUBKEY_1, + 'nova_authorized_keys_1': UNIT2_PUBKEY_1, + 'nova_authorized_keys_max_index': 2, + } + self.assertEqual( + 
ssh_migrations.get_ssh_settings('nova-compute-lxd', user='nova'),
+ expect)
+
+ @mock.patch.object(ssh_migrations, 'ssh_known_hosts_lines')
+ @mock.patch.object(ssh_migrations, 'ssh_authorized_keys_lines')
+ def test_get_ssh_settings_empty(self, _ssh_authorized_keys_lines,
+ _ssh_known_hosts_lines):
+ _ssh_authorized_keys_lines.return_value = []
+ _ssh_known_hosts_lines.return_value = []
+ self.assertEqual(
+ ssh_migrations.get_ssh_settings('nova-compute-lxd'),
+ {})
+
+ @mock.patch.object(ssh_migrations, 'get_ssh_settings')
+ def test_get_all_user_ssh_settings(self, _get_ssh_settings):
+ def ssh_settings(application_name, user=None):
+ base_settings = {
+ 'known_hosts_0': UNIT1_HOST_KEY_1,
+ 'known_hosts_max_index': 1,
+ 'authorized_keys_0': UNIT1_PUBKEY_1,
+ 'authorized_keys_max_index': 1}
+ user_settings = {
+ 'nova_known_hosts_0': UNIT1_HOST_KEY_1,
+ 'nova_known_hosts_max_index': 1,
+ 'nova_authorized_keys_0': UNIT1_PUBKEY_1,
+ 'nova_authorized_keys_max_index': 1}
+ if user:
+ return user_settings
+ else:
+ return base_settings
+ _get_ssh_settings.side_effect = ssh_settings
+ expect = {
+ 'known_hosts_0': UNIT1_HOST_KEY_1,
+ 'known_hosts_max_index': 1,
+ 'authorized_keys_0': UNIT1_PUBKEY_1,
+ 'authorized_keys_max_index': 1,
+ 'nova_known_hosts_0': UNIT1_HOST_KEY_1,
+ 'nova_known_hosts_max_index': 1,
+ 'nova_authorized_keys_0': UNIT1_PUBKEY_1,
+ 'nova_authorized_keys_max_index': 1}
+ self.assertEqual(
+ ssh_migrations.get_all_user_ssh_settings('nova-compute-lxd'),
+ expect)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_vaultlocker.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_vaultlocker.py
new file mode 100644
index 0000000000000000000000000000000000000000..496fd56fdd1b6e1dab0d5abb5b08a82e40d94393
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/openstack/test_vaultlocker.py
@@ -0,0 +1,261 @@
+# Copyright 2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
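+#
+# The relation fixtures below model incomplete, complete, and stale
+# ("dirty") secrets-storage relations, used to exercise VaultKVContext
+# secret-id handling and vaultlocker.conf rendering.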
+ +import json +import mock +import os +import sys +import unittest + +import charmhelpers.contrib.openstack.vaultlocker as vaultlocker + +from .test_os_contexts import TestDB + + +INCOMPLETE_RELATION = { + 'secrets-storage:1': { + 'vault/0': {} + } +} + +COMPLETE_RELATION = { + 'secrets-storage:1': { + 'vault/0': { + 'vault_url': json.dumps('http://vault:8200'), + 'test-service/0_role_id': json.dumps('test-role-from-vault'), + 'test-service/0_token': + json.dumps('00c9a9ab-c523-459d-a250-2ce8f0877c03'), + } + } +} + +DIRTY_RELATION = { + 'secrets-storage:1': { + 'vault/0': { + 'vault_url': json.dumps('http://vault:8200'), + 'test-service/0_role_id': json.dumps('test-role-from-vault'), + 'test-service/0_token': + json.dumps('00c9a9ab-c523-459d-a250-2ce8f0877c03'), + }, + 'vault/1': { + 'vault_url': json.dumps('http://vault:8200'), + 'test-service/0_role_id': json.dumps('test-role-from-vault'), + 'test-service/0_token': + json.dumps('67b36149-dc86-4b80-96c4-35b91847d16e'), + } + } +} + +COMPLETE_WITH_CA_RELATION = { + 'secrets-storage:1': { + 'vault/0': { + 'vault_url': json.dumps('http://vault:8200'), + 'test-service/0_role_id': json.dumps('test-role-from-vault'), + 'test-service/0_token': + json.dumps('00c9a9ab-c523-459d-a250-2ce8f0877c03'), + 'vault_ca': json.dumps('test-ca-data'), + } + } +} + + +class VaultLockerTestCase(unittest.TestCase): + + to_patch = [ + 'hookenv', + 'templating', + 'alternatives', + 'host', + 'unitdata', + ] + + _target_path = '/var/lib/charm/test-service/vaultlocker.conf' + + def setUp(self): + for m in self.to_patch: + setattr(self, m, self._patch(m)) + self.hookenv.service_name.return_value = 'test-service' + self.hookenv.local_unit.return_value = 'test-service/0' + self.db = TestDB() + self.unitdata.kv.return_value = self.db + fake_exc = mock.MagicMock() + fake_exc.InvalidRequest = Exception + self.fake_hvac = mock.MagicMock() + self.fake_hvac.exceptions = fake_exc + sys.modules['hvac'] = self.fake_hvac + + def fake_retrieve_secret_id(self, url=None, token=None): + if token == self.good_token: + return '31be8e65-20a3-45e0-a4a8-4d5a0554fb60' + else: + raise self.fake_hvac.exceptions.InvalidRequest + + def _patch(self, target): + _m = mock.patch.object(vaultlocker, target) + _mock = _m.start() + self.addCleanup(_m.stop) + return _mock + + def test_write_vl_config(self): + ctxt = {'test': 'data'} + vaultlocker.write_vaultlocker_conf(context=ctxt) + self.hookenv.service_name.assert_called_once_with() + self.host.mkdir.assert_called_once_with( + os.path.dirname(self._target_path), + perms=0o700 + ) + self.templating.render.assert_called_once_with( + source='vaultlocker.conf.j2', + target=self._target_path, + context=ctxt, + perms=0o600, + ) + self.alternatives.install_alternative.assert_called_once_with( + 'vaultlocker.conf', + '/etc/vaultlocker/vaultlocker.conf', + self._target_path, + 100 + ) + + def test_write_vl_config_priority(self): + ctxt = {'test': 'data'} + vaultlocker.write_vaultlocker_conf(context=ctxt, priority=200) + self.hookenv.service_name.assert_called_once_with() + self.host.mkdir.assert_called_once_with( + os.path.dirname(self._target_path), + perms=0o700 + ) + self.templating.render.assert_called_once_with( + source='vaultlocker.conf.j2', + target=self._target_path, + context=ctxt, + perms=0o600, + ) + self.alternatives.install_alternative.assert_called_once_with( + 'vaultlocker.conf', + '/etc/vaultlocker/vaultlocker.conf', + self._target_path, + 200 + ) + + def _setup_relation(self, relation): + self.hookenv.relation_ids.side_effect = ( + 
lambda _: relation.keys() + ) + self.hookenv.related_units.side_effect = ( + lambda rid: relation[rid].keys() + ) + self.hookenv.relation_get.side_effect = ( + lambda unit, rid: + relation[rid][unit] + ) + + def test_context_incomplete(self): + self._setup_relation(INCOMPLETE_RELATION) + context = vaultlocker.VaultKVContext('charm-test') + self.assertEqual(context(), {}) + self.hookenv.relation_ids.assert_called_with('secrets-storage') + self.assertFalse(vaultlocker.vault_relation_complete()) + + @mock.patch.object(vaultlocker, 'retrieve_secret_id') + def test_context_complete(self, retrieve_secret_id): + self._setup_relation(COMPLETE_RELATION) + context = vaultlocker.VaultKVContext('charm-test') + retrieve_secret_id.return_value = 'a3551c8d-0147-4cb6-afc6-efb3db2fccb2' + self.assertEqual(context(), + {'role_id': 'test-role-from-vault', + 'secret_backend': 'charm-test', + 'secret_id': 'a3551c8d-0147-4cb6-afc6-efb3db2fccb2', + 'vault_url': 'http://vault:8200'}) + self.hookenv.relation_ids.assert_called_with('secrets-storage') + self.assertTrue(vaultlocker.vault_relation_complete()) + calls = [mock.call(url='http://vault:8200', + token='00c9a9ab-c523-459d-a250-2ce8f0877c03')] + retrieve_secret_id.assert_has_calls(calls) + + @mock.patch.object(vaultlocker, 'retrieve_secret_id') + def test_context_complete_cached_secret_id(self, retrieve_secret_id): + self._setup_relation(COMPLETE_RELATION) + context = vaultlocker.VaultKVContext('charm-test') + self.db.set('secret-id', '5502fd27-059b-4b0a-91b2-eaff40b6a112') + self.good_token = 'invalid-token' # i.e. cause failure + retrieve_secret_id.side_effect = self.fake_retrieve_secret_id + self.assertEqual(context(), + {'role_id': 'test-role-from-vault', + 'secret_backend': 'charm-test', + 'secret_id': '5502fd27-059b-4b0a-91b2-eaff40b6a112', + 'vault_url': 'http://vault:8200'}) + self.hookenv.relation_ids.assert_called_with('secrets-storage') + self.assertTrue(vaultlocker.vault_relation_complete()) + calls = [mock.call(url='http://vault:8200', + token='00c9a9ab-c523-459d-a250-2ce8f0877c03')] + retrieve_secret_id.assert_has_calls(calls) + + @mock.patch.object(vaultlocker, 'retrieve_secret_id') + def test_purge_old_tokens(self, retrieve_secret_id): + self._setup_relation(DIRTY_RELATION) + context = vaultlocker.VaultKVContext('charm-test') + self.db.set('secret-id', '5502fd27-059b-4b0a-91b2-eaff40b6a112') + self.good_token = '67b36149-dc86-4b80-96c4-35b91847d16e' + retrieve_secret_id.side_effect = self.fake_retrieve_secret_id + self.assertEqual(context(), + {'role_id': 'test-role-from-vault', + 'secret_backend': 'charm-test', + 'secret_id': '31be8e65-20a3-45e0-a4a8-4d5a0554fb60', + 'vault_url': 'http://vault:8200'}) + self.hookenv.relation_ids.assert_called_with('secrets-storage') + self.assertTrue(vaultlocker.vault_relation_complete()) + self.assertEquals(self.db.get('secret-id'), + '31be8e65-20a3-45e0-a4a8-4d5a0554fb60') + calls = [mock.call(url='http://vault:8200', + token='67b36149-dc86-4b80-96c4-35b91847d16e')] + retrieve_secret_id.assert_has_calls(calls) + + @mock.patch.object(vaultlocker, 'retrieve_secret_id') + def test_context_complete_cached_dirty_data(self, retrieve_secret_id): + self._setup_relation(DIRTY_RELATION) + context = vaultlocker.VaultKVContext('charm-test') + self.db.set('secret-id', '5502fd27-059b-4b0a-91b2-eaff40b6a112') + self.good_token = '67b36149-dc86-4b80-96c4-35b91847d16e' + retrieve_secret_id.side_effect = self.fake_retrieve_secret_id + self.assertEqual(context(), + {'role_id': 'test-role-from-vault', + 'secret_backend': 
'charm-test', + 'secret_id': '31be8e65-20a3-45e0-a4a8-4d5a0554fb60', + 'vault_url': 'http://vault:8200'}) + self.hookenv.relation_ids.assert_called_with('secrets-storage') + self.assertTrue(vaultlocker.vault_relation_complete()) + self.assertEquals(self.db.get('secret-id'), + '31be8e65-20a3-45e0-a4a8-4d5a0554fb60') + calls = [mock.call(url='http://vault:8200', + token='67b36149-dc86-4b80-96c4-35b91847d16e')] + retrieve_secret_id.assert_has_calls(calls) + + @mock.patch.object(vaultlocker, 'retrieve_secret_id') + def test_context_complete_with_ca(self, retrieve_secret_id): + self._setup_relation(COMPLETE_WITH_CA_RELATION) + retrieve_secret_id.return_value = 'token1234' + context = vaultlocker.VaultKVContext('charm-test') + retrieve_secret_id.return_value = 'a3551c8d-0147-4cb6-afc6-efb3db2fccb2' + self.assertEqual(context(), + {'role_id': 'test-role-from-vault', + 'secret_backend': 'charm-test', + 'secret_id': 'a3551c8d-0147-4cb6-afc6-efb3db2fccb2', + 'vault_url': 'http://vault:8200', + 'vault_ca': 'test-ca-data'}) + self.hookenv.relation_ids.assert_called_with('secrets-storage') + self.assertTrue(vaultlocker.vault_relation_complete()) + calls = [mock.call(url='http://vault:8200', + token='00c9a9ab-c523-459d-a250-2ce8f0877c03')] + retrieve_secret_id.assert_has_calls(calls) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/peerstorage/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/peerstorage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/peerstorage/test_peerstorage.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/peerstorage/test_peerstorage.py new file mode 100644 index 0000000000000000000000000000000000000000..63b71366dd9e190e91ca2359210d63b45b4719a7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/peerstorage/test_peerstorage.py @@ -0,0 +1,337 @@ +import copy +import json + +from tests.helpers import FakeRelation +from testtools import TestCase +from mock import patch, call +from charmhelpers.contrib import peerstorage + + +TO_PATCH = [ + 'current_relation_id', + 'is_relation_made', + 'local_unit', + 'relation_get', + '_relation_get', + 'relation_ids', + 'relation_set', + '_relation_set', + '_leader_get', + 'leader_set', + 'is_leader', +] +FAKE_RELATION_NAME = 'cluster' +FAKE_RELATION = { + 'cluster:0': { + 'cluster/0': { + }, + 'cluster/1': { + }, + 'cluster/2': { + }, + }, + +} +FAKE_RELATION_IDS = ['cluster:0'] +FAKE_LOCAL_UNIT = 'test_host' + + +class TestPeerStorage(TestCase): + def setUp(self): + super(TestPeerStorage, self).setUp() + for m in TO_PATCH: + setattr(self, m, self._patch(m)) + self.fake_relation_name = FAKE_RELATION_NAME + self.fake_relation = FakeRelation(FAKE_RELATION) + self.local_unit.return_value = FAKE_LOCAL_UNIT + self.relation_get.return_value = {'key1': 'value1', + 'key2': 'value2', + 'private-address': '127.0.0.1', + 'public-address': '91.189.90.159'} + + def _patch(self, method): + _m = patch('charmhelpers.contrib.peerstorage.' 
+ method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def test_peer_retrieve_no_relation(self): + self.relation_ids.return_value = [] + self.assertRaises(ValueError, peerstorage.peer_retrieve, 'key', relation_name=self.fake_relation_name) + + def test_peer_retrieve_with_relation(self): + self.relation_ids.return_value = FAKE_RELATION_IDS + peerstorage.peer_retrieve('key', self.fake_relation_name) + self.relation_get.assert_called_with(attribute='key', rid=FAKE_RELATION_IDS[0], unit=FAKE_LOCAL_UNIT) + + def test_peer_store_no_relation(self): + self.relation_ids.return_value = [] + self.assertRaises(ValueError, peerstorage.peer_store, 'key', 'value', relation_name=self.fake_relation_name) + + def test_peer_store_with_relation(self): + self.relation_ids.return_value = FAKE_RELATION_IDS + peerstorage.peer_store('key', 'value', self.fake_relation_name) + self.relation_set.assert_called_with(relation_id=FAKE_RELATION_IDS[0], + relation_settings={'key': 'value'}) + + def test_peer_echo_no_includes(self): + peerstorage.is_leader.side_effect = NotImplementedError + settings = {'key1': 'value1', 'key2': 'value2'} + self._relation_get.copy.return_value = settings + self._relation_get.return_value = settings + peerstorage.peer_echo() + self._relation_set.assert_called_with(relation_settings=settings) + + def test_peer_echo_includes(self): + peerstorage.is_leader.side_effect = NotImplementedError + settings = {'key1': 'value1'} + self._relation_get.copy.return_value = settings + self._relation_get.return_value = settings + peerstorage.peer_echo(['key1']) + self._relation_set.assert_called_with(relation_settings=settings) + + @patch.object(peerstorage, 'peer_store') + def test_peer_store_and_set_no_relation(self, peer_store): + self.is_relation_made.return_value = False + peerstorage.peer_store_and_set(relation_id='db', kwarg1='kwarg1_v') + self.relation_set.assert_called_with(relation_id='db', + relation_settings={}, + kwarg1='kwarg1_v') + peer_store.assert_not_called() + + @patch.object(peerstorage, 'peer_store') + def test_peer_store_and_set_no_relation_fatal(self, peer_store): + self.is_relation_made.return_value = False + self.assertRaises(ValueError, + peerstorage.peer_store_and_set, + relation_id='db', + kwarg1='kwarg1_v', + peer_store_fatal=True) + + @patch.object(peerstorage, 'peer_store') + def test_peer_store_and_set_kwargs(self, peer_store): + self.is_relation_made.return_value = True + peerstorage.peer_store_and_set(relation_id='db', kwarg1='kwarg1_v') + self.relation_set.assert_called_with(relation_id='db', + relation_settings={}, + kwarg1='kwarg1_v') + calls = [call('db_kwarg1', 'kwarg1_v', relation_name='cluster')] + peer_store.assert_has_calls(calls, any_order=True) + + @patch.object(peerstorage, 'peer_store') + def test_peer_store_and_rel_settings(self, peer_store): + self.is_relation_made.return_value = True + rel_setting = { + 'rel_set1': 'relset1_v' + } + peerstorage.peer_store_and_set(relation_id='db', + relation_settings=rel_setting) + self.relation_set.assert_called_with(relation_id='db', + relation_settings=rel_setting) + calls = [call('db_rel_set1', 'relset1_v', relation_name='cluster')] + peer_store.assert_has_calls(calls, any_order=True) + + @patch.object(peerstorage, 'peer_store') + def test_peer_store_and_set(self, peer_store): + self.is_relation_made.return_value = True + rel_setting = { + 'rel_set1': 'relset1_v' + } + peerstorage.peer_store_and_set(relation_id='db', + relation_settings=rel_setting, + kwarg1='kwarg1_v', + delimiter='+') + 
+        self.relation_set.assert_called_with(relation_id='db',
+                                             relation_settings=rel_setting,
+                                             kwarg1='kwarg1_v')
+        calls = [call('db+rel_set1', 'relset1_v', relation_name='cluster'),
+                 call('db+kwarg1', 'kwarg1_v', relation_name='cluster')]
+        peer_store.assert_has_calls(calls, any_order=True)
+
+    @patch.object(peerstorage, 'peer_retrieve')
+    def test_peer_retrieve_by_prefix(self, peer_retrieve):
+        rel_id = 'db:2'
+        settings = {
+            'user': 'bob',
+            'pass': 'reallyhardpassword',
+            'host': 'myhost',
+        }
+        peer_settings = {rel_id + '_' + k: v for k, v in settings.items()}
+        peer_retrieve.return_value = peer_settings
+        self.assertEquals(peerstorage.peer_retrieve_by_prefix(rel_id), settings)
+
+    @patch.object(peerstorage, 'peer_retrieve')
+    def test_peer_retrieve_by_prefix_empty_relation(self, peer_retrieve):
+        # If relation-get returns None, peer_retrieve_by_prefix returns
+        # an empty dictionary.
+        peer_retrieve.return_value = None
+        rel_id = 'db:2'
+        self.assertEquals(peerstorage.peer_retrieve_by_prefix(rel_id), {})
+
+    @patch.object(peerstorage, 'peer_retrieve')
+    def test_peer_retrieve_by_prefix_exc_list(self, peer_retrieve):
+        rel_id = 'db:2'
+        settings = {
+            'user': 'bob',
+            'pass': 'reallyhardpassword',
+            'host': 'myhost',
+        }
+        peer_settings = {rel_id + '_' + k: v for k, v in settings.items()}
+        del settings['host']
+        peer_retrieve.return_value = peer_settings
+        self.assertEquals(peerstorage.peer_retrieve_by_prefix(rel_id,
+                                                              exc_list=['host']),
+                          settings)
+
+    @patch.object(peerstorage, 'peer_retrieve')
+    def test_peer_retrieve_by_prefix_inc_list(self, peer_retrieve):
+        rel_id = 'db:2'
+        settings = {
+            'user': 'bob',
+            'pass': 'reallyhardpassword',
+            'host': 'myhost',
+        }
+        peer_settings = {rel_id + '_' + k: v for k, v in settings.items()}
+        peer_retrieve.return_value = peer_settings
+        self.assertEquals(peerstorage.peer_retrieve_by_prefix(rel_id,
+                                                              inc_list=['host']),
+                          {'host': 'myhost'})
+
+    def test_leader_get_migration_is_leader(self):
+        self.is_leader.return_value = True
+        l_settings = {'s3': 3}
+        r_settings = {'s1': 1, 's2': 2}
+
+        def mock_relation_get(attribute=None, unit=None, rid=None):
+            if attribute:
+                if attribute in r_settings:
+                    return r_settings.get(attribute)
+                else:
+                    return None
+
+            return copy.deepcopy(r_settings)
+
+        def mock_leader_get(attribute=None):
+            if attribute:
+                if attribute in l_settings:
+                    return l_settings.get(attribute)
+                else:
+                    return None
+
+            return copy.deepcopy(l_settings)
+
+        def mock_leader_set(settings=None, **kwargs):
+            if settings:
+                l_settings.update(settings)
+
+            l_settings.update(kwargs)
+
+        def check_leader_db(dicta, dictb):
+            _dicta = copy.deepcopy(dicta)
+            _dictb = copy.deepcopy(dictb)
+            # Compare the migrated-key lists order-insensitively; sorted()
+            # is used because list.sort() returns None, which would make
+            # the assertion vacuously compare None with None.
+            miga = sorted(json.loads(_dicta[migration_key]))
+            migb = sorted(json.loads(_dictb[migration_key]))
+            self.assertEqual(miga, migb)
+            del _dicta[migration_key]
+            del _dictb[migration_key]
+            self.assertEqual(_dicta, _dictb)
+
+        migration_key = '__leader_get_migrated_settings__'
+        self._relation_get.side_effect = mock_relation_get
+        self._leader_get.side_effect = mock_leader_get
+        self.leader_set.side_effect = mock_leader_set
+
+        self.assertEqual({'s1': 1, 's2': 2}, peerstorage._relation_get())
+        self.assertEqual({'s3': 3}, peerstorage._leader_get())
+        self.assertEqual({'s1': 1, 's2': 2, 's3': 3}, peerstorage.leader_get())
+        check_leader_db({'s1': 1, 's2': 2, 's3': 3,
+                         migration_key: '["s2", "s1"]'}, l_settings)
+        self.assertTrue(peerstorage.leader_set.called)
+
+        peerstorage.leader_set.reset_mock()
+        self.assertEqual({'s1': 1, 's2': 2, 's3': 3},
peerstorage.leader_get()) + check_leader_db({'s1': 1, 's2': 2, 's3': 3, + migration_key: '["s2", "s1"]'}, l_settings) + self.assertFalse(peerstorage.leader_set.called) + + l_settings = {'s3': 3} + peerstorage.leader_set.reset_mock() + self.assertEqual(1, peerstorage.leader_get('s1')) + check_leader_db({'s1': 1, 's3': 3, + migration_key: '["s1"]'}, l_settings) + self.assertTrue(peerstorage.leader_set.called) + + # Test that leader vals take precedence over non-leader vals + r_settings['s3'] = 2 + r_settings['s4'] = 3 + l_settings['s4'] = 4 + + peerstorage.leader_set.reset_mock() + self.assertEqual(4, peerstorage.leader_get('s4')) + check_leader_db({'s1': 1, 's3': 3, 's4': 4, + migration_key: '["s1", "s4"]'}, l_settings) + self.assertTrue(peerstorage.leader_set.called) + + peerstorage.leader_set.reset_mock() + self.assertEqual({'s1': 1, 's2': 2, 's3': 2, 's4': 3}, + peerstorage._relation_get()) + check_leader_db({'s1': 1, 's3': 3, 's4': 4, + migration_key: '["s1", "s4"]'}, + peerstorage._leader_get()) + self.assertEqual({'s1': 1, 's2': 2, 's3': 3, 's4': 4}, + peerstorage.leader_get()) + check_leader_db({'s1': 1, 's2': 2, 's3': 3, 's4': 4, + migration_key: '["s3", "s2", "s1", "s4"]'}, + l_settings) + self.assertTrue(peerstorage.leader_set.called) + + def test_leader_get_migration_is_not_leader(self): + self.is_leader.return_value = False + l_settings = {'s3': 3} + r_settings = {'s1': 1, 's2': 2} + + def mock_relation_get(attribute=None, unit=None, rid=None): + if attribute: + if attribute in r_settings: + return r_settings.get(attribute) + else: + return None + + return copy.deepcopy(r_settings) + + def mock_leader_get(attribute=None): + if attribute: + if attribute in l_settings: + return l_settings.get(attribute) + else: + return None + + return copy.deepcopy(l_settings) + + def mock_leader_set(settings=None, **kwargs): + if settings: + l_settings.update(settings) + + l_settings.update(kwargs) + + self._relation_get.side_effect = mock_relation_get + self._leader_get.side_effect = mock_leader_get + self.leader_set.side_effect = mock_leader_set + self.assertEqual({'s1': 1, 's2': 2}, peerstorage._relation_get()) + self.assertEqual({'s3': 3}, peerstorage._leader_get()) + self.assertEqual({'s3': 3}, peerstorage.leader_get()) + self.assertEqual({'s3': 3}, l_settings) + self.assertFalse(peerstorage.leader_set.called) + + self.assertEqual({'s3': 3}, peerstorage.leader_get()) + self.assertEqual({'s3': 3}, l_settings) + self.assertFalse(peerstorage.leader_set.called) + + # Test that leader vals take precedence over non-leader vals + r_settings['s3'] = 2 + r_settings['s4'] = 3 + l_settings['s4'] = 4 + + self.assertEqual(4, peerstorage.leader_get('s4')) + self.assertEqual({'s3': 3, 's4': 4}, l_settings) + self.assertFalse(peerstorage.leader_set.called) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/python/test_python.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/python/test_python.py new file mode 100644 index 0000000000000000000000000000000000000000..59c8b6ede89eda52c0fd4eda8e6275bce7aac68e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/python/test_python.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# coding: utf-8 + +from __future__ import absolute_import + +from unittest import TestCase + +from charmhelpers.fetch.python import debug +from charmhelpers.fetch.python import packages +from charmhelpers.fetch.python import rpdb +from charmhelpers.fetch.python import version +from 
charmhelpers.contrib.python import debug as contrib_debug +from charmhelpers.contrib.python import packages as contrib_packages +from charmhelpers.contrib.python import rpdb as contrib_rpdb +from charmhelpers.contrib.python import version as contrib_version + + +class ContribDebugTestCase(TestCase): + def test_aliases(self): + assert contrib_debug is debug + assert contrib_packages is packages + assert contrib_rpdb is rpdb + assert contrib_version is version diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/saltstack/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/saltstack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/saltstack/test_saltstates.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/saltstack/test_saltstates.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ac35ea80c842cc3a2330066c7e7a3a05154d8b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/saltstack/test_saltstates.py @@ -0,0 +1,75 @@ +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers +import mock +import unittest + +import charmhelpers.contrib.saltstack + + +class InstallSaltSupportTestCase(unittest.TestCase): + + def setUp(self): + super(InstallSaltSupportTestCase, self).setUp() + + patcher = mock.patch('charmhelpers.contrib.saltstack.subprocess') + self.mock_subprocess = patcher.start() + self.addCleanup(patcher.stop) + + patcher = mock.patch('charmhelpers.fetch') + self.mock_charmhelpers_fetch = patcher.start() + self.addCleanup(patcher.stop) + + def test_adds_ppa_by_default(self): + charmhelpers.contrib.saltstack.install_salt_support() + + expected_calls = [((cmd,), {}) for cmd in [ + ['/usr/bin/add-apt-repository', '--yes', 'ppa:saltstack/salt'], + ['/usr/bin/apt-get', 'update'], + ]] + self.assertEqual(self.mock_subprocess.check_call.call_count, 2) + self.assertEqual( + expected_calls, self.mock_subprocess.check_call.call_args_list) + self.mock_charmhelpers_fetch.apt_install.assert_called_once_with( + 'salt-common') + + def test_no_ppa(self): + charmhelpers.contrib.saltstack.install_salt_support( + from_ppa=False) + + self.assertEqual(self.mock_subprocess.check_call.call_count, 0) + self.mock_charmhelpers_fetch.apt_install.assert_called_once_with( + 'salt-common') + + +class UpdateMachineStateTestCase(unittest.TestCase): + + def setUp(self): + super(UpdateMachineStateTestCase, self).setUp() + + patcher = mock.patch('charmhelpers.contrib.saltstack.subprocess') + self.mock_subprocess = patcher.start() + self.addCleanup(patcher.stop) + + patcher = mock.patch('charmhelpers.contrib.templating.contexts.' 
+ 'juju_state_to_yaml') + self.mock_config_2_grains = patcher.start() + self.addCleanup(patcher.stop) + + def test_calls_local_salt_template(self): + charmhelpers.contrib.saltstack.update_machine_state( + 'states/install.yaml') + + self.mock_subprocess.check_call.assert_called_once_with([ + 'salt-call', + '--local', + 'state.template', + 'states/install.yaml', + ]) + + def test_updates_grains(self): + charmhelpers.contrib.saltstack.update_machine_state( + 'states/install.yaml') + + self.mock_config_2_grains.assert_called_once_with('/etc/salt/grains') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ssl/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ssl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ssl/test_service.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ssl/test_service.py new file mode 100644 index 0000000000000000000000000000000000000000..389f6db2341afa44d9f26399e490e55e5de71a82 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ssl/test_service.py @@ -0,0 +1,78 @@ +from testtools import TestCase +import tempfile +import shutil +import subprocess +import six +import mock + +from os.path import exists, join, isdir + +from charmhelpers.contrib.ssl import service + + +class ServiceCATest(TestCase): + + def setUp(self): + super(ServiceCATest, self).setUp() + self.temp_dir = tempfile.mkdtemp() + + def tearDown(self): + super(ServiceCATest, self).tearDown() + shutil.rmtree(self.temp_dir, ignore_errors=True) + + @mock.patch("charmhelpers.contrib.ssl.service.log") + def test_init(self, *args): + """ + Tests that a ServiceCA is initialized with the correct directory + layout. + """ + ca_root_dir = join(self.temp_dir, 'ca') + ca = service.ServiceCA('fake-name', ca_root_dir) + ca.init() + + paths_to_verify = [ + 'certs/', + 'crl/', + 'newcerts/', + 'private/', + 'private/cacert.key', + 'cacert.pem', + 'serial', + 'index.txt', + 'ca.cnf', + 'signing.cnf', + ] + + for path in paths_to_verify: + full_path = join(ca_root_dir, path) + self.assertTrue(exists(full_path), + 'Path {} does not exist'.format(full_path)) + + if path.endswith('/'): + self.assertTrue(isdir(full_path), + 'Path {} is not a dir'.format(full_path)) + + @mock.patch("charmhelpers.contrib.ssl.service.log") + def test_create_cert(self, *args): + """ + Tests that a generated certificate is valid against the ca. 
+ """ + ca_root_dir = join(self.temp_dir, 'ca') + ca = service.ServiceCA('fake-name', ca_root_dir) + ca.init() + + ca.get_or_create_cert('fake-cert') + + # Verify that the cert belongs to the ca + self.assertTrue('fake-cert' in ca) + + full_cert_path = join(ca_root_dir, 'certs', 'fake-cert.crt') + cmd = ['openssl', 'verify', '-verbose', + '-CAfile', join(ca_root_dir, 'cacert.pem'), full_cert_path] + + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT).strip() + expected = '{}: OK'.format(full_cert_path) + if six.PY3: + expected = bytes(expected, 'utf-8') + self.assertEqual(expected, output) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ssl/test_ssl.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ssl/test_ssl.py new file mode 100644 index 0000000000000000000000000000000000000000..b1fa461c5f2d24d2256427174657671f7a3f5162 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/ssl/test_ssl.py @@ -0,0 +1,56 @@ +from mock import patch +from testtools import TestCase + +from charmhelpers.contrib import ssl + + +class HelpersTest(TestCase): + @patch('subprocess.check_call') + def test_generate_selfsigned_dict(self, mock_call): + subject = {"country": "UK", + "locality": "my_locality", + "state": "my_state", + "organization": "my_organization", + "organizational_unit": "my_unit", + "cn": "mysite.example.com", + "email": "me@example.com" + } + + ssl.generate_selfsigned("mykey.key", "mycert.crt", subject=subject) + mock_call.assert_called_with(['/usr/bin/openssl', 'req', '-new', + '-newkey', 'rsa:1024', '-days', '365', + '-nodes', '-x509', '-keyout', + 'mykey.key', '-out', 'mycert.crt', + '-subj', + '/C=UK/ST=my_state/L=my_locality' + '/O=my_organization/OU=my_unit' + '/CN=mysite.example.com' + '/emailAddress=me@example.com'] + ) + + @patch('charmhelpers.core.hookenv.log') + def test_generate_selfsigned_failure(self, mock_log): + # This is NOT enough, functino requires cn key + subject = {"country": "UK", + "locality": "my_locality"} + + result = ssl.generate_selfsigned("mykey.key", "mycert.crt", subject=subject) + self.assertFalse(result) + + @patch('subprocess.check_call') + def test_generate_selfsigned_file(self, mock_call): + ssl.generate_selfsigned("mykey.key", "mycert.crt", config="test.cnf") + mock_call.assert_called_with(['/usr/bin/openssl', 'req', '-new', + '-newkey', 'rsa:1024', '-days', '365', + '-nodes', '-x509', '-keyout', + 'mykey.key', '-out', 'mycert.crt', + '-config', 'test.cnf']) + + @patch('subprocess.check_call') + def test_generate_selfsigned_cn_key(self, mock_call): + ssl.generate_selfsigned("mykey.key", "mycert.crt", keysize="2048", cn="mysite.example.com") + mock_call.assert_called_with(['/usr/bin/openssl', 'req', '-new', + '-newkey', 'rsa:2048', '-days', '365', + '-nodes', '-x509', '-keyout', + 'mykey.key', '-out', 'mycert.crt', + '-subj', '/CN=mysite.example.com']) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_bcache.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_bcache.py new file mode 100644 index 
0000000000000000000000000000000000000000..847828d78bdc170c20fac5e23eb18d675aea492e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_bcache.py @@ -0,0 +1,90 @@ +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import json +from mock import patch +from testtools import TestCase +from tempfile import mkdtemp +from charmhelpers.contrib.storage.linux import bcache + +test_stats = { + 'bypassed': '128G\n', + 'cache_bypass_hits': '1132623\n', + 'cache_bypass_misses': '0\n', + 'cache_hit_ratio': '64\n', + 'cache_hits': '12177090\n', + 'cache_miss_collisions': '7091\n', + 'cache_misses': '6717011\n', + 'cache_readaheads': '0\n', +} + +tmpdir = 'bcache-stats-test.' +cacheset = 'abcde' +cachedev = 'sdfoo' + + +class BcacheTestCase(TestCase): + def setUp(self): + super(BcacheTestCase, self).setUp() + self.sysfs = sysfs = mkdtemp(prefix=tmpdir) + self.addCleanup(shutil.rmtree, sysfs) + p = patch('charmhelpers.contrib.storage.linux.bcache.SYSFS', new=sysfs) + p.start() + self.addCleanup(p.stop) + self.cacheset = '{}/fs/bcache/{}'.format(sysfs, cacheset) + os.makedirs(self.cacheset) + self.devcache = '{}/block/{}/bcache'.format(sysfs, cachedev) + for n in ['register', 'register_quiet']: + with open('{}/fs/bcache/{}'.format(sysfs, n), 'w') as f: + f.write('foo') + for kind in self.cacheset, self.devcache: + for sub in bcache.stats_intervals: + intvaldir = '{}/{}'.format(kind, sub) + os.makedirs(intvaldir) + for fn, val in test_stats.items(): + with open(os.path.join(intvaldir, fn), 'w') as f: + f.write(val) + + def test_get_bcache_fs(self): + bcachedirs = bcache.get_bcache_fs() + assert len(bcachedirs) == 1 + assert next(iter(bcachedirs)).cachepath.endswith('/fs/bcache/abcde') + + @patch('charmhelpers.contrib.storage.linux.bcache.log', lambda *args, **kwargs: None) + @patch('charmhelpers.contrib.storage.linux.bcache.os.listdir') + def test_get_bcache_fs_nobcache(self, mock_listdir): + mock_listdir.side_effect = OSError( + '[Errno 2] No such file or directory:...') + bcachedirs = bcache.get_bcache_fs() + assert bcachedirs == [] + + def test_get_stats_global(self): + out = bcache.get_stats_action( + 'global', 'hour') + out = json.loads(out) + assert len(out.keys()) == 1 + k = next(iter(out.keys())) + assert k.endswith(cacheset) + assert out[k]['bypassed'] == '128G' + + def test_get_stats_dev(self): + out = bcache.get_stats_action( + cachedev, 'hour') + out = json.loads(out) + assert len(out.keys()) == 1 + k = next(iter(out.keys())) + assert k.endswith('sdfoo/bcache') + assert out[k]['cache_hit_ratio'] == '64' diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_ceph.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_ceph.py new file mode 100644 index 0000000000000000000000000000000000000000..53c63a18f9b86316ba1d86b2634eeb6dd645ea2a --- /dev/null +++ 
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_ceph.py @@ -0,0 +1,1893 @@ +from mock import patch, call, mock_open + +import collections +import six +import errno +from shutil import rmtree +from tempfile import mkdtemp +from threading import Timer +from testtools import TestCase +import json +import copy +import shutil + +import charmhelpers.contrib.storage.linux.ceph as ceph_utils + +from charmhelpers.core.unitdata import Storage +from subprocess import CalledProcessError +from tests.helpers import patch_open, FakeRelation +import nose.plugins.attrib +import os +import time + +LS_POOLS = b""" +.rgw.foo +images +volumes +rbd +""" + +LS_RBDS = b""" +rbd1 +rbd2 +rbd3 +""" + +IMG_MAP = b""" +bar +baz +""" +# Vastly abbreviated output from ceph osd dump --format=json +OSD_DUMP = b""" +{ + "pools": [ + { + "pool": 2, + "pool_name": "rbd", + "flags": 1, + "flags_names": "hashpspool", + "type": 1, + "size": 3, + "min_size": 2, + "crush_ruleset": 0, + "object_hash": 2, + "pg_num": 64, + "pg_placement_num": 64, + "crash_replay_interval": 0, + "last_change": "1", + "last_force_op_resend": "0", + "auid": 0, + "snap_mode": "selfmanaged", + "snap_seq": 0, + "snap_epoch": 0, + "pool_snaps": [], + "removed_snaps": "[]", + "quota_max_bytes": 0, + "quota_max_objects": 0, + "tiers": [], + "tier_of": -1, + "read_tier": -1, + "write_tier": -1, + "cache_mode": "writeback", + "target_max_bytes": 0, + "target_max_objects": 0, + "cache_target_dirty_ratio_micro": 0, + "cache_target_full_ratio_micro": 0, + "cache_min_flush_age": 0, + "cache_min_evict_age": 0, + "erasure_code_profile": "", + "hit_set_params": { + "type": "none" + }, + "hit_set_period": 0, + "hit_set_count": 0, + "stripe_width": 0 + } + ] +} +""" + +MONMAP_DUMP = b"""{ + "name": "ip-172-31-13-119", "rank": 0, "state": "leader", + "election_epoch": 18, "quorum": [0, 1, 2], + "outside_quorum": [], + "extra_probe_peers": [], + "sync_provider": [], + "monmap": { + "epoch": 1, + "fsid": "9fdc313c-db30-11e5-9805-0242fda74275", + "modified": "0.000000", + "created": "0.000000", + "mons": [ + { + "rank": 0, + "name": "ip-172-31-13-119", + "addr": "172.31.13.119:6789\\/0"}, + { + "rank": 1, + "name": "ip-172-31-24-50", + "addr": "172.31.24.50:6789\\/0"}, + { + "rank": 2, + "name": "ip-172-31-33-107", + "addr": "172.31.33.107:6789\\/0"} + ]}}""" + +CEPH_CLIENT_RELATION = { + 'ceph:8': { + 'ceph/0': { + 'auth': 'cephx', + 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', + 'broker-rsp-glance-1': '{"request-id": "0880e22a", "exit-code": 0}', + 'broker-rsp-glance-2': '{"request-id": "0da543b8", "exit-code": 0}', + 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', + 'ceph-public-address': '10.5.44.103', + 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', + 'private-address': '10.5.44.103', + }, + 'ceph/1': { + 'auth': 'cephx', + 'ceph-public-address': '10.5.44.104', + 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', + 'private-address': '10.5.44.104', + }, + 'ceph/2': { + 'auth': 'cephx', + 'ceph-public-address': '10.5.44.105', + 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', + 'private-address': '10.5.44.105', + }, + 'glance/0': { + 'broker_req': '{"api-version": 1, "request-id": "0bc7dc54", "ops": [{"replicas": 3, "name": "glance", "op": "create-pool"}]}', + 'private-address': '10.5.44.109', + }, + } +} + +CEPH_CLIENT_RELATION_LEGACY = copy.deepcopy(CEPH_CLIENT_RELATION) +CEPH_CLIENT_RELATION_LEGACY['ceph:8']['ceph/0'] = { + 'auth': 'cephx', + 'broker_rsp': '{"exit-code": 0}', + 
'ceph-public-address': '10.5.44.103', + 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', + 'private-address': '10.5.44.103', +} + + +class TestConfig(): + + def __init__(self): + self.config = {} + + def set(self, key, value): + self.config[key] = value + + def get(self, key): + return self.config.get(key) + + +class CephBasicUtilsTests(TestCase): + def setUp(self): + super(CephBasicUtilsTests, self).setUp() + [self._patch(m) for m in [ + 'check_output', + ]] + + def _patch(self, method): + _m = patch.object(ceph_utils, method) + mock = _m.start() + self.addCleanup(_m.stop) + setattr(self, method, mock) + + def test_enabled_manager_modules(self): + self.check_output.return_value = b'{"enabled_modules": []}' + ceph_utils.enabled_manager_modules() + self.check_output.assert_called_once_with(['ceph', 'mgr', 'module', 'ls']) + + +class CephUtilsTests(TestCase): + def setUp(self): + super(CephUtilsTests, self).setUp() + [self._patch(m) for m in [ + 'check_call', + 'check_output', + 'config', + 'relation_get', + 'related_units', + 'relation_ids', + 'relation_set', + 'log', + 'cmp_pkgrevno', + 'enabled_manager_modules', + ]] + # Ensure the config is setup for mocking properly. + self.test_config = TestConfig() + self.config.side_effect = self.test_config.get + self.cmp_pkgrevno.return_value = 1 + self.enabled_manager_modules.return_value = [] + + def _patch(self, method): + _m = patch.object(ceph_utils, method) + mock = _m.start() + self.addCleanup(_m.stop) + setattr(self, method, mock) + + def _get_osd_settings_test_helper(self, settings, expected=None): + units = { + 'client:1': ['ceph-iscsi/1', 'ceph-iscsi/2'], + 'client:3': ['cinder-ceph/0', 'cinder-ceph/3']} + self.relation_ids.return_value = units.keys() + self.related_units.side_effect = lambda x: units[x] + self.relation_get.side_effect = lambda x, y, z: settings[y] + if expected: + self.assertEqual( + ceph_utils.get_osd_settings('client'), + expected) + else: + ceph_utils.get_osd_settings('client'), + + def test_get_osd_settings_all_unset(self): + settings = { + 'ceph-iscsi/1': None, + 'ceph-iscsi/2': None, + 'cinder-ceph/0': None, + 'cinder-ceph/3': None} + self._get_osd_settings_test_helper(settings, {}) + + def test_get_osd_settings_one_group_set(self): + settings = { + 'ceph-iscsi/1': '{"osd heartbeat grace": 5}', + 'ceph-iscsi/2': '{"osd heartbeat grace": 5}', + 'cinder-ceph/0': '{"osd heartbeat interval": 25}', + 'cinder-ceph/3': '{"osd heartbeat interval": 25}'} + self._get_osd_settings_test_helper( + settings, + {'osd heartbeat interval': 25, + 'osd heartbeat grace': 5}) + + def test_get_osd_settings_invalid_option(self): + settings = { + 'ceph-iscsi/1': '{"osd foobar": 5}', + 'ceph-iscsi/2': None, + 'cinder-ceph/0': None, + 'cinder-ceph/3': None} + self.assertRaises( + ceph_utils.OSDSettingNotAllowed, + self._get_osd_settings_test_helper, + settings) + + def test_get_osd_settings_conflicting_options(self): + settings = { + 'ceph-iscsi/1': '{"osd heartbeat grace": 5}', + 'ceph-iscsi/2': None, + 'cinder-ceph/0': '{"osd heartbeat grace": 6}', + 'cinder-ceph/3': None} + self.assertRaises( + ceph_utils.OSDSettingConflict, + self._get_osd_settings_test_helper, + settings) + + @patch.object(ceph_utils, 'get_osd_settings') + def test_send_osd_settings(self, _get_osd_settings): + self.relation_ids.return_value = ['client:1', 'client:3'] + _get_osd_settings.return_value = { + 'osd heartbeat grace': 5, + 'osd heartbeat interval': 25} + ceph_utils.send_osd_settings() + expected_calls = [ + call( + relation_id='client:1', + 
relation_settings={
+                    'osd-settings': ('{"osd heartbeat grace": 5, '
+                                     '"osd heartbeat interval": 25}')}),
+            call(
+                relation_id='client:3',
+                relation_settings={
+                    'osd-settings': ('{"osd heartbeat grace": 5, '
+                                     '"osd heartbeat interval": 25}')})]
+        self.relation_set.assert_has_calls(expected_calls, any_order=True)
+
+    @patch.object(ceph_utils, 'get_osd_settings')
+    def test_send_osd_settings_bad_settings(self, _get_osd_settings):
+        _get_osd_settings.side_effect = ceph_utils.OSDSettingConflict()
+        ceph_utils.send_osd_settings()
+        self.assertFalse(self.relation_set.called)
+
+    def test_validator_valid(self):
+        # 1 is an int
+        ceph_utils.validator(value=1,
+                             valid_type=int)
+
+    def test_validator_valid_range(self):
+        # 1 is an int between 0 and 2
+        ceph_utils.validator(value=1,
+                             valid_type=int,
+                             valid_range=[0, 2])
+
+    def test_validator_invalid_range(self):
+        # 1 is an int that isn't in the valid list of only 0
+        self.assertRaises(ValueError, ceph_utils.validator,
+                          value=1,
+                          valid_type=int,
+                          valid_range=[0])
+
+    def test_validator_invalid_string_list(self):
+        # foo is a six.string_types that isn't in the valid string list
+        self.assertRaises(AssertionError, ceph_utils.validator,
+                          value="foo",
+                          valid_type=six.string_types,
+                          valid_range=["valid", "list", "of", "strings"])
+
+    def test_validator_valid_string(self):
+        ceph_utils.validator(value="foo",
+                             valid_type=six.string_types,
+                             valid_range=["foo"])
+
+    def test_validator_valid_string_type(self):
+        ceph_utils.validator(value="foo",
+                             valid_type=str,
+                             valid_range=["foo"])
+
+    def test_pool_add_cache_tier(self):
+        p = ceph_utils.Pool(name='test', service='admin')
+        p.add_cache_tier('cacher', 'readonly')
+        self.check_call.assert_has_calls([
+            call(['ceph', '--id', 'admin', 'osd', 'tier', 'add', 'test', 'cacher']),
+            call(['ceph', '--id', 'admin', 'osd', 'tier', 'cache-mode', 'cacher', 'readonly']),
+            call(['ceph', '--id', 'admin', 'osd', 'tier', 'set-overlay', 'test', 'cacher']),
+            call(['ceph', '--id', 'admin', 'osd', 'pool', 'set', 'cacher', 'hit_set_type', 'bloom']),
+        ])
+
+    @patch.object(ceph_utils, 'get_cache_mode')
+    def test_pool_remove_readonly_cache_tier(self, cache_mode):
+        cache_mode.return_value = 'readonly'
+
+        p = ceph_utils.Pool(name='test', service='admin')
+        p.remove_cache_tier(cache_pool='cacher')
+        self.check_call.assert_has_calls([
+            call(['ceph', '--id', 'admin', 'osd', 'tier', 'cache-mode', 'cacher', 'none']),
+            call(['ceph', '--id', 'admin', 'osd', 'tier', 'remove', 'test', 'cacher']),
+        ])
+
+    @patch.object(ceph_utils, 'get_cache_mode')
+    def test_pool_remove_writeback_cache_tier(self, cache_mode):
+        cache_mode.return_value = 'writeback'
+        self.cmp_pkgrevno.return_value = 1
+
+        p = ceph_utils.Pool(name='test', service='admin')
+        p.remove_cache_tier(cache_pool='cacher')
+        self.check_call.assert_has_calls([
+            call(['ceph', '--id', 'admin', 'osd', 'tier', 'cache-mode', 'cacher', 'forward',
+                  '--yes-i-really-mean-it']),
+            call(['rados', '--id', 'admin', '-p', 'cacher', 'cache-flush-evict-all']),
+            call(['ceph', '--id', 'admin', 'osd', 'tier', 'remove-overlay', 'test']),
+            call(['ceph', '--id', 'admin', 'osd', 'tier', 'remove', 'test', 'cacher']),
+        ])
+
+    @patch.object(ceph_utils, 'get_osds')
+    def test_get_pg_num_pg_calc_values(self, get_osds):
+        """Tests that the calculated pg num is correct in the normal case"""
+        # Check the growth case ... e.g. 200 PGs per OSD if the cluster is
+        # expected to grow in the near future.
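+        # A sketch of the pgcalc heuristic these assertions encode (an
+        # approximation; the real logic lives in Pool.get_pgs):
+        #     pg_num = (#OSDs * pgs-per-osd * percent_data) / pool_size,
+        # rounded to the nearest power of two, e.g. for the first case:
+        #     10 * 200 * 0.40 / 3 = 266.7  ->  256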
+ get_osds.return_value = range(1, 11) + self.test_config.set('pgs-per-osd', 200) + p = ceph_utils.Pool(name='test', service='admin') + + # For Pool Size of 3, 200 PGs/OSD, and 40% of the overall data, + # the pg num should be 256 + pg_num = p.get_pgs(pool_size=3, percent_data=40) + self.assertEqual(256, pg_num) + + self.test_config.set('pgs-per-osd', 300) + pg_num = p.get_pgs(pool_size=3, percent_data=100) + self.assertEquals(1024, pg_num) + + # Tests the case in which the expected OSD count is provided (and is + # greater than the found OSD count). + self.test_config.set('pgs-per-osd', 100) + self.test_config.set('expected-osd-count', 20) + pg_num = p.get_pgs(pool_size=3, percent_data=100) + self.assertEquals(512, pg_num) + + # Test small % weight with minimal OSD count (3) + get_osds.return_value = range(1, 3) + self.test_config.set('expected-osd-count', None) + self.test_config.set('pgs-per-osd', None) + pg_num = p.get_pgs(pool_size=3, percent_data=0.1) + self.assertEquals(2, pg_num) + + # Check device_class is passed to get_osds + p.get_pgs(pool_size=3, percent_data=90, device_class='nvme') + get_osds.assert_called_with('admin', 'nvme') + + @patch.object(ceph_utils, 'get_osds') + def test_replicated_pool_create_old_ceph(self, get_osds): + self.cmp_pkgrevno.return_value = -1 + get_osds.return_value = None + p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3) + p.create() + + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'create', 'test', str(200)]), + call(['ceph', '--id', 'admin', 'osd', 'pool', 'set', 'test', 'size', str(3)]), + ]) + self.assertEqual(self.check_call.call_count, 2) + + @patch.object(ceph_utils, 'get_osds') + def test_replicated_pool_create_luminous_ceph(self, get_osds): + self.cmp_pkgrevno.side_effect = [-1, 1] + get_osds.return_value = None + p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3) + p.create() + + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', + 'pool', 'create', 'test', str(200)]), + call(['ceph', '--id', 'admin', 'osd', + 'pool', 'set', 'test', 'size', str(3)]), + call(['ceph', '--id', 'admin', 'osd', 'pool', + 'application', 'enable', 'test', 'unknown']) + ]) + self.assertEqual(self.check_call.call_count, 3) + + @patch.object(ceph_utils, 'get_osds') + def test_replicated_pool_create_small_osds(self, get_osds): + get_osds.return_value = range(1, 5) + self.cmp_pkgrevno.return_value = -1 + p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3, + percent_data=10) + p.create() + + # Using the PG Calc, for 4 OSDs with a size of 3 and 10% of the data + # at 100 PGs/OSD, the number of expected placement groups will be 16 + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'create', 'test', + '16']), + ]) + + @patch.object(ceph_utils, 'get_osds') + def test_replicated_pool_create_medium_osds(self, get_osds): + self.cmp_pkgrevno.return_value = -1 + get_osds.return_value = range(1, 9) + p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3, + percent_data=50) + p.create() + + # Using the PG Calc, for 8 OSDs with a size of 3 and 50% of the data + # at 100 PGs/OSD, the number of expected placement groups will be 128 + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'create', 'test', + '128']), + call(['ceph', '--id', 'admin', 'osd', 'pool', 'set', 'test', 'size', '3']), + ]) + + @patch.object(ceph_utils, 'get_osds') + def 
test_replicated_pool_create_autoscaler(self, get_osds):
+        self.enabled_manager_modules.return_value = ['pg_autoscaler']
+        self.cmp_pkgrevno.return_value = 1
+        get_osds.return_value = range(1, 9)
+        p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3,
+                                      percent_data=50)
+        p.create()
+        # Using the PG Calc, for 8 OSDs with a size of 3 and 50% of the data
+        # at 100 PGs/OSD, the number of expected placement groups will be 128
+        self.check_call.assert_has_calls([
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'create', '--pg-num-min=32', 'test', '128']),
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'set', 'test', 'size', '3']),
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'set', 'test', 'target_size_ratio', '0.5']),
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'application', 'enable', 'test', 'unknown']),
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'set', 'test', 'pg_autoscale_mode', 'on'])
+        ])
+
+    @patch.object(ceph_utils, 'get_osds')
+    def test_replicated_pool_create_autoscaler_small(self, get_osds):
+        self.enabled_manager_modules.return_value = ['pg_autoscaler']
+        self.cmp_pkgrevno.return_value = 1
+        get_osds.return_value = range(1, 3)
+        p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3,
+                                      percent_data=1)
+        p.create()
+        # Using the PG Calc, for 2 OSDs with a size of 3 and 1% of the data
+        # at 100 PGs/OSD, the calculated placement group count is clamped to
+        # the minimum of 2
+        self.check_call.assert_has_calls([
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'create', '--pg-num-min=2', 'test', '2']),
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'set', 'test', 'size', '3']),
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'set', 'test', 'target_size_ratio', '0.01']),
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'application', 'enable', 'test', 'unknown']),
+            call(['ceph', '--id', 'admin', 'osd', 'pool',
+                  'set', 'test', 'pg_autoscale_mode', 'on'])
+        ])
+
+    @patch.object(ceph_utils, 'get_osds')
+    def test_replicated_pool_create_large_osds(self, get_osds):
+        get_osds.return_value = range(1, 41)
+        self.cmp_pkgrevno.return_value = -1
+        p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3,
+                                      percent_data=100)
+        p.create()
+
+        # Using the PG Calc, for 40 OSDs with a size of 3 and 100% of the
+        # data at 100 PGs/OSD, the number of expected placement groups
+        # will be 1024.
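+        # Same heuristic, worked for this case (assuming nearest-power-of-two
+        # rounding): 40 * 100 * 1.0 / 3 = 1333.3  ->  1024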
+ self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'create', 'test', + '1024']), + ]) + + @patch.object(ceph_utils, 'get_osds') + def test_replicated_pool_create_xlarge_osds(self, get_osds): + get_osds.return_value = range(1, 1001) + self.cmp_pkgrevno.return_value = -1 + p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3, + percent_data=100) + p.create() + + # Using the PG Calc, for 1,000 OSDs with a size of 3 and 100% of the + # data at 100 PGs/OSD then the number of expected placement groups + # will be 32768 + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'create', 'test', + '32768']), + ]) + + @patch.object(ceph_utils, 'get_osds') + def test_replicated_pool_create_failed(self, get_osds): + get_osds.return_value = range(1, 1001) + self.check_call.side_effect = CalledProcessError(returncode=1, + cmd='mock', + output=None) + p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3) + self.assertRaises(CalledProcessError, p.create) + + @patch.object(ceph_utils, 'get_osds') + @patch.object(ceph_utils, 'pool_exists') + def test_replicated_pool_skips_creation(self, pool_exists, get_osds): + get_osds.return_value = range(1, 1001) + pool_exists.return_value = True + p = ceph_utils.ReplicatedPool(name='test', service='admin', replicas=3) + p.create() + self.check_call.assert_has_calls([]) + + def test_erasure_pool_create_failed(self): + self.check_output.side_effect = CalledProcessError(returncode=1, + cmd='ceph', + output=None) + p = ceph_utils.ErasurePool('test', 'admin', 'foo') + self.assertRaises(ceph_utils.PoolCreationError, p.create) + + @patch.object(ceph_utils, 'get_erasure_profile') + @patch.object(ceph_utils, 'get_osds') + def test_erasure_pool_create(self, get_osds, erasure_profile): + self.cmp_pkgrevno.return_value = 1 + get_osds.return_value = range(1, 60) + erasure_profile.return_value = { + 'directory': '/usr/lib/x86_64-linux-gnu/ceph/erasure-code', + 'k': '2', + 'technique': 'reed_sol_van', + 'm': '1', + 'plugin': 'jerasure'} + p = ceph_utils.ErasurePool(name='test', service='admin', + percent_data=100) + p.create() + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'create', + '--pg-num-min=32', 'test', + '2048', '2048', 'erasure', 'default']), + call(['ceph', '--id', 'admin', 'osd', 'pool', + 'application', 'enable', 'test', 'unknown']) + ]) + + @patch.object(ceph_utils, 'get_erasure_profile') + @patch.object(ceph_utils, 'get_osds') + def test_erasure_pool_create_autoscaler(self, + get_osds, + erasure_profile): + self.enabled_manager_modules.return_value = ['pg_autoscaler'] + self.cmp_pkgrevno.return_value = 1 + get_osds.return_value = range(1, 60) + erasure_profile.return_value = { + 'directory': '/usr/lib/x86_64-linux-gnu/ceph/erasure-code', + 'k': '2', + 'technique': 'reed_sol_van', + 'm': '1', + 'plugin': 'jerasure'} + p = ceph_utils.ErasurePool(name='test', service='admin', + percent_data=100) + p.create() + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', + 'create', '--pg-num-min=32', 'test', + '2048', '2048', 'erasure', 'default']), + call(['ceph', '--id', 'admin', 'osd', 'pool', + 'application', 'enable', 'test', 'unknown']), + call(['ceph', '--id', 'admin', 'osd', 'pool', + 'set', 'test', 'target_size_ratio', '1.0']), + call(['ceph', '--id', 'admin', 'osd', 'pool', + 'set', 'test', 'pg_autoscale_mode', 'on']), + ]) + + def test_get_erasure_profile_none(self): + self.check_output.side_effect = 
CalledProcessError(1, 'ceph') + return_value = ceph_utils.get_erasure_profile('admin', 'unknown') + self.assertEqual(None, return_value) + + def test_pool_set_int(self): + self.check_call.return_value = 0 + ceph_utils.pool_set(service='admin', pool_name='data', key='test', value=2) + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'set', 'data', 'test', '2']) + ]) + + def test_pool_set_bool(self): + self.check_call.return_value = 0 + ceph_utils.pool_set(service='admin', pool_name='data', key='test', value=True) + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'set', 'data', 'test', 'true']) + ]) + + def test_pool_set_str(self): + self.check_call.return_value = 0 + ceph_utils.pool_set(service='admin', pool_name='data', key='test', value='two') + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'set', 'data', 'test', 'two']) + ]) + + def test_pool_set_fails(self): + self.check_call.side_effect = CalledProcessError(returncode=1, cmd='mock', + output=None) + self.assertRaises(CalledProcessError, ceph_utils.pool_set, + service='admin', pool_name='data', key='test', value=2) + + def test_snapshot_pool(self): + self.check_call.return_value = 0 + ceph_utils.snapshot_pool(service='admin', pool_name='data', snapshot_name='test-snap-1') + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'mksnap', 'data', 'test-snap-1']) + ]) + + def test_snapshot_pool_fails(self): + self.check_call.side_effect = CalledProcessError(returncode=1, cmd='mock', + output=None) + self.assertRaises(CalledProcessError, ceph_utils.snapshot_pool, + service='admin', pool_name='data', snapshot_name='test-snap-1') + + def test_remove_pool_snapshot(self): + self.check_call.return_value = 0 + ceph_utils.remove_pool_snapshot(service='admin', pool_name='data', snapshot_name='test-snap-1') + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'rmsnap', 'data', 'test-snap-1']) + ]) + + def test_set_pool_quota(self): + self.check_call.return_value = 0 + ceph_utils.set_pool_quota(service='admin', pool_name='data', + max_bytes=1024) + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'set-quota', 'data', + 'max_bytes', '1024']) + ]) + ceph_utils.set_pool_quota(service='admin', pool_name='data', + max_objects=1024) + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'set-quota', 'data', + 'max_objects', '1024']) + ]) + ceph_utils.set_pool_quota(service='admin', pool_name='data', + max_bytes=1024, max_objects=1024) + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'set-quota', 'data', + 'max_bytes', '1024', 'max_objects', '1024']) + ]) + + def test_remove_pool_quota(self): + self.check_call.return_value = 0 + ceph_utils.remove_pool_quota(service='admin', pool_name='data') + self.check_call.assert_has_calls([ + call(['ceph', '--id', 'admin', 'osd', 'pool', 'set-quota', 'data', 'max_bytes', '0']) + ]) + + @patch.object(ceph_utils, 'erasure_profile_exists') + def test_create_erasure_profile(self, existing_profile): + existing_profile.return_value = True + self.cmp_pkgrevno.return_value = -1 + ceph_utils.create_erasure_profile(service='admin', profile_name='super-profile', erasure_plugin_name='jerasure', + failure_domain='rack', data_chunks=10, coding_chunks=3) + + cmd = ['ceph', '--id', 'admin', 'osd', 'erasure-code-profile', 'set', 'super-profile', + 'plugin=' + 'jerasure', 
'k=' + str(10), 'm=' + str(3), + 'ruleset-failure-domain=' + 'rack', '--force'] + self.check_call.assert_has_calls([call(cmd)]) + + self.cmp_pkgrevno.return_value = 1 + ceph_utils.create_erasure_profile(service='admin', profile_name='super-profile', erasure_plugin_name='jerasure', + failure_domain='rack', data_chunks=10, coding_chunks=3) + + cmd = ['ceph', '--id', 'admin', 'osd', 'erasure-code-profile', 'set', 'super-profile', + 'plugin=' + 'jerasure', 'k=' + str(10), 'm=' + str(3), + 'crush-failure-domain=' + 'rack', '--force'] + self.check_call.assert_has_calls([call(cmd)]) + + @patch.object(ceph_utils, 'erasure_profile_exists') + def test_create_erasure_profile_local(self, existing_profile): + self.cmp_pkgrevno.return_value = -1 + existing_profile.return_value = False + ceph_utils.create_erasure_profile(service='admin', profile_name='super-profile', erasure_plugin_name='local', + failure_domain='rack', data_chunks=10, coding_chunks=3, locality=1) + + cmd = ['ceph', '--id', 'admin', 'osd', 'erasure-code-profile', 'set', 'super-profile', + 'plugin=' + 'local', 'k=' + str(10), 'm=' + str(3), + 'ruleset-failure-domain=' + 'rack', 'l=' + str(1)] + self.check_call.assert_has_calls([call(cmd)]) + + @patch.object(ceph_utils, 'erasure_profile_exists') + def test_create_erasure_profile_shec(self, existing_profile): + self.cmp_pkgrevno.return_value = -1 + existing_profile.return_value = False + ceph_utils.create_erasure_profile(service='admin', profile_name='super-profile', erasure_plugin_name='shec', + failure_domain='rack', data_chunks=10, coding_chunks=3, + durability_estimator=1) + + cmd = ['ceph', '--id', 'admin', 'osd', 'erasure-code-profile', 'set', 'super-profile', + 'plugin=' + 'shec', 'k=' + str(10), 'm=' + str(3), + 'ruleset-failure-domain=' + 'rack', 'c=' + str(1)] + self.check_call.assert_has_calls([call(cmd)]) + + def test_rename_pool(self): + ceph_utils.rename_pool(service='admin', old_name='old-pool', new_name='new-pool') + cmd = ['ceph', '--id', 'admin', 'osd', 'pool', 'rename', 'old-pool', 'new-pool'] + self.check_call.assert_called_with(cmd) + + def test_erasure_profile_exists(self): + self.check_call.return_value = 0 + profile_exists = ceph_utils.erasure_profile_exists(service='admin', name='super-profile') + cmd = ['ceph', '--id', 'admin', + 'osd', 'erasure-code-profile', 'get', + 'super-profile'] + self.check_call.assert_called_with(cmd) + self.assertEqual(True, profile_exists) + + def test_set_monitor_key(self): + cmd = ['ceph', '--id', 'admin', + 'config-key', 'put', 'foo', 'bar'] + ceph_utils.monitor_key_set(service='admin', key='foo', value='bar') + self.check_output.assert_called_with(cmd) + + def test_get_monitor_key(self): + cmd = ['ceph', '--id', 'admin', + 'config-key', 'get', 'foo'] + ceph_utils.monitor_key_get(service='admin', key='foo') + self.check_output.assert_called_with(cmd) + + def test_get_monitor_key_failed(self): + self.check_output.side_effect = CalledProcessError( + returncode=2, + cmd='ceph', + output='key foo does not exist') + output = ceph_utils.monitor_key_get(service='admin', key='foo') + self.assertEqual(None, output) + + def test_monitor_key_exists(self): + cmd = ['ceph', '--id', 'admin', + 'config-key', 'exists', 'foo'] + ceph_utils.monitor_key_exists(service='admin', key='foo') + self.check_call.assert_called_with(cmd) + + def test_monitor_key_doesnt_exist(self): + self.check_call.side_effect = CalledProcessError( + returncode=2, + cmd='ceph', + output='key foo does not exist') + output = ceph_utils.monitor_key_exists(service='admin', 
key='foo')
+        self.assertEqual(False, output)
+
+    def test_delete_monitor_key(self):
+        ceph_utils.monitor_key_delete(service='admin', key='foo')
+        cmd = ['ceph', '--id', 'admin',
+               'config-key', 'del', 'foo']
+        self.check_output.assert_called_with(cmd)
+
+    def test_delete_monitor_key_failed(self):
+        self.check_output.side_effect = CalledProcessError(
+            returncode=2,
+            cmd='ceph',
+            output='deletion failed')
+        self.assertRaises(CalledProcessError, ceph_utils.monitor_key_delete,
+                          service='admin', key='foo')
+
+    def test_get_monmap(self):
+        self.check_output.return_value = MONMAP_DUMP
+        cmd = ['ceph', '--id', 'admin',
+               'mon_status', '--format=json']
+        ceph_utils.get_mon_map(service='admin')
+        self.check_output.assert_called_with(cmd)
+
+    @patch.object(ceph_utils, 'get_mon_map')
+    def test_hash_monitor_names(self, monmap):
+        expected_hash_list = [
+            '010d57d581604d411b315dd64112bff832ab92c7323fa06077134b50',
+            '8e0a9705c1aeafa1ce250cc9f1bb443fc6e5150e5edcbeb6eeb82e3c',
+            'c3f8d36ba098c23ee920cb08cfb9beda6b639f8433637c190bdd56ec']
+        _monmap_dump = MONMAP_DUMP
+        if six.PY3:
+            _monmap_dump = _monmap_dump.decode('UTF-8')
+        monmap.return_value = json.loads(_monmap_dump)
+        hashed_mon_list = ceph_utils.hash_monitor_names(service='admin')
+        self.assertEqual(expected=expected_hash_list, observed=hashed_mon_list)
+
+    def test_get_cache_mode(self):
+        self.check_output.return_value = OSD_DUMP
+        cache_mode = ceph_utils.get_cache_mode(service='admin', pool_name='rbd')
+        self.assertEqual("writeback", cache_mode)
+
+    @patch('os.path.exists')
+    def test_add_key(self, _exists):
+        """It creates a new ceph keyring"""
+        _exists.return_value = False
+        ceph_utils.add_key('cinder', 'cephkey')
+        _cmd = ['ceph-authtool', '/etc/ceph/ceph.client.cinder.keyring',
+                '--create-keyring', '--name=client.cinder',
+                '--add-key=cephkey']
+        self.check_call.assert_called_with(_cmd)
+
+    @patch('os.path.exists')
+    def test_add_key_already_exists(self, _exists):
+        """It should insert the key into the existing keyring"""
+        _exists.return_value = True
+        try:
+            with patch("__builtin__.open", mock_open(read_data="foo")):
+                ceph_utils.add_key('cinder', 'cephkey')
+        except ImportError:  # Python3
+            with patch("builtins.open", mock_open(read_data="foo")):
+                ceph_utils.add_key('cinder', 'cephkey')
+        self.assertTrue(self.log.called)
+        _cmd = ['ceph-authtool', '/etc/ceph/ceph.client.cinder.keyring',
+                '--create-keyring', '--name=client.cinder',
+                '--add-key=cephkey']
+        self.check_call.assert_called_with(_cmd)
+
+    @patch('os.path.exists')
+    def test_add_key_already_exists_and_key_exists(self, _exists):
+        """Nothing should happen, apart from a log message"""
+        _exists.return_value = True
+        try:
+            with patch("__builtin__.open", mock_open(read_data="cephkey")):
+                ceph_utils.add_key('cinder', 'cephkey')
+        except ImportError:  # Python3
+            with patch("builtins.open", mock_open(read_data="cephkey")):
+                ceph_utils.add_key('cinder', 'cephkey')
+        self.assertTrue(self.log.called)
+        self.check_call.assert_not_called()
+
+    @patch('os.remove')
+    @patch('os.path.exists')
+    def test_delete_keyring(self, _exists, _remove):
+        """It deletes a ceph keyring."""
+        _exists.return_value = True
+        ceph_utils.delete_keyring('cinder')
+        _remove.assert_called_with('/etc/ceph/ceph.client.cinder.keyring')
+        self.assertTrue(self.log.called)
+
+    @patch('os.remove')
+    @patch('os.path.exists')
+    def test_delete_keyring_not_exists(self, _exists, _remove):
+        """It removes nothing when the keyring does not exist."""
+        _exists.return_value = False
+        ceph_utils.delete_keyring('cinder')
+        self.assertTrue(self.log.called)
+        _remove.assert_not_called()
+
+    @patch('os.path.exists')
+    def test_create_keyfile(self, _exists):
+        """It creates a new ceph keyfile"""
+        _exists.return_value = False
+        with patch_open() as (_open, _file):
+            ceph_utils.create_key_file('cinder', 'cephkey')
+            _file.write.assert_called_with('cephkey')
+        self.assertTrue(self.log.called)
+
+    @patch('os.path.exists')
+    def test_create_key_file_already_exists(self, _exists):
+        """It skips creation when the ceph keyfile already exists"""
+        _exists.return_value = True
+        ceph_utils.create_key_file('cinder', 'cephkey')
+        self.assertTrue(self.log.called)
+
+    @patch('os.mkdir')
+    @patch.object(ceph_utils, 'apt_install')
+    @patch('os.path.exists')
+    def test_install(self, _exists, _install, _mkdir):
+        _exists.return_value = False
+        ceph_utils.install()
+        _mkdir.assert_called_with('/etc/ceph')
+        _install.assert_called_with('ceph-common', fatal=True)
+
+    def test_get_osds(self):
+        self.check_output.return_value = json.dumps([1, 2, 3]).encode('UTF-8')
+        self.assertEquals(ceph_utils.get_osds('test'), [1, 2, 3])
+
+    def test_get_osds_none(self):
+        self.check_output.return_value = json.dumps(None).encode('UTF-8')
+        self.assertEquals(ceph_utils.get_osds('test'), None)
+
+    def test_get_osds_device_class(self):
+        self.check_output.return_value = json.dumps([1, 2, 3]).encode('UTF-8')
+        self.assertEquals(ceph_utils.get_osds('test', 'nvme'), [1, 2, 3])
+        self.check_output.assert_called_once_with(
+            ['ceph', '--id', 'test',
+             'osd', 'crush', 'class',
+             'ls-osd', 'nvme', '--format=json']
+        )
+
+    def test_get_osds_device_class_older(self):
+        self.check_output.return_value = json.dumps([1, 2, 3]).encode('UTF-8')
+        self.cmp_pkgrevno.return_value = -1
+        self.assertEquals(ceph_utils.get_osds('test', 'nvme'), [1, 2, 3])
+        self.check_output.assert_called_once_with(
+            ['ceph', '--id', 'test', 'osd', 'ls', '--format=json']
+        )
+
+    @patch.object(ceph_utils, 'get_osds')
+    @patch.object(ceph_utils, 'pool_exists')
+    def test_create_pool(self, _exists, _get_osds):
+        """It creates rados pool correctly with default replicas"""
+        _exists.return_value = False
+        _get_osds.return_value = [1, 2, 3]
+        ceph_utils.create_pool(service='cinder', name='foo')
+        self.check_call.assert_has_calls([
+            call(['ceph', '--id', 'cinder', 'osd', 'pool',
+                  'create', 'foo', '100']),
+            call(['ceph', '--id', 'cinder', 'osd', 'pool', 'set',
+                  'foo', 'size', '3'])
+        ])
+
+    @patch.object(ceph_utils, 'get_osds')
+    @patch.object(ceph_utils, 'pool_exists')
+    def test_create_pool_2_replicas(self, _exists, _get_osds):
+        """It creates rados pool correctly with 2 replicas"""
+        _exists.return_value = False
+        _get_osds.return_value = [1, 2, 3]
+        ceph_utils.create_pool(service='cinder', name='foo', replicas=2)
+        self.check_call.assert_has_calls([
+            call(['ceph', '--id', 'cinder', 'osd', 'pool',
+                  'create', 'foo', '150']),
+            call(['ceph', '--id', 'cinder', 'osd', 'pool', 'set',
+                  'foo', 'size', '2'])
+        ])
+
+    @patch.object(ceph_utils, 'get_osds')
+    @patch.object(ceph_utils, 'pool_exists')
+    def test_create_pool_argonaut(self, _exists, _get_osds):
+        """It creates rados pool correctly with the default PG count when
+        the OSD count is unavailable"""
+        _exists.return_value = False
+        _get_osds.return_value = None
+        ceph_utils.create_pool(service='cinder', name='foo')
+        self.check_call.assert_has_calls([
+            call(['ceph', '--id', 'cinder', 'osd', 'pool',
+                  'create', 'foo', '200']),
+            call(['ceph', '--id', 'cinder', 'osd', 'pool', 'set',
+                  'foo', 'size', '3'])
+        ])
+
+    def test_create_pool_already_exists(self):
+        self._patch('pool_exists')
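+        # With the pool already present, create_pool() is expected to log
+        # and return without issuing any ceph commands (asserted below).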
+        self.pool_exists.return_value = True
+        ceph_utils.create_pool(service='cinder', name='foo')
+        self.assertTrue(self.log.called)
+        self.check_call.assert_not_called()
+
+    def test_keyring_path(self):
+        """It correctly derives the keyring path from the service name"""
+        result = ceph_utils._keyring_path('cinder')
+        self.assertEquals('/etc/ceph/ceph.client.cinder.keyring', result)
+
+    def test_keyfile_path(self):
+        """It correctly derives the keyfile path from the service name"""
+        result = ceph_utils._keyfile_path('cinder')
+        self.assertEquals('/etc/ceph/ceph.client.cinder.key', result)
+
+    def test_pool_exists(self):
+        """It detects an rbd pool exists"""
+        self.check_output.return_value = LS_POOLS
+        self.assertTrue(ceph_utils.pool_exists('cinder', 'volumes'))
+        self.assertTrue(ceph_utils.pool_exists('rgw', '.rgw.foo'))
+
+    def test_pool_does_not_exist(self):
+        """It detects when an rbd pool does not exist"""
+        self.check_output.return_value = LS_POOLS
+        self.assertFalse(ceph_utils.pool_exists('cinder', 'foo'))
+        self.assertFalse(ceph_utils.pool_exists('rgw', '.rgw'))
+
+    def test_pool_exists_error(self):
+        """Ensure subprocess errors are sandboxed and reported as False"""
+        self.check_output.side_effect = CalledProcessError(1, 'rados')
+        self.assertFalse(ceph_utils.pool_exists('cinder', 'foo'))
+
+    def test_rbd_exists(self):
+        self.check_output.return_value = LS_RBDS
+        self.assertTrue(ceph_utils.rbd_exists('service', 'pool', 'rbd1'))
+        self.check_output.assert_called_with(
+            ['rbd', 'list', '--id', 'service', '--pool', 'pool']
+        )
+
+    def test_rbd_does_not_exist(self):
+        self.check_output.return_value = LS_RBDS
+        self.assertFalse(ceph_utils.rbd_exists('service', 'pool', 'rbd4'))
+        self.check_output.assert_called_with(
+            ['rbd', 'list', '--id', 'service', '--pool', 'pool']
+        )
+
+    def test_rbd_exists_error(self):
+        """Ensure subprocess errors are sandboxed and reported as False"""
+        self.check_output.side_effect = CalledProcessError(1, 'rbd')
+        self.assertFalse(ceph_utils.rbd_exists('cinder', 'foo', 'rbd'))
+
+    def test_create_rbd_image(self):
+        ceph_utils.create_rbd_image('service', 'pool', 'image', 128)
+        _cmd = ['rbd', 'create', 'image',
+                '--size', '128',
+                '--id', 'service',
+                '--pool', 'pool']
+        self.check_call.assert_called_with(_cmd)
+
+    def test_delete_pool(self):
+        ceph_utils.delete_pool('cinder', 'pool')
+        _cmd = [
+            'ceph', '--id', 'cinder',
+            'osd', 'pool', 'delete',
+            'pool', '--yes-i-really-really-mean-it'
+        ]
+        self.check_call.assert_called_with(_cmd)
+
+    def test_get_ceph_nodes(self):
+        self._patch('relation_ids')
+        self._patch('related_units')
+        self._patch('relation_get')
+        units = ['ceph/1', 'ceph2', 'ceph/3']
+        self.relation_ids.return_value = ['ceph:0']
+        self.related_units.return_value = units
+        self.relation_get.return_value = '192.168.1.1'
+        self.assertEquals(len(ceph_utils.get_ceph_nodes()), 3)
+
+    def test_get_ceph_nodes_not_related(self):
+        self._patch('relation_ids')
+        self.relation_ids.return_value = []
+        self.assertEquals(ceph_utils.get_ceph_nodes(), [])
+
+    def test_configure(self):
+        self._patch('add_key')
+        self._patch('create_key_file')
+        self._patch('get_ceph_nodes')
+        self._patch('modprobe')
+        _hosts = ['192.168.1.1', '192.168.1.2']
+        self.get_ceph_nodes.return_value = _hosts
+        _conf = ceph_utils.CEPH_CONF.format(
+            auth='cephx',
+            keyring=ceph_utils._keyring_path('cinder'),
+            mon_hosts=",".join(map(str, _hosts)),
+            use_syslog='true'
+        )
+        with patch_open() as (_open, _file):
+            ceph_utils.configure('cinder', 'key', 'cephx', 'true')
+            _file.write.assert_called_with(_conf)
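+            # configure() is expected to render CEPH_CONF with the auth
+            # mode, keyring path and comma-joined monitor hosts (the _conf
+            # built above) and write it to /etc/ceph/ceph.conf.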
_open.assert_called_with('/etc/ceph/ceph.conf', 'w') + self.modprobe.assert_called_with('rbd') + self.add_key.assert_called_with('cinder', 'key') + self.create_key_file.assert_called_with('cinder', 'key') + + def test_image_mapped(self): + self.check_output.return_value = IMG_MAP + self.assertTrue(ceph_utils.image_mapped('bar')) + + def test_image_not_mapped(self): + self.check_output.return_value = IMG_MAP + self.assertFalse(ceph_utils.image_mapped('foo')) + + def test_image_not_mapped_error(self): + self.check_output.side_effect = CalledProcessError(1, 'rbd') + self.assertFalse(ceph_utils.image_mapped('bar')) + + def test_map_block_storage(self): + _service = 'cinder' + _pool = 'bar' + _img = 'foo' + _cmd = [ + 'rbd', + 'map', + '{}/{}'.format(_pool, _img), + '--user', + _service, + '--secret', + ceph_utils._keyfile_path(_service), + ] + ceph_utils.map_block_storage(_service, _pool, _img) + self.check_call.assert_called_with(_cmd) + + def test_filesystem_mounted(self): + self._patch('mounts') + self.mounts.return_value = [['/afs', '/dev/sdb'], ['/bfs', '/dev/sdd']] + self.assertTrue(ceph_utils.filesystem_mounted('/afs')) + self.assertFalse(ceph_utils.filesystem_mounted('/zfs')) + + @patch('os.path.exists') + def test_make_filesystem(self, _exists): + _exists.return_value = True + ceph_utils.make_filesystem('/dev/sdd') + self.assertTrue(self.log.called) + self.check_call.assert_called_with(['mkfs', '-t', 'ext4', '/dev/sdd']) + + @patch('os.path.exists') + def test_make_filesystem_xfs(self, _exists): + _exists.return_value = True + ceph_utils.make_filesystem('/dev/sdd', 'xfs') + self.assertTrue(self.log.called) + self.check_call.assert_called_with(['mkfs', '-t', 'xfs', '/dev/sdd']) + + @patch('os.chown') + @patch('os.stat') + def test_place_data_on_block_device(self, _stat, _chown): + self._patch('mount') + self._patch('copy_files') + self._patch('umount') + _stat.return_value.st_uid = 100 + _stat.return_value.st_gid = 100 + ceph_utils.place_data_on_block_device('/dev/sdd', '/var/lib/mysql') + self.mount.assert_has_calls([ + call('/dev/sdd', '/mnt'), + call('/dev/sdd', '/var/lib/mysql', persist=True) + ]) + self.copy_files.assert_called_with('/var/lib/mysql', '/mnt') + self.umount.assert_called_with('/mnt') + _chown.assert_called_with('/var/lib/mysql', 100, 100) + + @patch('shutil.copytree') + @patch('os.listdir') + @patch('os.path.isdir') + def test_copy_files_is_dir(self, _isdir, _listdir, _copytree): + _isdir.return_value = True + subdirs = ['a', 'b', 'c'] + _listdir.return_value = subdirs + ceph_utils.copy_files('/source', '/dest') + for d in subdirs: + _copytree.assert_has_calls([ + call('/source/{}'.format(d), '/dest/{}'.format(d), + False, None) + ]) + + @patch('shutil.copytree') + @patch('os.listdir') + @patch('os.path.isdir') + def test_copy_files_include_symlinks(self, _isdir, _listdir, _copytree): + _isdir.return_value = True + subdirs = ['a', 'b', 'c'] + _listdir.return_value = subdirs + ceph_utils.copy_files('/source', '/dest', True) + for d in subdirs: + _copytree.assert_has_calls([ + call('/source/{}'.format(d), '/dest/{}'.format(d), + True, None) + ]) + + @patch('shutil.copytree') + @patch('os.listdir') + @patch('os.path.isdir') + def test_copy_files_ignore(self, _isdir, _listdir, _copytree): + _isdir.return_value = True + subdirs = ['a', 'b', 'c'] + _listdir.return_value = subdirs + ceph_utils.copy_files('/source', '/dest', True, False) + for d in subdirs: + _copytree.assert_has_calls([ + call('/source/{}'.format(d), '/dest/{}'.format(d), + True, False) + ]) + + 
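# When the source entries are plain files rather than directories,
+    # copy_files should fall back to shutil.copy2 (exercised below).
+    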
@patch('shutil.copy2')
+    @patch('os.listdir')
+    @patch('os.path.isdir')
+    def test_copy_files_files(self, _isdir, _listdir, _copy2):
+        _isdir.return_value = False
+        files = ['a', 'b', 'c']
+        _listdir.return_value = files
+        ceph_utils.copy_files('/source', '/dest')
+        for f in files:
+            _copy2.assert_has_calls([
+                call('/source/{}'.format(f), '/dest/{}'.format(f))
+            ])
+
+    def test_ensure_ceph_storage(self):
+        self._patch('pool_exists')
+        self.pool_exists.return_value = False
+        self._patch('create_pool')
+        self._patch('rbd_exists')
+        self.rbd_exists.return_value = False
+        self._patch('create_rbd_image')
+        self._patch('image_mapped')
+        self.image_mapped.return_value = False
+        self._patch('map_block_storage')
+        self._patch('filesystem_mounted')
+        self.filesystem_mounted.return_value = False
+        self._patch('make_filesystem')
+        self._patch('service_stop')
+        self._patch('service_start')
+        self._patch('service_running')
+        self.service_running.return_value = True
+        self._patch('place_data_on_block_device')
+        _service = 'mysql'
+        _pool = 'bar'
+        _rbd_img = 'foo'
+        _mount = '/var/lib/mysql'
+        _services = ['mysql']
+        _blk_dev = '/dev/rbd1'
+        ceph_utils.ensure_ceph_storage(_service, _pool,
+                                       _rbd_img, 1024, _mount,
+                                       _blk_dev, 'ext4', _services, 3)
+        self.create_pool.assert_called_with(_service, _pool, replicas=3)
+        self.create_rbd_image.assert_called_with(_service, _pool,
+                                                 _rbd_img, 1024)
+        self.map_block_storage.assert_called_with(_service, _pool, _rbd_img)
+        self.make_filesystem.assert_called_with(_blk_dev, 'ext4')
+        self.service_stop.assert_called_with(_services[0])
+        self.place_data_on_block_device.assert_called_with(_blk_dev, _mount)
+        self.service_start.assert_called_with(_services[0])
+
+    def test_make_filesystem_default_filesystem(self):
+        """make_filesystem() uses ext4 as the default filesystem."""
+        device = '/dev/zero'
+        ceph_utils.make_filesystem(device)
+        self.check_call.assert_called_with(['mkfs', '-t', 'ext4', device])
+
+    def test_make_filesystem_no_device(self):
+        """make_filesystem() raises an IOError if the device does not exist."""
+        device = '/no/such/device'
+        e = self.assertRaises(IOError, ceph_utils.make_filesystem, device,
+                              timeout=0)
+        self.assertEquals(device, e.filename)
+        self.assertEquals(errno.ENOENT, e.errno)
+        self.assertEquals(os.strerror(errno.ENOENT), e.strerror)
+        self.log.assert_called_with(
+            'Gave up waiting on block device %s' % device, level='ERROR')
+
+    @nose.plugins.attrib.attr('slow')
+    def test_make_filesystem_timeout(self):
+        """
+        make_filesystem() lets the caller specify how long it should wait
+        for the device to appear before it fails.
+        """
+        device = '/no/such/device'
+        timeout = 2
+        before = time.time()
+        self.assertRaises(IOError, ceph_utils.make_filesystem, device,
+                          timeout=timeout)
+        after = time.time()
+        duration = after - before
+        self.assertTrue(timeout - duration < 0.1)
+        self.log.assert_called_with(
+            'Gave up waiting on block device %s' % device, level='ERROR')
+
+    @nose.plugins.attrib.attr('slow')
+    def test_device_is_formatted_if_it_appears(self):
+        """
+        The specified device is formatted if it appears before the timeout
+        is reached.
+ """ + + def create_my_device(filename): + with open(filename, "w") as device: + device.write("hello\n") + + temp_dir = mkdtemp() + self.addCleanup(rmtree, temp_dir) + device = "%s/mydevice" % temp_dir + fstype = 'xfs' + timeout = 4 + t = Timer(2, create_my_device, [device]) + t.start() + ceph_utils.make_filesystem(device, fstype, timeout) + self.check_call.assert_called_with(['mkfs', '-t', fstype, device]) + + def test_existing_device_is_formatted(self): + """ + make_filesystem() formats the given device if it exists with the + specified filesystem. + """ + device = '/dev/zero' + fstype = 'xfs' + ceph_utils.make_filesystem(device, fstype) + self.check_call.assert_called_with(['mkfs', '-t', fstype, device]) + self.log.assert_called_with( + 'Formatting block device %s as ' + 'filesystem %s.' % (device, fstype), level='INFO' + ) + + @patch.object(ceph_utils, 'relation_ids') + @patch.object(ceph_utils, 'related_units') + @patch.object(ceph_utils, 'relation_get') + def test_ensure_ceph_keyring_no_relation_no_data(self, rget, runits, rids): + rids.return_value = [] + self.assertEquals(False, ceph_utils.ensure_ceph_keyring(service='foo')) + rids.return_value = ['ceph:0'] + runits.return_value = ['ceph/0'] + rget.return_value = '' + self.assertEquals(False, ceph_utils.ensure_ceph_keyring(service='foo')) + + @patch.object(ceph_utils, '_keyring_path') + @patch.object(ceph_utils, 'add_key') + @patch.object(ceph_utils, 'relation_ids') + def test_ensure_ceph_keyring_no_relation_but_key(self, rids, + create, _path): + rids.return_value = [] + self.assertTrue(ceph_utils.ensure_ceph_keyring(service='foo', + key='testkey')) + create.assert_called_with(service='foo', key='testkey') + _path.assert_called_with('foo') + + @patch.object(ceph_utils, '_keyring_path') + @patch.object(ceph_utils, 'add_key') + @patch.object(ceph_utils, 'relation_ids') + @patch.object(ceph_utils, 'related_units') + @patch.object(ceph_utils, 'relation_get') + def test_ensure_ceph_keyring_with_data(self, rget, runits, + rids, create, _path): + rids.return_value = ['ceph:0'] + runits.return_value = ['ceph/0'] + rget.return_value = 'fookey' + self.assertEquals(True, + ceph_utils.ensure_ceph_keyring(service='foo')) + create.assert_called_with(service='foo', key='fookey') + _path.assert_called_with('foo') + self.assertFalse(self.check_call.called) + + _path.return_value = '/etc/ceph/client.foo.keyring' + self.assertEquals( + True, + ceph_utils.ensure_ceph_keyring( + service='foo', user='adam', group='users')) + create.assert_called_with(service='foo', key='fookey') + _path.assert_called_with('foo') + self.check_call.assert_called_with([ + 'chown', + 'adam.users', + '/etc/ceph/client.foo.keyring' + ]) + + @patch.object(ceph_utils, 'service_name') + @patch.object(ceph_utils, 'uuid') + def test_ceph_broker_rq_class(self, uuid, service_name): + service_name.return_value = 'service_test' + uuid.uuid1.return_value = 'uuid' + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool('pool1', replica_count=1) + rq.add_op_create_pool('pool1', replica_count=1) + rq.add_op_create_pool('pool2') + rq.add_op_create_pool('pool2') + rq.add_op_create_pool('pool3', group='test') + rq.add_op_request_access_to_group(name='test') + rq.add_op_request_access_to_group(name='test') + rq.add_op_request_access_to_group(name='objects', + key_name='test') + rq.add_op_request_access_to_group( + name='others', + object_prefix_permissions={'rwx': ['prefix1']}) + expected = { + 'api-version': 1, + 'request-id': 'uuid', + 'ops': [{'op': 'create-pool', 'name': 'pool1', 
'replicas': 1}, + {'op': 'create-pool', 'name': 'pool2', 'replicas': 3}, + {'op': 'create-pool', 'name': 'pool3', 'replicas': 3, 'group': 'test'}, + {'op': 'add-permissions-to-key', 'group': 'test', 'name': 'service_test'}, + {'op': 'add-permissions-to-key', 'group': 'objects', 'name': 'test'}, + { + 'op': 'add-permissions-to-key', + 'group': 'others', + 'name': 'service_test', + 'object-prefix-permissions': {u'rwx': [u'prefix1']}}] + } + request_dict = json.loads(rq.request) + for key in ['api-version', 'request-id']: + self.assertEqual(request_dict[key], expected[key]) + for (op_no, expected_op) in enumerate(expected['ops']): + for key in expected_op.keys(): + self.assertEqual( + request_dict['ops'][op_no][key], + expected_op[key]) + + @patch.object(ceph_utils, 'service_name') + @patch.object(ceph_utils, 'uuid') + def test_ceph_broker_rq_class_test_not_equal(self, uuid, service_name): + service_name.return_value = 'service_test' + uuid.uuid1.return_value = 'uuid' + rq1 = ceph_utils.CephBrokerRq() + rq1.add_op_create_pool('pool1') + rq1.add_op_request_access_to_group(name='test') + rq1.add_op_request_access_to_group(name='objects', + permission='rwx') + rq2 = ceph_utils.CephBrokerRq() + rq2.add_op_create_pool('pool1') + rq2.add_op_request_access_to_group(name='test') + rq2.add_op_request_access_to_group(name='objects', + permission='r') + self.assertFalse(rq1 == rq2) + + def test_ceph_broker_rsp_class(self): + rsp = ceph_utils.CephBrokerRsp(json.dumps({'exit-code': 0, + 'stderr': "Success"})) + self.assertEqual(rsp.exit_code, 0) + self.assertEqual(rsp.exit_msg, "Success") + self.assertEqual(rsp.request_id, None) + + def test_ceph_broker_rsp_class_rqid(self): + rsp = ceph_utils.CephBrokerRsp(json.dumps({'exit-code': 0, + 'stderr': "Success", + 'request-id': 'reqid1'})) + self.assertEqual(rsp.exit_code, 0) + self.assertEqual(rsp.exit_msg, 'Success') + self.assertEqual(rsp.request_id, 'reqid1') + + def setup_client_relation(self, relation): + relation = FakeRelation(relation) + self.relation_get.side_effect = relation.get + self.relation_ids.side_effect = relation.relation_ids + self.related_units.side_effect = relation.related_units + + # @patch.object(ceph_utils, 'uuid') + # @patch.object(ceph_utils, 'local_unit') + # def test_get_request_states(self, mlocal_unit, muuid): + # muuid.uuid1.return_value = '0bc7dc54' + @patch.object(ceph_utils, 'local_unit') + def test_get_request_states(self, mlocal_unit): + mlocal_unit.return_value = 'glance/0' + self.setup_client_relation(CEPH_CLIENT_RELATION) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool(name='glance', replica_count=3) + expect = {'ceph:8': {'complete': True, 'sent': True}} + self.assertEqual(ceph_utils.get_request_states(rq), expect) + + @patch.object(ceph_utils, 'local_unit') + def test_get_request_states_newrq(self, mlocal_unit): + mlocal_unit.return_value = 'glance/0' + self.setup_client_relation(CEPH_CLIENT_RELATION) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool(name='glance', replica_count=4) + expect = {'ceph:8': {'complete': False, 'sent': False}} + self.assertEqual(ceph_utils.get_request_states(rq), expect) + + @patch.object(ceph_utils, 'local_unit') + def test_get_request_states_pendingrq(self, mlocal_unit): + mlocal_unit.return_value = 'glance/0' + rel = copy.deepcopy(CEPH_CLIENT_RELATION) + del rel['ceph:8']['ceph/0']['broker-rsp-glance-0'] + self.setup_client_relation(rel) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool(name='glance', replica_count=3) + expect = {'ceph:8': {'complete': False, 
'sent': True}}
+        self.assertEqual(ceph_utils.get_request_states(rq), expect)
+
+    @patch.object(ceph_utils, 'local_unit')
+    def test_get_request_states_failedrq(self, mlocal_unit):
+        mlocal_unit.return_value = 'glance/0'
+        rel = copy.deepcopy(CEPH_CLIENT_RELATION)
+        rel['ceph:8']['ceph/0']['broker-rsp-glance-0'] = '{"request-id": "0bc7dc54", "exit-code": 1}'
+        self.setup_client_relation(rel)
+        rq = ceph_utils.CephBrokerRq()
+        rq.add_op_create_pool(name='glance', replica_count=3)
+        expect = {'ceph:8': {'complete': False, 'sent': True}}
+        self.assertEqual(ceph_utils.get_request_states(rq), expect)
+
+    @patch.object(ceph_utils, 'local_unit')
+    def test_is_request_sent(self, mlocal_unit):
+        mlocal_unit.return_value = 'glance/0'
+        self.setup_client_relation(CEPH_CLIENT_RELATION)
+        rq = ceph_utils.CephBrokerRq()
+        rq.add_op_create_pool(name='glance', replica_count=3)
+        self.assertTrue(ceph_utils.is_request_sent(rq))
+
+    @patch.object(ceph_utils, 'local_unit')
+    def test_is_request_sent_newrq(self, mlocal_unit):
+        mlocal_unit.return_value = 'glance/0'
+        self.setup_client_relation(CEPH_CLIENT_RELATION)
+        rq = ceph_utils.CephBrokerRq()
+        rq.add_op_create_pool(name='glance', replica_count=4)
+        self.assertFalse(ceph_utils.is_request_sent(rq))
+
+    @patch.object(ceph_utils, 'local_unit')
+    def test_is_request_sent_pending(self, mlocal_unit):
+        mlocal_unit.return_value = 'glance/0'
+        rel = copy.deepcopy(CEPH_CLIENT_RELATION)
+        del rel['ceph:8']['ceph/0']['broker-rsp-glance-0']
+        self.setup_client_relation(rel)
+        rq = ceph_utils.CephBrokerRq()
+        rq.add_op_create_pool(name='glance', replica_count=3)
+        self.assertTrue(ceph_utils.is_request_sent(rq))
+
+    @patch.object(ceph_utils, 'local_unit')
+    def test_is_request_sent_legacy(self, mlocal_unit):
+        mlocal_unit.return_value = 'glance/0'
+        self.setup_client_relation(CEPH_CLIENT_RELATION_LEGACY)
+        rq = ceph_utils.CephBrokerRq()
+        rq.add_op_create_pool(name='glance', replica_count=3)
+        self.assertTrue(ceph_utils.is_request_sent(rq))
+
+    @patch.object(ceph_utils, 'local_unit')
+    def test_is_request_sent_legacy_newrq(self, mlocal_unit):
+        mlocal_unit.return_value = 'glance/0'
+        self.setup_client_relation(CEPH_CLIENT_RELATION_LEGACY)
+        rq = ceph_utils.CephBrokerRq()
+        rq.add_op_create_pool(name='glance', replica_count=4)
+        self.assertFalse(ceph_utils.is_request_sent(rq))
+
+    @patch.object(ceph_utils, 'local_unit')
+    def test_is_request_sent_legacy_pending(self, mlocal_unit):
+        mlocal_unit.return_value = 'glance/0'
+        rel = copy.deepcopy(CEPH_CLIENT_RELATION_LEGACY)
+        del rel['ceph:8']['ceph/0']['broker_rsp']
+        self.setup_client_relation(rel)
+        rq = ceph_utils.CephBrokerRq()
+        rq.add_op_create_pool(name='glance', replica_count=3)
+        self.assertTrue(ceph_utils.is_request_sent(rq))
+
+    @patch.object(ceph_utils, 'uuid')
+    @patch.object(ceph_utils, 'local_unit')
+    def test_is_request_complete(self, mlocal_unit, muuid):
+        muuid.uuid1.return_value = '0bc7dc54'
+        mlocal_unit.return_value = 'glance/0'
+        self.setup_client_relation(CEPH_CLIENT_RELATION)
+        rq = ceph_utils.CephBrokerRq()
+        rq.add_op_create_pool(name='glance', replica_count=3)
+        self.assertTrue(ceph_utils.is_request_complete(rq))
+
+    @patch.object(ceph_utils, 'local_unit')
+    def test_is_request_complete_newrq(self, mlocal_unit):
+        mlocal_unit.return_value = 'glance/0'
+        self.setup_client_relation(CEPH_CLIENT_RELATION)
+        rq = ceph_utils.CephBrokerRq()
+        rq.add_op_create_pool(name='glance', replica_count=4)
+        self.assertFalse(ceph_utils.is_request_complete(rq))
+
+    @patch.object(ceph_utils, 'local_unit')
+    def test_is_request_complete_pending(self, 
mlocal_unit): + mlocal_unit.return_value = 'glance/0' + rel = copy.deepcopy(CEPH_CLIENT_RELATION) + del rel['ceph:8']['ceph/0']['broker-rsp-glance-0'] + self.setup_client_relation(rel) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool(name='glance', replica_count=3) + self.assertFalse(ceph_utils.is_request_complete(rq)) + + @patch.object(ceph_utils, 'local_unit') + def test_is_request_complete_legacy(self, mlocal_unit): + mlocal_unit.return_value = 'glance/0' + self.setup_client_relation(CEPH_CLIENT_RELATION_LEGACY) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool(name='glance', replica_count=3) + self.assertTrue(ceph_utils.is_request_complete(rq)) + + @patch.object(ceph_utils, 'local_unit') + def test_is_request_complete_legacy_newrq(self, mlocal_unit): + mlocal_unit.return_value = 'glance/0' + self.setup_client_relation(CEPH_CLIENT_RELATION_LEGACY) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool(name='glance', replica_count=4) + self.assertFalse(ceph_utils.is_request_complete(rq)) + + @patch.object(ceph_utils, 'local_unit') + def test_is_request_complete_legacy_pending(self, mlocal_unit): + mlocal_unit.return_value = 'glance/0' + rel = copy.deepcopy(CEPH_CLIENT_RELATION_LEGACY) + del rel['ceph:8']['ceph/0']['broker_rsp'] + self.setup_client_relation(rel) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool(name='glance', replica_count=3) + self.assertFalse(ceph_utils.is_request_complete(rq)) + + def test_equivalent_broker_requests(self): + rq1 = ceph_utils.CephBrokerRq() + rq1.add_op_create_pool(name='glance', replica_count=4) + rq2 = ceph_utils.CephBrokerRq() + rq2.add_op_create_pool(name='glance', replica_count=4) + self.assertTrue(rq1 == rq2) + + def test_equivalent_broker_requests_diff1(self): + rq1 = ceph_utils.CephBrokerRq() + rq1.add_op_create_pool(name='glance', replica_count=3) + rq2 = ceph_utils.CephBrokerRq() + rq2.add_op_create_pool(name='glance', replica_count=4) + self.assertFalse(rq1 == rq2) + + def test_equivalent_broker_requests_diff2(self): + rq1 = ceph_utils.CephBrokerRq() + rq1.add_op_create_pool(name='glance', replica_count=3) + rq2 = ceph_utils.CephBrokerRq() + rq2.add_op_create_pool(name='cinder', replica_count=3) + self.assertFalse(rq1 == rq2) + + def test_equivalent_broker_requests_diff3(self): + rq1 = ceph_utils.CephBrokerRq() + rq1.add_op_create_pool(name='glance', replica_count=3) + rq2 = ceph_utils.CephBrokerRq(api_version=2) + rq2.add_op_create_pool(name='glance', replica_count=3) + self.assertFalse(rq1 == rq2) + + @patch.object(ceph_utils, 'uuid') + @patch.object(ceph_utils, 'local_unit') + def test_is_request_complete_for_rid(self, mlocal_unit, muuid): + muuid.uuid1.return_value = '0bc7dc54' + req = ceph_utils.CephBrokerRq() + req.add_op_create_pool(name='glance', replica_count=3) + mlocal_unit.return_value = 'glance/0' + self.setup_client_relation(CEPH_CLIENT_RELATION) + self.assertTrue(ceph_utils.is_request_complete_for_rid(req, 'ceph:8')) + + @patch.object(ceph_utils, 'uuid') + @patch.object(ceph_utils, 'local_unit') + def test_is_request_complete_for_rid_newrq(self, mlocal_unit, muuid): + muuid.uuid1.return_value = 'a44c0fa6' + req = ceph_utils.CephBrokerRq() + req.add_op_create_pool(name='glance', replica_count=4) + mlocal_unit.return_value = 'glance/0' + self.setup_client_relation(CEPH_CLIENT_RELATION) + self.assertFalse(ceph_utils.is_request_complete_for_rid(req, 'ceph:8')) + + @patch.object(ceph_utils, 'uuid') + @patch.object(ceph_utils, 'local_unit') + def test_is_request_complete_for_rid_failed(self, mlocal_unit, 
muuid): + muuid.uuid1.return_value = '0bc7dc54' + req = ceph_utils.CephBrokerRq() + req.add_op_create_pool(name='glance', replica_count=4) + mlocal_unit.return_value = 'glance/0' + rel = copy.deepcopy(CEPH_CLIENT_RELATION) + rel['ceph:8']['ceph/0']['broker-rsp-glance-0'] = '{"request-id": "0bc7dc54", "exit-code": 1}' + self.setup_client_relation(rel) + self.assertFalse(ceph_utils.is_request_complete_for_rid(req, 'ceph:8')) + + @patch.object(ceph_utils, 'uuid') + @patch.object(ceph_utils, 'local_unit') + def test_is_request_complete_for_rid_pending(self, mlocal_unit, muuid): + muuid.uuid1.return_value = '0bc7dc54' + req = ceph_utils.CephBrokerRq() + req.add_op_create_pool(name='glance', replica_count=4) + mlocal_unit.return_value = 'glance/0' + rel = copy.deepcopy(CEPH_CLIENT_RELATION) + del rel['ceph:8']['ceph/0']['broker-rsp-glance-0'] + self.setup_client_relation(rel) + self.assertFalse(ceph_utils.is_request_complete_for_rid(req, 'ceph:8')) + + @patch.object(ceph_utils, 'uuid') + @patch.object(ceph_utils, 'local_unit') + def test_is_request_complete_for_rid_legacy(self, mlocal_unit, muuid): + muuid.uuid1.return_value = '0bc7dc54' + req = ceph_utils.CephBrokerRq() + req.add_op_create_pool(name='glance', replica_count=3) + mlocal_unit.return_value = 'glance/0' + self.setup_client_relation(CEPH_CLIENT_RELATION_LEGACY) + self.assertTrue(ceph_utils.is_request_complete_for_rid(req, 'ceph:8')) + + @patch.object(ceph_utils, 'local_unit') + def test_get_broker_rsp_key(self, mlocal_unit): + mlocal_unit.return_value = 'glance/0' + self.assertEqual(ceph_utils.get_broker_rsp_key(), 'broker-rsp-glance-0') + + @patch.object(ceph_utils, 'local_unit') + def test_send_request_if_needed(self, mlocal_unit): + mlocal_unit.return_value = 'glance/0' + self.setup_client_relation(CEPH_CLIENT_RELATION) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool(name='glance', replica_count=3) + ceph_utils.send_request_if_needed(rq) + self.relation_set.assert_has_calls([]) + + @patch.object(ceph_utils, 'uuid') + @patch.object(ceph_utils, 'local_unit') + def test_send_request_if_needed_newrq(self, mlocal_unit, muuid): + muuid.uuid1.return_value = 'de67511e' + mlocal_unit.return_value = 'glance/0' + self.setup_client_relation(CEPH_CLIENT_RELATION) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool(name='glance', replica_count=4) + ceph_utils.send_request_if_needed(rq) + actual = json.loads(self.relation_set.call_args_list[0][1]['broker_req']) + self.assertEqual(actual['api-version'], 1) + self.assertEqual(actual['request-id'], 'de67511e') + self.assertEqual(actual['ops'][0]['replicas'], 4) + self.assertEqual(actual['ops'][0]['op'], 'create-pool') + self.assertEqual(actual['ops'][0]['name'], 'glance') + + @patch.object(ceph_utils, 'config') + def test_ceph_conf_context(self, mock_config): + mock_config.return_value = "{'osd': {'foo': 1}}" + ctxt = ceph_utils.CephConfContext()() + self.assertEqual({'osd': {'foo': 1}}, ctxt) + ctxt = ceph_utils.CephConfContext(['osd', 'mon'])() + mock_config.return_value = ("{'osd': {'foo': 1}," + "'unknown': {'blah': 1}}") + self.assertEqual({'osd': {'foo': 1}}, ctxt) + + @patch.object(ceph_utils, 'get_osd_settings') + @patch.object(ceph_utils, 'config') + def test_ceph_osd_conf_context_conflict(self, mock_config, + mock_get_osd_settings): + mock_config.return_value = "{'osd': {'osd heartbeat grace': 20}}" + mock_get_osd_settings.return_value = { + 'osd heartbeat grace': 25, + 'osd heartbeat interval': 5} + ctxt = ceph_utils.CephOSDConfContext()() + self.assertEqual(ctxt, { + 
'osd': collections.OrderedDict([('osd heartbeat grace', 20)]), + 'osd_from_client': collections.OrderedDict( + [('osd heartbeat interval', 5)]), + 'osd_from_client_conflict': collections.OrderedDict( + [('osd heartbeat grace', 25)])}) + + @patch.object(ceph_utils, 'local_unit', lambda: "nova-compute/0") + def test_is_broker_action_done(self): + tmpdir = mkdtemp() + try: + db_path = '{}/kv.db'.format(tmpdir) + with patch('charmhelpers.core.unitdata._KV', Storage(db_path)): + rq_id = "3d03e9f6-4c36-11e7-89ba-fa163e7c7ec6" + broker_key = ceph_utils.get_broker_rsp_key() + self.relation_get.return_value = {broker_key: + json.dumps({"request-id": + rq_id, + "exit-code": 0})} + action = 'restart_nova_compute' + ret = ceph_utils.is_broker_action_done(action, rid="ceph:1", + unit="ceph/0") + self.relation_get.assert_has_calls([call(rid='ceph:1', unit='ceph/0')]) + self.assertFalse(ret) + + ceph_utils.mark_broker_action_done(action) + self.assertTrue(os.path.exists(tmpdir)) + ret = ceph_utils.is_broker_action_done(action, rid="ceph:1", + unit="ceph/0") + self.assertTrue(ret) + finally: + if os.path.exists(tmpdir): + shutil.rmtree(tmpdir) + + def test_has_broker_rsp(self): + rq_id = "3d03e9f6-4c36-11e7-89ba-fa163e7c7ec6" + broker_key = ceph_utils.get_broker_rsp_key() + self.relation_get.return_value = {broker_key: + json.dumps({"request-id": + rq_id, + "exit-code": 0})} + ret = ceph_utils.has_broker_rsp(rid="ceph:1", unit="ceph/0") + self.assertTrue(ret) + self.relation_get.assert_has_calls([call(rid='ceph:1', unit='ceph/0')]) + + self.relation_get.return_value = {'something_else': + json.dumps({"request-id": + rq_id, + "exit-code": 0})} + ret = ceph_utils.has_broker_rsp(rid="ceph:1", unit="ceph/0") + self.assertFalse(ret) + + self.relation_get.return_value = None + ret = ceph_utils.has_broker_rsp(rid="ceph:1", unit="ceph/0") + self.assertFalse(ret) + + @patch.object(ceph_utils, 'local_unit', lambda: "nova-compute/0") + def test_mark_broker_action_done(self): + tmpdir = mkdtemp() + try: + db_path = '{}/kv.db'.format(tmpdir) + with patch('charmhelpers.core.unitdata._KV', Storage(db_path)): + rq_id = "3d03e9f6-4c36-11e7-89ba-fa163e7c7ec6" + broker_key = ceph_utils.get_broker_rsp_key() + self.relation_get.return_value = {broker_key: + json.dumps({"request-id": + rq_id})} + action = 'restart_nova_compute' + ceph_utils.mark_broker_action_done(action, rid="ceph:1", + unit="ceph/0") + key = 'unit_0_ceph_broker_action.{}'.format(action) + self.relation_get.assert_has_calls([call(rid='ceph:1', unit='ceph/0')]) + kvstore = Storage(db_path) + self.assertEqual(kvstore.get(key=key), rq_id) + finally: + if os.path.exists(tmpdir): + shutil.rmtree(tmpdir) + + def test_add_op_create_replicated_pool(self): + base_op = {'app-name': None, + 'group': None, + 'group-namespace': None, + 'max-bytes': None, + 'max-objects': None, + 'name': 'apool', + 'op': 'create-pool', + 'pg_num': None, + 'replicas': 3, + 'weight': None} + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_replicated_pool('apool') + self.assertEqual(rq.ops, [base_op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_pool('apool', replica_count=42) + op = base_op.copy() + op['replicas'] = 42 + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_replicated_pool('apool', pg_num=42) + op = base_op.copy() + op['pg_num'] = 42 + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_replicated_pool('apool', weight=42) + op = base_op.copy() + op['weight'] = 42 + self.assertEqual(rq.ops, [op]) + rq = 
ceph_utils.CephBrokerRq() + rq.add_op_create_replicated_pool('apool', group=51) + op = base_op.copy() + op['group'] = 51 + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_replicated_pool('apool', namespace='sol-iii') + op = base_op.copy() + op['group-namespace'] = 'sol-iii' + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_replicated_pool('apool', app_name='earth') + op = base_op.copy() + op['app-name'] = 'earth' + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_replicated_pool('apool', max_bytes=42) + op = base_op.copy() + op['max-bytes'] = 42 + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_replicated_pool('apool', max_objects=42) + op = base_op.copy() + op['max-objects'] = 42 + self.assertEqual(rq.ops, [op]) + + def test_add_op_create_erasure_pool(self): + base_op = {'app-name': None, + 'erasure-profile': None, + 'group': None, + 'max-bytes': None, + 'max-objects': None, + 'name': 'apool', + 'op': 'create-pool', + 'pool-type': 'erasure', + 'weight': None} + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_erasure_pool('apool') + self.assertEqual(rq.ops, [base_op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_erasure_pool('apool', weight=42) + op = base_op.copy() + op['weight'] = 42 + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_erasure_pool('apool', group=51) + op = base_op.copy() + op['group'] = 51 + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_erasure_pool('apool', app_name='earth') + op = base_op.copy() + op['app-name'] = 'earth' + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_erasure_pool('apool', max_bytes=42) + op = base_op.copy() + op['max-bytes'] = 42 + self.assertEqual(rq.ops, [op]) + rq = ceph_utils.CephBrokerRq() + rq.add_op_create_erasure_pool('apool', max_objects=42) + op = base_op.copy() + op['max-objects'] = 42 + self.assertEqual(rq.ops, [op]) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_storage_loopback.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_storage_loopback.py new file mode 100644 index 0000000000000000000000000000000000000000..85a130d4570518883b724f8d8856aed2b2d41bf9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_storage_loopback.py @@ -0,0 +1,82 @@ +import unittest + +from mock import patch + +import charmhelpers.contrib.storage.linux.loopback as loopback + +LOOPBACK_DEVICES = b""" +/dev/loop0: [0805]:2244465 (/tmp/foo.img) +/dev/loop1: [0805]:2244466 (/tmp/bar.img) +/dev/loop2: [0805]:2244467 (/tmp/baz.img (deleted)) +""" + +# It's a mouthful. 
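+# All @patch targets below are built from this module path, so the
+# tests never shell out to a real losetup.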
+STORAGE_LINUX_LOOPBACK = 'charmhelpers.contrib.storage.linux.loopback'
+
+
+class LoopbackStorageUtilsTests(unittest.TestCase):
+    @patch(STORAGE_LINUX_LOOPBACK + '.check_output')
+    def test_loopback_devices(self, output):
+        """It translates current loopback mapping to a dict"""
+        output.return_value = LOOPBACK_DEVICES
+        ex = {
+            '/dev/loop1': '/tmp/bar.img',
+            '/dev/loop0': '/tmp/foo.img',
+            '/dev/loop2': '/tmp/baz.img (deleted)'
+        }
+        self.assertEquals(loopback.loopback_devices(), ex)
+
+    @patch(STORAGE_LINUX_LOOPBACK + '.create_loopback')
+    @patch('subprocess.check_call')
+    @patch(STORAGE_LINUX_LOOPBACK + '.loopback_devices')
+    def test_loopback_create_already_exists(self, loopbacks, check_call,
+                                            create):
+        """It finds existing loopback device for requested file"""
+        loopbacks.return_value = {'/dev/loop1': '/tmp/bar.img'}
+        res = loopback.ensure_loopback_device('/tmp/bar.img', '5G')
+        self.assertEquals(res, '/dev/loop1')
+        self.assertFalse(create.called)
+        self.assertFalse(check_call.called)
+
+    @patch(STORAGE_LINUX_LOOPBACK + '.loopback_devices')
+    @patch(STORAGE_LINUX_LOOPBACK + '.create_loopback')
+    @patch('os.path.exists')
+    def test_loop_creation_no_truncate(self, path_exists, create_loopback,
+                                       loopbacks):
+        """It does not create a new sparse image for loopback if one exists"""
+        loopbacks.return_value = {}
+        path_exists.return_value = True
+        with patch('subprocess.check_call') as check_call:
+            loopback.ensure_loopback_device('/tmp/foo.img', '15G')
+            self.assertFalse(check_call.called)
+
+    @patch(STORAGE_LINUX_LOOPBACK + '.loopback_devices')
+    @patch(STORAGE_LINUX_LOOPBACK + '.create_loopback')
+    @patch('os.path.exists')
+    def test_ensure_loopback_creation(self, path_exists, create_loopback,
+                                      loopbacks):
+        """It creates a new sparse image for loopback if one does not exist"""
+        loopbacks.return_value = {}
+        path_exists.return_value = False
+        create_loopback.return_value = '/dev/loop0'
+        with patch(STORAGE_LINUX_LOOPBACK + '.check_call') as check_call:
+            loopback.ensure_loopback_device('/tmp/foo.img', '15G')
+            check_call.assert_called_with(['truncate', '--size', '15G',
+                                           '/tmp/foo.img'])
+
+    @patch.object(loopback, 'loopback_devices')
+    def test_create_loopback(self, _devs):
+        """It correctly calls losetup to create a loopback device"""
+        _devs.return_value = {'/dev/loop0': '/tmp/foo'}
+        with patch(STORAGE_LINUX_LOOPBACK + '.check_call') as check_call:
+            check_call.return_value = ''
+            result = loopback.create_loopback('/tmp/foo')
+            check_call.assert_called_with(['losetup', '--find', '/tmp/foo'])
+            self.assertEquals(result, '/dev/loop0')
+
+    @patch.object(loopback, 'loopback_devices')
+    def test_create_is_mapped_loopback_device(self, devs):
+        devs.return_value = {'/dev/loop0': "/tmp/manco"}
+        self.assertEquals(loopback.is_mapped_loopback_device("/dev/loop0"),
+                          "/tmp/manco")
+        self.assertFalse(loopback.is_mapped_loopback_device("/dev/loop1"))
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_storage_lvm.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_storage_lvm.py
new file mode 100644
index 0000000000000000000000000000000000000000..015d451dd2aad0f1d466f946ab8234934313e7d6
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_storage_lvm.py
@@ -0,0 +1,212 @@
+import unittest
+import subprocess
+
+from mock import patch
+
+import charmhelpers.contrib.storage.linux.lvm as lvm
+
+PVDISPLAY = b"""
+  --- Physical volume ---
+  
PV Name               /dev/loop0
+  VG Name               foo
+  PV Size               10.00 MiB / not usable 2.00 MiB
+  Allocatable           yes
+  PE Size               4.00 MiB
+  Total PE              2
+  Free PE               2
+  Allocated PE          0
+  PV UUID               fyVqlr-pyrL-89On-f6MD-U91T-dEfc-SL0V2V
+
+"""
+
+EMPTY_VG_IN_PVDISPLAY = b"""
+  --- Physical volume ---
+  PV Name               /dev/loop0
+  VG Name
+  PV Size               10.00 MiB / not usable 2.00 MiB
+  Allocatable           yes
+  PE Size               4.00 MiB
+  Total PE              2
+  Free PE               2
+  Allocated PE          0
+  PV UUID               fyVqlr-pyrL-89On-f6MD-U91T-dEfc-SL0V2V
+
+"""
+LVS_DEFAULT = b"""
+  cinder-volumes-pool
+  testvol
+  volume-48be6ba0-84c3-4b8d-9be5-e68e47fc7682
+  volume-f8c1d2fd-1fa1-4d84-b4e0-431dba7d582e
+"""
+LVS_WITH_VG = b"""
+  cinder-volumes cinder-volumes-pool
+  cinder-volumes testvol
+  cinder-volumes volume-48be6ba0-84c3-4b8d-9be5-e68e47fc7682
+  cinder-volumes volume-f8c1d2fd-1fa1-4d84-b4e0-431dba7d582e
+"""
+LVS_THIN_POOLS = b"""
+  cinder-volumes-pool
+"""
+LVS_THIN_POOLS_WITH_VG = b"""
+  cinder-volumes cinder-volumes-pool
+"""
+
+# It's a mouthful.
+STORAGE_LINUX_LVM = 'charmhelpers.contrib.storage.linux.lvm'
+
+
+class LVMStorageUtilsTests(unittest.TestCase):
+    def test_find_volume_group_on_pv(self):
+        """It determines any volume group assigned to an LVM PV"""
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            check_output.return_value = PVDISPLAY
+            vg = lvm.list_lvm_volume_group('/dev/loop0')
+            self.assertEquals(vg, 'foo')
+
+    def test_find_empty_volume_group_on_pv(self):
+        """Return empty string when no volume group is assigned to the PV"""
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            check_output.return_value = EMPTY_VG_IN_PVDISPLAY
+            vg = lvm.list_lvm_volume_group('/dev/loop0')
+            self.assertEquals(vg, '')
+
+    @patch(STORAGE_LINUX_LVM + '.list_lvm_volume_group')
+    def test_deactivate_lvm_volume_groups(self, ls_vg):
+        """It deactivates active volume groups on LVM PV"""
+        ls_vg.return_value = 'foo-vg'
+        with patch(STORAGE_LINUX_LVM + '.check_call') as check_call:
+            lvm.deactivate_lvm_volume_group('/dev/loop0')
+            check_call.assert_called_with(['vgchange', '-an', 'foo-vg'])
+
+    def test_remove_lvm_physical_volume(self):
+        """It removes LVM physical volume signatures from block device"""
+        with patch(STORAGE_LINUX_LVM + '.Popen') as popen:
+            lvm.remove_lvm_physical_volume('/dev/foo')
+            popen.assert_called_with(['pvremove', '-ff', '/dev/foo'], stdin=-1)
+
+    def test_is_physical_volume(self):
+        """It properly reports block dev is an LVM PV"""
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            check_output.return_value = PVDISPLAY
+            self.assertTrue(lvm.is_lvm_physical_volume('/dev/loop0'))
+
+    def test_is_not_physical_volume(self):
+        """It properly reports block dev is not an LVM PV"""
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            check_output.side_effect = subprocess.CalledProcessError(2, 'cmd')
+            self.assertFalse(lvm.is_lvm_physical_volume('/dev/loop0'))
+
+    def test_pvcreate(self):
+        """It correctly calls pvcreate for a given block dev"""
+        with patch(STORAGE_LINUX_LVM + '.check_call') as check_call:
+            lvm.create_lvm_physical_volume('/dev/foo')
+            check_call.assert_called_with(['pvcreate', '/dev/foo'])
+
+    def test_vgcreate(self):
+        """It correctly calls vgcreate for given block dev and vol group"""
+        with patch(STORAGE_LINUX_LVM + '.check_call') as check_call:
+            lvm.create_lvm_volume_group('foo-vg', '/dev/foo')
+            check_call.assert_called_with(['vgcreate', 'foo-vg', '/dev/foo'])
+
+    def test_list_logical_volumes(self):
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            
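# Simulated 'lvs --noheadings' output: one logical volume per line.
+            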
check_output.return_value = LVS_DEFAULT
+            self.assertEqual(lvm.list_logical_volumes(), [
+                'cinder-volumes-pool',
+                'testvol',
+                'volume-48be6ba0-84c3-4b8d-9be5-e68e47fc7682',
+                'volume-f8c1d2fd-1fa1-4d84-b4e0-431dba7d582e'])
+            check_output.assert_called_with([
+                'lvs',
+                '--options',
+                'lv_name',
+                '--noheadings'])
+
+    def test_list_logical_volumes_empty(self):
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            check_output.return_value = b''
+            self.assertEqual(lvm.list_logical_volumes(), [])
+
+    def test_list_logical_volumes_path_mode(self):
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            check_output.return_value = LVS_WITH_VG
+            self.assertEqual(lvm.list_logical_volumes(path_mode=True), [
+                'cinder-volumes/cinder-volumes-pool',
+                'cinder-volumes/testvol',
+                'cinder-volumes/volume-48be6ba0-84c3-4b8d-9be5-e68e47fc7682',
+                'cinder-volumes/volume-f8c1d2fd-1fa1-4d84-b4e0-431dba7d582e'])
+            check_output.assert_called_with([
+                'lvs',
+                '--options',
+                'vg_name,lv_name',
+                '--noheadings'])
+
+    def test_list_logical_volumes_select_criteria(self):
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            check_output.return_value = LVS_THIN_POOLS
+            self.assertEqual(
+                lvm.list_logical_volumes(select_criteria='lv_attr =~ ^t'),
+                ['cinder-volumes-pool'])
+            check_output.assert_called_with([
+                'lvs',
+                '--options',
+                'lv_name',
+                '--noheadings',
+                '--select',
+                'lv_attr =~ ^t'])
+
+    def test_list_thin_logical_volume_pools(self):
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            check_output.return_value = LVS_THIN_POOLS
+            self.assertEqual(
+                lvm.list_thin_logical_volume_pools(),
+                ['cinder-volumes-pool'])
+            check_output.assert_called_with([
+                'lvs',
+                '--options',
+                'lv_name',
+                '--noheadings',
+                '--select',
+                'lv_attr =~ ^t'])
+
+    def test_list_thin_logical_volume_pools_path_mode(self):
+        with patch(STORAGE_LINUX_LVM + '.check_output') as check_output:
+            check_output.return_value = LVS_THIN_POOLS_WITH_VG
+            self.assertEqual(
+                lvm.list_thin_logical_volume_pools(path_mode=True),
+                ['cinder-volumes/cinder-volumes-pool'])
+            check_output.assert_called_with([
+                'lvs',
+                '--options',
+                'vg_name,lv_name',
+                '--noheadings',
+                '--select',
+                'lv_attr =~ ^t'])
+
+    def test_extend_logical_volume_by_device(self):
+        """It correctly calls lvextend to grow a LV onto a given block dev"""
+        with patch(STORAGE_LINUX_LVM + '.check_call') as check_call:
+            lvm.extend_logical_volume_by_device('mylv', '/dev/foo')
+            check_call.assert_called_with(['lvextend', 'mylv', '/dev/foo'])
+
+    def test_create_logical_volume_nosize(self):
+        with patch(STORAGE_LINUX_LVM + '.check_call') as check_call:
+            lvm.create_logical_volume('testlv', 'testvg')
+            check_call.assert_called_with([
+                'lvcreate',
+                '--yes',
+                '-l',
+                '100%FREE',
+                '-n', 'testlv', 'testvg'
+            ])
+
+    def test_create_logical_volume_size(self):
+        with patch(STORAGE_LINUX_LVM + '.check_call') as check_call:
+            lvm.create_logical_volume('testlv', 'testvg', '10G')
+            check_call.assert_called_with([
+                'lvcreate',
+                '--yes',
+                '-L',
+                '10G',
+                '-n', 'testlv', 'testvg'
+            ])
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_storage_utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_storage_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf4da919807dba72eb4eeee00c07320b6359b1d7
--- /dev/null
+++ 
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/storage/test_linux_storage_utils.py
@@ -0,0 +1,206 @@
+from mock import patch
+import unittest
+
+import charmhelpers.contrib.storage.linux.utils as storage_utils
+
+# It's a mouthful.
+STORAGE_LINUX_UTILS = 'charmhelpers.contrib.storage.linux.utils'
+
+
+class MiscStorageUtilsTests(unittest.TestCase):
+
+    @patch(STORAGE_LINUX_UTILS + '.check_output')
+    @patch(STORAGE_LINUX_UTILS + '.call')
+    @patch(STORAGE_LINUX_UTILS + '.check_call')
+    def test_zap_disk(self, check_call, call, check_output):
+        """It calls sgdisk correctly to zap disk"""
+        check_output.return_value = b'200\n'
+        storage_utils.zap_disk('/dev/foo')
+        call.assert_any_call(['sgdisk', '--zap-all', '--', '/dev/foo'])
+        call.assert_any_call(['sgdisk', '--clear', '--mbrtogpt',
+                              '--', '/dev/foo'])
+        check_output.assert_any_call(['blockdev', '--getsz', '/dev/foo'])
+        check_call.assert_any_call(['dd', 'if=/dev/zero', 'of=/dev/foo',
+                                    'bs=1M', 'count=1'])
+        check_call.assert_any_call(['dd', 'if=/dev/zero', 'of=/dev/foo',
+                                    'bs=512', 'count=100', 'seek=100'])
+
+    @patch(STORAGE_LINUX_UTILS + '.S_ISBLK')
+    @patch('os.path.exists')
+    @patch('os.stat')
+    def test_is_block_device(self, stat, exists, S_ISBLK):
+        """It detects device node is block device"""
+        class fake_stat:
+            st_mode = True
+        S_ISBLK.return_value = fake_stat()
+        exists.return_value = True
+        self.assertTrue(storage_utils.is_block_device('/dev/foo'))
+
+    @patch(STORAGE_LINUX_UTILS + '.S_ISBLK')
+    @patch('os.path.exists')
+    @patch('os.stat')
+    def test_is_block_device_does_not_exist(self, stat, exists, S_ISBLK):
+        """It reports a missing device node as not a block device"""
+        class fake_stat:
+            st_mode = True
+        S_ISBLK.return_value = fake_stat()
+        exists.return_value = False
+        self.assertFalse(storage_utils.is_block_device('/dev/foo'))
+
+    @patch(STORAGE_LINUX_UTILS + '.check_output')
+    def test_is_device_mounted(self, check_output):
+        """It detects mounted devices as mounted."""
+        check_output.return_value = (
+            b'NAME="sda" MAJ:MIN="8:16" RM="0" SIZE="238.5G" RO="0" TYPE="disk" MOUNTPOINT="/tmp"\n')
+        result = storage_utils.is_device_mounted('/dev/sda')
+        self.assertTrue(result)
+
+    @patch(STORAGE_LINUX_UTILS + '.check_output')
+    def test_is_device_mounted_partition(self, check_output):
+        """It detects mounted partitions as mounted."""
+        check_output.return_value = (
+            b'NAME="sda1" MAJ:MIN="8:16" RM="0" SIZE="238.5G" RO="0" TYPE="disk" MOUNTPOINT="/tmp"\n')
+        result = storage_utils.is_device_mounted('/dev/sda1')
+        self.assertTrue(result)
+
+    @patch(STORAGE_LINUX_UTILS + '.check_output')
+    def test_is_device_mounted_partition_with_device(self, check_output):
+        """It detects mounted devices as mounted if "mount" shows only a
+        partition as mounted."""
+        check_output.return_value = (
+            b'NAME="sda1" MAJ:MIN="8:16" RM="0" SIZE="238.5G" RO="0" TYPE="disk" MOUNTPOINT="/tmp"\n')
+        result = storage_utils.is_device_mounted('/dev/sda')
+        self.assertTrue(result)
+
+    @patch(STORAGE_LINUX_UTILS + '.check_output')
+    def test_is_device_mounted_not_mounted(self, check_output):
+        """It detects unmounted devices as not mounted."""
+        check_output.return_value = (
+            b'NAME="sda" MAJ:MIN="8:16" RM="0" SIZE="238.5G" RO="0" TYPE="disk" MOUNTPOINT=""\n')
+        result = storage_utils.is_device_mounted('/dev/sda')
+        self.assertFalse(result)
+
+    @patch(STORAGE_LINUX_UTILS + '.check_output')
+    def test_is_device_mounted_not_mounted_partition(self, check_output):
+        """It detects unmounted partitions as not mounted."""
+        check_output.return_value 
= ( + b'NAME="sda" MAJ:MIN="8:16" RM="0" SIZE="238.5G" RO="0" TYPE="disk" MOUNTPOINT=""\n') + result = storage_utils.is_device_mounted('/dev/sda1') + self.assertFalse(result) + + @patch(STORAGE_LINUX_UTILS + '.check_output') + def test_is_device_mounted_full_disks(self, check_output): + """It detects mounted full disks as mounted.""" + check_output.return_value = ( + b'NAME="sda" MAJ:MIN="8:16" RM="0" SIZE="238.5G" RO="0" TYPE="disk" MOUNTPOINT="/tmp"\n') + result = storage_utils.is_device_mounted('/dev/sda') + self.assertTrue(result) + + @patch(STORAGE_LINUX_UTILS + '.check_output') + def test_is_device_mounted_cciss(self, check_output): + """It detects mounted cciss partitions as mounted.""" + check_output.return_value = ( + b'NAME="cciss!c0d0" MAJ:MIN="104:0" RM="0" SIZE="273.3G" RO="0" TYPE="disk" MOUNTPOINT="/root"\n') + result = storage_utils.is_device_mounted('/dev/cciss/c0d0') + self.assertTrue(result) + + @patch(STORAGE_LINUX_UTILS + '.check_output') + def test_is_device_mounted_cciss_not_mounted(self, check_output): + """It detects unmounted cciss partitions as not mounted.""" + check_output.return_value = ( + b'NAME="cciss!c0d0" MAJ:MIN="104:0" RM="0" SIZE="273.3G" RO="0" TYPE="disk" MOUNTPOINT=""\n') + result = storage_utils.is_device_mounted('/dev/cciss/c0d0') + self.assertFalse(result) + + @patch(STORAGE_LINUX_UTILS + '.check_call') + def test_mkfs_xfs(self, check_call): + storage_utils.mkfs_xfs('/dev/sdb') + check_call.assert_called_with( + ['mkfs.xfs', '-i', 'size=1024', '/dev/sdb'] + ) + + @patch(STORAGE_LINUX_UTILS + '.check_call') + def test_mkfs_xfs_force(self, check_call): + storage_utils.mkfs_xfs('/dev/sdb', force=True) + check_call.assert_called_with( + ['mkfs.xfs', '-f', '-i', 'size=1024', '/dev/sdb'] + ) + + @patch(STORAGE_LINUX_UTILS + '.check_call') + def test_mkfs_xfs_inode_size(self, check_call): + storage_utils.mkfs_xfs('/dev/sdb', inode_size=512) + check_call.assert_called_with( + ['mkfs.xfs', '-i', 'size=512', '/dev/sdb'] + ) + + +class CephLUKSDeviceTestCase(unittest.TestCase): + + @patch.object(storage_utils, '_luks_uuid') + def test_no_luks_header(self, _luks_uuid): + _luks_uuid.return_value = None + self.assertEqual(storage_utils.is_luks_device('/dev/sdb'), False) + + @patch.object(storage_utils, '_luks_uuid') + def test_luks_header(self, _luks_uuid): + _luks_uuid.return_value = '5e1e4c89-4f68-4b9a-bd93-e25eec34e80f' + self.assertEqual(storage_utils.is_luks_device('/dev/sdb'), True) + + +class CephMappedLUKSDeviceTestCase(unittest.TestCase): + + @patch.object(storage_utils.os, 'walk') + @patch.object(storage_utils, '_luks_uuid') + def test_no_luks_header_not_mapped(self, _luks_uuid, _walk): + _luks_uuid.return_value = None + + def os_walk_side_effect(path): + return { + '/sys/class/block/sdb/holders/': iter([('', [], [])]), + }[path] + _walk.side_effect = os_walk_side_effect + + self.assertEqual(storage_utils.is_mapped_luks_device('/dev/sdb'), False) + + @patch.object(storage_utils.os, 'walk') + @patch.object(storage_utils, '_luks_uuid') + def test_luks_header_mapped(self, _luks_uuid, _walk): + _luks_uuid.return_value = 'db76d142-4782-42f2-84c6-914f9db889a0' + + def os_walk_side_effect(path): + return { + '/sys/class/block/sdb/holders/': iter([('', ['dm-0'], [])]), + }[path] + _walk.side_effect = os_walk_side_effect + + self.assertEqual(storage_utils.is_mapped_luks_device('/dev/sdb'), True) + + @patch.object(storage_utils.os, 'walk') + @patch.object(storage_utils, '_luks_uuid') + def test_luks_header_not_mapped(self, _luks_uuid, _walk): + 
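# A LUKS UUID is present but sysfs lists no holders, so the device
+        # should not be reported as a *mapped* LUKS device.
+        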
_luks_uuid.return_value = 'db76d142-4782-42f2-84c6-914f9db889a0' + + def os_walk_side_effect(path): + return { + '/sys/class/block/sdb/holders/': iter([('', [], [])]), + }[path] + _walk.side_effect = os_walk_side_effect + + self.assertEqual(storage_utils.is_mapped_luks_device('/dev/sdb'), False) + + @patch.object(storage_utils.os, 'walk') + @patch.object(storage_utils, '_luks_uuid') + def test_no_luks_header_mapped(self, _luks_uuid, _walk): + """ + This is an edge case where a device is mapped (i.e. used for something + else) but has no LUKS header. Should be handled by other checks. + """ + _luks_uuid.return_value = None + + def os_walk_side_effect(path): + return { + '/sys/class/block/sdb/holders/': iter([('', ['dm-0'], [])]), + }[path] + _walk.side_effect = os_walk_side_effect + + self.assertEqual(storage_utils.is_mapped_luks_device('/dev/sdb'), False) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/test_contexts.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/test_contexts.py new file mode 100644 index 0000000000000000000000000000000000000000..1abc3d5786995dd7a7f791b5b466661663679d4e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/test_contexts.py @@ -0,0 +1,252 @@ +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers +import mock +import os +import shutil +import tempfile +import unittest +import yaml + +import six + +import charmhelpers.contrib.templating.contexts + + +class JujuState2YamlTestCase(unittest.TestCase): + maxDiff = None + + unit_data = { + 'private-address': '10.0.3.2', + 'public-address': '123.123.123.123', + } + + def setUp(self): + super(JujuState2YamlTestCase, self).setUp() + + # Hookenv patches (a single patch to hookenv doesn't work): + patcher = mock.patch('charmhelpers.core.hookenv.config') + self.mock_config = patcher.start() + self.addCleanup(patcher.stop) + patcher = mock.patch('charmhelpers.core.hookenv.relation_get') + self.mock_relation_get = patcher.start() + self.mock_relation_get.return_value = {} + self.addCleanup(patcher.stop) + patcher = mock.patch('charmhelpers.core.hookenv.relations') + self.mock_relations = patcher.start() + self.mock_relations.return_value = { + 'wsgi-file': {}, + 'website': {}, + 'nrpe-external-master': {}, + } + self.addCleanup(patcher.stop) + patcher = mock.patch('charmhelpers.core.hookenv.relation_type') + self.mock_relation_type = patcher.start() + self.mock_relation_type.return_value = None + self.addCleanup(patcher.stop) + patcher = mock.patch('charmhelpers.core.hookenv.local_unit') + self.mock_local_unit = patcher.start() + self.addCleanup(patcher.stop) + patcher = mock.patch('charmhelpers.core.hookenv.relations_of_type') + self.mock_relations_of_type = patcher.start() + self.addCleanup(patcher.stop) + self.mock_relations_of_type.return_value = [] + + def unit_get_data(argument): + "dummy unit_get that accesses dummy unit data" + return self.unit_data[argument] + + patcher = mock.patch( + 'charmhelpers.core.hookenv.unit_get', unit_get_data) + self.mock_unit_get = patcher.start() + self.addCleanup(patcher.stop) + + # patches specific to this test 
class. + etc_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, etc_dir) + self.context_path = os.path.join(etc_dir, 'some', 'context') + + patcher = mock.patch.object(charmhelpers.contrib.templating.contexts, + 'charm_dir', '/tmp/charm_dir') + patcher.start() + self.addCleanup(patcher.stop) + + def default_context(self): + return { + "charm_dir": "/tmp/charm_dir", + "group_code_owner": "webops_deploy", + "user_code_runner": "ubunet", + "current_relation": {}, + "relations_full": { + 'wsgi-file': {}, + 'website': {}, + 'nrpe-external-master': {}, + }, + "relations": { + 'wsgi-file': [], + 'website': [], + 'nrpe-external-master': [], + }, + "local_unit": "click-index/3", + "unit_private_address": "10.0.3.2", + "unit_public_address": "123.123.123.123", + } + + def test_output_with_empty_relation(self): + self.mock_config.return_value = { + 'group_code_owner': 'webops_deploy', + 'user_code_runner': 'ubunet', + } + self.mock_local_unit.return_value = "click-index/3" + + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + self.context_path) + + with open(self.context_path, 'r') as context_file: + result = yaml.safe_load(context_file.read()) + expected = self.default_context() + self.assertEqual(expected, result) + + def test_output_with_no_relation(self): + self.mock_config.return_value = { + 'group_code_owner': 'webops_deploy', + 'user_code_runner': 'ubunet', + } + self.mock_local_unit.return_value = "click-index/3" + self.mock_relation_get.return_value = None + + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + self.context_path) + + with open(self.context_path, 'r') as context_file: + result = yaml.safe_load(context_file.read()) + expected = self.default_context() + self.assertEqual(expected, result) + + def test_output_with_relation(self): + self.mock_config.return_value = { + 'group_code_owner': 'webops_deploy', + 'user_code_runner': 'ubunet', + } + self.mock_relation_type.return_value = 'wsgi-file' + self.mock_relation_get.return_value = { + 'relation_key1': 'relation_value1', + 'relation_key2': 'relation_value2', + } + self.mock_relations.return_value = { + 'wsgi-file': { + six.u('wsgi-file:0'): { + six.u('gunicorn/1'): { + six.u('private-address'): six.u('10.0.3.99'), + }, + 'click-index/3': { + six.u('wsgi_group'): six.u('ubunet'), + }, + }, + }, + 'website': {}, + 'nrpe-external-master': {}, + } + self.mock_local_unit.return_value = "click-index/3" + + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + self.context_path) + + with open(self.context_path, 'r') as context_file: + result = yaml.safe_load(context_file.read()) + expected = self.default_context() + expected['current_relation'] = { + "relation_key1": "relation_value1", + "relation_key2": "relation_value2", + } + expected["wsgi_file:relation_key1"] = "relation_value1" + expected["wsgi_file:relation_key2"] = "relation_value2" + expected["relations_full"]['wsgi-file'] = { + 'wsgi-file:0': { + 'gunicorn/1': { + six.u('private-address'): six.u('10.0.3.99')}, + 'click-index/3': {six.u('wsgi_group'): six.u('ubunet')}, + }, + } + expected["relations"]["wsgi-file"] = [ + { + '__relid__': 'wsgi-file:0', + '__unit__': 'gunicorn/1', + 'private-address': '10.0.3.99', + } + ] + self.assertEqual(expected, result) + + def test_relation_with_separator(self): + self.mock_config.return_value = { + 'group_code_owner': 'webops_deploy', + 'user_code_runner': 'ubunet', + } + self.mock_relation_type.return_value = 'wsgi-file' + self.mock_relation_get.return_value = { + 'relation_key1': 'relation_value1', + 
'relation_key2': 'relation_value2', + } + self.mock_local_unit.return_value = "click-index/3" + + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + self.context_path, namespace_separator='__') + + with open(self.context_path, 'r') as context_file: + result = yaml.safe_load(context_file.read()) + expected = self.default_context() + expected['current_relation'] = { + "relation_key1": "relation_value1", + "relation_key2": "relation_value2", + } + expected["wsgi_file__relation_key1"] = "relation_value1" + expected["wsgi_file__relation_key2"] = "relation_value2" + self.assertEqual(expected, result) + + def test_keys_with_hyphens(self): + self.mock_config.return_value = { + 'group_code_owner': 'webops_deploy', + 'user_code_runner': 'ubunet', + 'private-address': '10.1.1.10', + } + self.mock_local_unit.return_value = "click-index/3" + self.mock_relation_get.return_value = None + + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + self.context_path) + + with open(self.context_path, 'r') as context_file: + result = yaml.safe_load(context_file.read()) + expected = self.default_context() + expected["private-address"] = "10.1.1.10" + self.assertEqual(expected, result) + + def test_keys_with_hypens_not_allowed_in_keys(self): + self.mock_config.return_value = { + 'group_code_owner': 'webops_deploy', + 'user_code_runner': 'ubunet', + 'private-address': '10.1.1.10', + } + self.mock_local_unit.return_value = "click-index/3" + self.mock_relation_type.return_value = 'wsgi-file' + self.mock_relation_get.return_value = { + 'relation-key1': 'relation_value1', + 'relation-key2': 'relation_value2', + } + + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + self.context_path, allow_hyphens_in_keys=False, + namespace_separator='__') + + with open(self.context_path, 'r') as context_file: + result = yaml.safe_load(context_file.read()) + expected = self.default_context() + expected["private_address"] = "10.1.1.10" + expected["wsgi_file__relation_key1"] = "relation_value1" + expected["wsgi_file__relation_key2"] = "relation_value2" + expected['current_relation'] = { + "relation-key1": "relation_value1", + "relation-key2": "relation_value2", + } + self.assertEqual(expected, result) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/test_jinja.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/test_jinja.py new file mode 100644 index 0000000000000000000000000000000000000000..00b0d47f257feda8954ff24672dbcc763aa45f41 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/test_jinja.py @@ -0,0 +1,48 @@ +import tempfile +import os + +from shutil import rmtree +from testtools import TestCase + +from charmhelpers.contrib.templating.jinja import render + + +SIMPLE_TEMPLATE = "{{ somevar }}" + + +LOOP_TEMPLATE = "{% for i in somevar %}{{ i }}{% endfor %}" + + +class Jinja2Test(TestCase): + + def setUp(self): + super(Jinja2Test, self).setUp() + # Create a "templates directory" in temp + self.templates_dir = tempfile.mkdtemp() + + def tearDown(self): + super(Jinja2Test, self).tearDown() + # Remove the temporary directory so as not to pollute /tmp + rmtree(self.templates_dir) + + def _write_template_to_file(self, name, contents): + path = os.path.join(self.templates_dir, name) + with open(path, "w") as thefile: + thefile.write(contents) + + def test_render_simple_template(self): + name = "simple" + self._write_template_to_file(name, SIMPLE_TEMPLATE) + expected = 
"hello" + result = render( + name, {"somevar": expected}, template_dir=self.templates_dir) + self.assertEqual(expected, result) + + def test_render_loop_template(self): + name = "loop" + self._write_template_to_file(name, LOOP_TEMPLATE) + expected = "12345" + result = render( + name, {"somevar": ["1", "2", "3", "4", "5"]}, + template_dir=self.templates_dir) + self.assertEqual(expected, result) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/test_pyformat.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/test_pyformat.py new file mode 100644 index 0000000000000000000000000000000000000000..0873a4ae25672b19101872c2ba98ca9586e37ae2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/templating/test_pyformat.py @@ -0,0 +1,36 @@ +from mock import patch +from testtools import TestCase + +from charmhelpers.contrib.templating.pyformat import render +from charmhelpers.core import hookenv + + +class PyFormatTest(TestCase): + @patch.object(hookenv, 'execution_environment') + def test_renders_using_environment(self, execution_environment): + execution_environment.return_value = { + 'foo': 'FOO', + } + + self.assertEqual(render('foo is {foo}'), 'foo is FOO') + + @patch.object(hookenv, 'execution_environment') + def test_extra_overrides(self, execution_environment): + execution_environment.return_value = { + 'foo': 'FOO', + } + + extra = {'foo': 'BAR'} + + self.assertEqual(render('foo is {foo}', extra=extra), 'foo is BAR') + + @patch.object(hookenv, 'execution_environment') + def test_kwargs_overrides(self, execution_environment): + execution_environment.return_value = { + 'foo': 'FOO', + } + + extra = {'foo': 'BAR'} + + self.assertEqual( + render('foo is {foo}', extra=extra, foo='BAZ'), 'foo is BAZ') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/unison/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/unison/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/unison/test_unison.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/unison/test_unison.py new file mode 100644 index 0000000000000000000000000000000000000000..a68114c5716be87cb1b1504e6b7850f12644f1f5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/contrib/unison/test_unison.py @@ -0,0 +1,441 @@ + +from mock import call, patch, MagicMock, sentinel +from testtools import TestCase + +from tests.helpers import patch_open, FakeRelation +from charmhelpers.contrib import unison + + +FAKE_RELATION_ENV = { + 'cluster:0': ['cluster/0', 'cluster/1'] +} + + +TO_PATCH = [ + 'log', 'check_call', 'check_output', 'relation_ids', + 'related_units', 'relation_get', 'relation_set', + 'hook_name', 'unit_private_ip', +] + +FAKE_LOCAL_UNIT = 'test_host' +FAKE_RELATION = { + 'cluster:0': { + 'cluster/0': { + 'private-address': 'cluster0.local', + 'ssh_authorized_hosts': 'someotherhost:test_host' + }, + 'clsuter/1': { + 'private-address': 'cluster1.local', + 'ssh_authorized_hosts': 'someotherhost' + }, + 'clsuter/3': { + 'private-address': 'cluster2.local', + 'ssh_authorized_hosts': 'someotherthirdhost' + }, + + }, + +} + + +class UnisonHelperTests(TestCase): + def setUp(self): + super(UnisonHelperTests, self).setUp() + for m in TO_PATCH: + 
setattr(self, m, self._patch(m)) + self.fake_relation = FakeRelation(FAKE_RELATION) + self.unit_private_ip.return_value = FAKE_LOCAL_UNIT + self.relation_get.side_effect = self.fake_relation.get + self.relation_ids.side_effect = self.fake_relation.relation_ids + self.related_units.side_effect = self.fake_relation.related_units + + def _patch(self, method): + _m = patch('charmhelpers.contrib.unison.' + method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + @patch('pwd.getpwnam') + def test_get_homedir(self, pwnam): + fake_user = MagicMock() + fake_user.pw_dir = '/home/foo' + pwnam.return_value = fake_user + self.assertEquals(unison.get_homedir('foo'), + '/home/foo') + + @patch('pwd.getpwnam') + def test_get_homedir_no_user(self, pwnam): + e = KeyError + pwnam.side_effect = e + self.assertRaises(Exception, unison.get_homedir, user='foo') + + def _ensure_calls_in(self, calls): + for _call in calls: + self.assertIn(call(_call), self.check_call.call_args_list) + + @patch('os.path.isfile') + def test_create_private_key_rsa(self, isfile): + create_cmd = [ + 'ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048', + '-f', '/home/foo/.ssh/id_rsa'] + + def _ensure_perms(): + cmds = [ + ['chown', 'foo', '/home/foo/.ssh/id_rsa'], + ['chmod', '0600', '/home/foo/.ssh/id_rsa'], + ] + self._ensure_calls_in(cmds) + + isfile.return_value = False + unison.create_private_key( + user='foo', priv_key_path='/home/foo/.ssh/id_rsa') + self.assertIn(call(create_cmd), self.check_call.call_args_list) + _ensure_perms() + self.check_call.call_args_list = [] + + isfile.return_value = True + unison.create_private_key( + user='foo', priv_key_path='/home/foo/.ssh/id_rsa') + self.assertNotIn(call(create_cmd), self.check_call.call_args_list) + _ensure_perms() + + @patch('os.path.isfile') + def test_create_private_key_ecdsa(self, isfile): + create_cmd = [ + 'ssh-keygen', '-q', '-N', '', '-t', 'ecdsa', '-b', '521', + '-f', '/home/foo/.ssh/id_ecdsa'] + + def _ensure_perms(): + cmds = [ + ['chown', 'foo', '/home/foo/.ssh/id_ecdsa'], + ['chmod', '0600', '/home/foo/.ssh/id_ecdsa'], + ] + self._ensure_calls_in(cmds) + + isfile.return_value = False + unison.create_private_key( + user='foo', + priv_key_path='/home/foo/.ssh/id_ecdsa', + key_type='ecdsa') + self.assertIn(call(create_cmd), self.check_call.call_args_list) + _ensure_perms() + self.check_call.call_args_list = [] + + isfile.return_value = True + unison.create_private_key( + user='foo', + priv_key_path='/home/foo/.ssh/id_ecdsa', + key_type='ecdsa') + self.assertNotIn(call(create_cmd), self.check_call.call_args_list) + _ensure_perms() + + @patch('os.path.isfile') + def test_create_public_key(self, isfile): + create_cmd = ['ssh-keygen', '-y', '-f', '/home/foo/.ssh/id_rsa'] + isfile.return_value = True + unison.create_public_key( + user='foo', priv_key_path='/home/foo/.ssh/id_rsa', + pub_key_path='/home/foo/.ssh/id_rsa.pub') + self.assertNotIn(call(create_cmd), self.check_output.call_args_list) + + isfile.return_value = False + with patch_open() as (_open, _file): + self.check_output.return_value = b'fookey' + unison.create_public_key( + user='foo', priv_key_path='/home/foo/.ssh/id_rsa', + pub_key_path='/home/foo/.ssh/id_rsa.pub') + self.assertIn(call(create_cmd), self.check_output.call_args_list) + _open.assert_called_with('/home/foo/.ssh/id_rsa.pub', 'wb') + _file.write.assert_called_with(b'fookey') + + @patch('os.mkdir') + @patch('os.path.isdir') + @patch.object(unison, 'get_homedir') + @patch.multiple(unison, create_private_key=MagicMock(), + 
create_public_key=MagicMock()) + def test_get_keypair(self, get_homedir, isdir, mkdir): + get_homedir.return_value = '/home/foo' + isdir.return_value = False + with patch_open() as (_open, _file): + _file.read.side_effect = [ + 'foopriv', 'foopub' + ] + priv, pub = unison.get_keypair('adam') + for f in ['/home/foo/.ssh/id_rsa', + '/home/foo/.ssh/id_rsa.pub']: + self.assertIn(call(f, 'r'), _open.call_args_list) + self.assertEquals(priv, 'foopriv') + self.assertEquals(pub, 'foopub') + + @patch.object(unison, 'get_homedir') + def test_write_auth_keys(self, get_homedir): + get_homedir.return_value = '/home/foo' + keys = [ + 'ssh-rsa AAAB3Nz adam', + 'ssh-rsa ALKJFz adam@whereschuck.org', + ] + with patch_open() as (_open, _file): + unison.write_authorized_keys('foo', keys) + _open.assert_called_with('/home/foo/.ssh/authorized_keys', 'w') + for k in keys: + self.assertIn(call('%s\n' % k), _file.write.call_args_list) + + @patch.object(unison, 'get_homedir') + def test_write_known_hosts(self, get_homedir): + get_homedir.return_value = '/home/foo' + keys = [ + '10.0.0.1 ssh-rsa KJDSJF=', + '10.0.0.2 ssh-rsa KJDSJF=', + ] + self.check_output.side_effect = keys + with patch_open() as (_open, _file): + unison.write_known_hosts('foo', ['10.0.0.1', '10.0.0.2']) + _open.assert_called_with('/home/foo/.ssh/known_hosts', 'w') + for k in keys: + self.assertIn(call('%s\n' % k), _file.write.call_args_list) + + @patch.object(unison, 'remove_password_expiry') + @patch.object(unison, 'pwgen') + @patch.object(unison, 'add_user_to_group') + @patch.object(unison, 'adduser') + def test_ensure_user(self, adduser, to_group, pwgen, + remove_password_expiry): + pwgen.return_value = sentinel.password + unison.ensure_user('foo', group='foobar') + adduser.assert_called_with('foo', sentinel.password) + to_group.assert_called_with('foo', 'foobar') + remove_password_expiry.assert_called_with('foo') + + @patch.object(unison, '_run_as_user') + def test_run_as_user(self, _run): + with patch.object(unison, '_run_as_user') as _run: + fake_preexec = MagicMock() + _run.return_value = fake_preexec + unison.run_as_user('foo', ['echo', 'foo']) + self.check_output.assert_called_with( + ['echo', 'foo'], preexec_fn=fake_preexec, cwd='/') + + @patch('pwd.getpwnam') + def test_run_user_not_found(self, getpwnam): + e = KeyError + getpwnam.side_effect = e + self.assertRaises(Exception, unison._run_as_user, 'nouser') + + @patch('os.setuid') + @patch('os.setgid') + @patch('os.environ', spec=dict) + @patch('pwd.getpwnam') + def test_run_as_user_preexec(self, pwnam, environ, setgid, setuid): + fake_env = {'HOME': '/root'} + environ.__getitem__ = MagicMock() + environ.__setitem__ = MagicMock() + environ.__setitem__.side_effect = fake_env.__setitem__ + environ.__getitem__.side_effect = fake_env.__getitem__ + + fake_user = MagicMock() + fake_user.pw_uid = 1010 + fake_user.pw_gid = 1011 + fake_user.pw_dir = '/home/foo' + pwnam.return_value = fake_user + inner = unison._run_as_user('foo') + self.assertEquals(fake_env['HOME'], '/home/foo') + inner() + setgid.assert_called_with(1011) + setuid.assert_called_with(1010) + + @patch('os.setuid') + @patch('os.setgid') + @patch('os.environ', spec=dict) + @patch('pwd.getpwnam') + def test_run_as_user_preexec_with_group(self, pwnam, environ, setgid, setuid): + fake_env = {'HOME': '/root'} + environ.__getitem__ = MagicMock() + environ.__setitem__ = MagicMock() + environ.__setitem__.side_effect = fake_env.__setitem__ + environ.__getitem__.side_effect = fake_env.__getitem__ + + fake_user = MagicMock() + 
fake_user.pw_uid = 1010 + fake_user.pw_gid = 1011 + fake_user.pw_dir = '/home/foo' + fake_group_id = 2000 + pwnam.return_value = fake_user + inner = unison._run_as_user('foo', gid=fake_group_id) + self.assertEquals(fake_env['HOME'], '/home/foo') + inner() + setgid.assert_called_with(2000) + setuid.assert_called_with(1010) + + @patch.object(unison, 'get_keypair') + @patch.object(unison, 'ensure_user') + def test_ssh_auth_peer_joined(self, ensure_user, get_keypair): + get_keypair.return_value = ('privkey', 'pubkey') + self.hook_name.return_value = 'cluster-relation-joined' + unison.ssh_authorized_peers(peer_interface='cluster', + user='foo', group='foo', + ensure_local_user=True) + self.relation_set.assert_called_with(ssh_pub_key='pubkey') + self.assertFalse(self.relation_get.called) + ensure_user.assert_called_with('foo', 'foo') + get_keypair.assert_called_with('foo') + + @patch.object(unison, 'write_known_hosts') + @patch.object(unison, 'write_authorized_keys') + @patch.object(unison, 'get_keypair') + @patch.object(unison, 'ensure_user') + def test_ssh_auth_peer_changed(self, ensure_user, get_keypair, + write_keys, write_hosts): + get_keypair.return_value = ('privkey', 'pubkey') + + self.hook_name.return_value = 'cluster-relation-changed' + + self.relation_get.side_effect = [ + 'key1', + 'host1', + 'key2', + 'host2', + '', '' + ] + unison.ssh_authorized_peers(peer_interface='cluster', + user='foo', group='foo', + ensure_local_user=True) + + ensure_user.assert_called_with('foo', 'foo') + get_keypair.assert_called_with('foo') + write_keys.assert_called_with('foo', ['key1', 'key2']) + write_hosts.assert_called_with('foo', ['host1', 'host2']) + self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2') + + @patch.object(unison, 'write_known_hosts') + @patch.object(unison, 'write_authorized_keys') + @patch.object(unison, 'get_keypair') + @patch.object(unison, 'ensure_user') + def test_ssh_auth_peer_departed(self, ensure_user, get_keypair, + write_keys, write_hosts): + get_keypair.return_value = ('privkey', 'pubkey') + + self.hook_name.return_value = 'cluster-relation-departed' + + self.relation_get.side_effect = [ + 'key1', + 'host1', + 'key2', + 'host2', + '', '' + ] + unison.ssh_authorized_peers(peer_interface='cluster', + user='foo', group='foo', + ensure_local_user=True) + + ensure_user.assert_called_with('foo', 'foo') + get_keypair.assert_called_with('foo') + write_keys.assert_called_with('foo', ['key1', 'key2']) + write_hosts.assert_called_with('foo', ['host1', 'host2']) + self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2') + + def test_collect_authed_hosts(self): + # only one of the hosts in fake environment has auth'd + # the local peer + hosts = unison.collect_authed_hosts('cluster') + self.assertEquals(hosts, ['cluster0.local']) + + def test_collect_authed_hosts_none_authed(self): + with patch.object(unison, 'relation_get') as relation_get: + relation_get.return_value = '' + hosts = unison.collect_authed_hosts('cluster') + self.assertEquals(hosts, []) + + @patch.object(unison, 'run_as_user') + def test_sync_path_to_host(self, run_as_user, verbose=True, gid=None): + for path in ['/tmp/foo', '/tmp/foo/']: + unison.sync_path_to_host(path=path, host='clusterhost1', + user='foo', verbose=verbose, gid=gid) + ex_cmd = ['unison', '-auto', '-batch=true', + '-confirmbigdel=false', '-fastcheck=true', + '-group=false', '-owner=false', + '-prefer=newer', '-times=true'] + if not verbose: + ex_cmd.append('-silent') + ex_cmd += ['/tmp/foo', 
'ssh://foo@clusterhost1//tmp/foo'] + run_as_user.assert_called_with('foo', ex_cmd, gid) + + @patch.object(unison, 'run_as_user') + def test_sync_path_to_host_error(self, run_as_user): + for i, path in enumerate(['/tmp/foo', '/tmp/foo/']): + run_as_user.side_effect = Exception + if i == 0: + unison.sync_path_to_host(path=path, host='clusterhost1', + user='foo', verbose=True, gid=None) + else: + self.assertRaises(Exception, unison.sync_path_to_host, + path=path, host='clusterhost1', + user='foo', verbose=True, gid=None, + fatal=True) + + ex_cmd = ['unison', '-auto', '-batch=true', + '-confirmbigdel=false', '-fastcheck=true', + '-group=false', '-owner=false', + '-prefer=newer', '-times=true', + '/tmp/foo', 'ssh://foo@clusterhost1//tmp/foo'] + run_as_user.assert_called_with('foo', ex_cmd, None) + + def test_sync_path_to_host_non_verbose(self): + return self.test_sync_path_to_host(verbose=False) + + def test_sync_path_to_host_with_gid(self): + return self.test_sync_path_to_host(gid=111) + + @patch.object(unison, 'sync_path_to_host') + def test_sync_to_peer(self, sync_path_to_host): + paths = ['/tmp/foo1', '/tmp/foo2'] + host = 'host1' + unison.sync_to_peer(host, 'foouser', paths, True) + calls = [call('/tmp/foo1', host, 'foouser', True, None, None, False), + call('/tmp/foo2', host, 'foouser', True, None, None, False)] + sync_path_to_host.assert_has_calls(calls) + + @patch.object(unison, 'sync_path_to_host') + def test_sync_to_peer_with_gid(self, sync_path_to_host): + paths = ['/tmp/foo1', '/tmp/foo2'] + host = 'host1' + unison.sync_to_peer(host, 'foouser', paths, True, gid=111) + calls = [call('/tmp/foo1', host, 'foouser', True, None, 111, False), + call('/tmp/foo2', host, 'foouser', True, None, 111, False)] + sync_path_to_host.assert_has_calls(calls) + + @patch.object(unison, 'collect_authed_hosts') + @patch.object(unison, 'sync_to_peer') + def test_sync_to_peers(self, sync_to_peer, collect_hosts): + collect_hosts.return_value = ['host1', 'host2', 'host3'] + paths = ['/tmp/foo'] + unison.sync_to_peers(peer_interface='cluster', user='foouser', + paths=paths, verbose=True) + calls = [call('host1', 'foouser', ['/tmp/foo'], True, None, None, False), + call('host2', 'foouser', ['/tmp/foo'], True, None, None, False), + call('host3', 'foouser', ['/tmp/foo'], True, None, None, False)] + sync_to_peer.assert_has_calls(calls) + + @patch.object(unison, 'collect_authed_hosts') + @patch.object(unison, 'sync_to_peer') + def test_sync_to_peers_with_gid(self, sync_to_peer, collect_hosts): + collect_hosts.return_value = ['host1', 'host2', 'host3'] + paths = ['/tmp/foo'] + unison.sync_to_peers(peer_interface='cluster', user='foouser', + paths=paths, verbose=True, gid=111) + calls = [call('host1', 'foouser', ['/tmp/foo'], True, None, 111, False), + call('host2', 'foouser', ['/tmp/foo'], True, None, 111, False), + call('host3', 'foouser', ['/tmp/foo'], True, None, 111, False)] + sync_to_peer.assert_has_calls(calls) + + @patch.object(unison, 'collect_authed_hosts') + @patch.object(unison, 'sync_to_peer') + def test_sync_to_peers_with_cmd(self, sync_to_peer, collect_hosts): + collect_hosts.return_value = ['host1', 'host2', 'host3'] + paths = ['/tmp/foo'] + cmd = ['dummy_cmd'] + unison.sync_to_peers(peer_interface='cluster', user='foouser', + paths=paths, verbose=True, cmd=cmd, gid=111) + calls = [call('host1', 'foouser', ['/tmp/foo'], True, cmd, 111, False), + call('host2', 'foouser', ['/tmp/foo'], True, cmd, 111, False), + call('host3', 'foouser', ['/tmp/foo'], True, cmd, 111, False)] + 
sync_to_peer.assert_has_calls(calls) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/coordinator/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/coordinator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/coordinator/test_coordinator.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/coordinator/test_coordinator.py new file mode 100644 index 0000000000000000000000000000000000000000..a57850a406c434eb9fb2a76336cfc9c9bc2c3eaf --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/coordinator/test_coordinator.py @@ -0,0 +1,533 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from datetime import datetime, timedelta +import json +import tempfile +import unittest +from mock import call, MagicMock, patch, sentinel + +from charmhelpers import coordinator +from charmhelpers.core import hookenv + + +class TestCoordinator(unittest.TestCase): + + def setUp(self): + del hookenv._atstart[:] + del hookenv._atexit[:] + hookenv.cache.clear() + coordinator.Singleton._instances.clear() + + def install(patch): + patch.start() + self.addCleanup(patch.stop) + + install(patch.object(hookenv, 'local_unit', return_value='foo/1')) + install(patch.object(hookenv, 'is_leader', return_value=False)) + install(patch.object(hookenv, 'metadata', + return_value={'peers': {'cluster': None}})) + install(patch.object(hookenv, 'log')) + + # Ensure _timestamp always increases. + install(patch.object(coordinator, '_utcnow', + side_effect=self._utcnow)) + + _last_utcnow = datetime(2015, 1, 1, 00, 00) + + def _utcnow(self, ts=coordinator._timestamp): + self._last_utcnow += timedelta(minutes=1) + return self._last_utcnow + + def test_is_singleton(self): + # BaseCoordinator and subclasses are singletons. Placing this + # burden on charm authors is impractical, particularly if + # libraries start wanting to use coordinator instances. + # With singletons, we don't need to worry about sharing state + # between instances or have them stomping on each other when they + # need to serialize their state. + self.assertTrue(coordinator.BaseCoordinator() + is coordinator.BaseCoordinator()) + self.assertTrue(coordinator.Serial() is coordinator.Serial()) + self.assertFalse(coordinator.BaseCoordinator() is coordinator.Serial()) + + @patch.object(hookenv, 'atstart') + def test_implicit_initialize_and_handle(self, atstart): + # When you construct a BaseCoordinator(), its initialize() and + # handle() method are invoked automatically every hook. 
This
+        # is done using hookenv.atstart
+        c = coordinator.BaseCoordinator()
+        atstart.assert_has_calls([call(c.initialize), call(c.handle)])
+
+    @patch.object(hookenv, 'has_juju_version', return_value=False)
+    def test_initialize_enforces_juju_version(self, has_juju_version):
+        c = coordinator.BaseCoordinator()
+        with self.assertRaises(AssertionError):
+            c.initialize()
+        has_juju_version.assert_called_once_with('1.23')
+
+    @patch.object(hookenv, 'atexit')
+    @patch.object(hookenv, 'has_juju_version', return_value=True)
+    @patch.object(hookenv, 'relation_ids')
+    def test_initialize(self, relation_ids, ver, atexit):
+        # The first initialization is done before there is a peer relation.
+        relation_ids.return_value = []
+        c = coordinator.BaseCoordinator()
+
+        with patch.object(c, '_load_state') as _load_state, \
+                patch.object(c, '_emit_state') as _emit_state:  # IGNORE: E127
+            c.initialize()
+            _load_state.assert_called_once_with()
+            _emit_state.assert_called_once_with()
+
+        self.assertEqual(c.relname, 'cluster')
+        self.assertIsNone(c.relid)
+        relation_ids.assert_called_once_with('cluster')
+
+        # Methods installed to save state and release locks if the
+        # hook is successful.
+        atexit.assert_has_calls([call(c._save_state),
+                                 call(c._release_granted)])
+
+        # If we have a peer relation, the id is stored.
+        relation_ids.return_value = ['cluster:1']
+        c = coordinator.BaseCoordinator()
+        with patch.object(c, '_load_state'), patch.object(c, '_emit_state'):
+            c.initialize()
+        self.assertEqual(c.relid, 'cluster:1')
+
+        # If we are already initialized, nothing happens.
+        c.grants = {}
+        c.requests = {}
+        c.initialize()
+
+    def test_acquire(self):
+        c = coordinator.BaseCoordinator()
+        lock = 'mylock'
+        c.grants = {}
+        c.requests = {hookenv.local_unit(): {}}
+
+        # We are not the leader, so the first acquire will return False.
+        self.assertFalse(c.acquire(lock))
+
+        # But the request is in the queue.
+        self.assertTrue(c.requested(lock))
+        ts = c.request_timestamp(lock)
+
+        # Further attempts at acquiring the lock do nothing,
+        # and the timestamp of the request remains unchanged.
+        self.assertFalse(c.acquire(lock))
+        self.assertEqual(ts, c.request_timestamp(lock))
+
+        # Once the leader has granted the lock, acquire returns True.
+        with patch.object(c, 'granted') as granted:
+            granted.return_value = True
+            self.assertTrue(c.acquire(lock))
+            granted.assert_called_once_with(lock)
+
+    def test_acquire_leader(self):
+        # When acquire() is called by the leader, it needs
+        # to make a grant decision immediately. It can't defer
+        # making the decision until a future hook, as no future
+        # hooks will be triggered.
+        hookenv.is_leader.return_value = True
+        c = coordinator.Serial()  # Not Base. Test hooks into default_grant.
+        lock = 'mylock'
+        unit = hookenv.local_unit()
+        c.grants = {}
+        c.requests = {unit: {}}
+        with patch.object(c, 'default_grant') as default_grant:
+            default_grant.side_effect = iter([False, True])
+
+            self.assertFalse(c.acquire(lock))
+            ts = c.request_timestamp(lock)
+
+            self.assertTrue(c.acquire(lock))
+            self.assertEqual(ts, c.request_timestamp(lock))
+
+            # If it is granted, the leader doesn't make a decision again.
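+            # Illustrative sketch (not executed): once granted, the leader's
+            # own grant entry mirrors the request timestamp, e.g.
+            #   c.requests == {'foo/1': {'mylock': ts}}
+            #   c.grants   == {'foo/1': {'mylock': ts}}
+            # so acquire() returns True without consulting default_grant.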
+ self.assertTrue(c.acquire(lock)) + self.assertEqual(ts, c.request_timestamp(lock)) + + self.assertEqual(default_grant.call_count, 2) + + def test_granted(self): + c = coordinator.BaseCoordinator() + unit = hookenv.local_unit() + lock = 'mylock' + ts = coordinator._timestamp() + c.grants = {} + + # Unit makes a request, but it isn't granted + c.requests = {unit: {lock: ts}} + self.assertFalse(c.granted(lock)) + + # Once the leader has granted the request, all good. + # It does this by mirroring the request timestamp. + c.grants = {unit: {lock: ts}} + self.assertTrue(c.granted(lock)) + + # The unit releases the lock by removing the request. + c.requests = {unit: {}} + self.assertFalse(c.granted(lock)) + + # If the unit makes a new request before the leader + # has had a chance to do its housekeeping, the timestamps + # do not match and the lock not considered granted. + ts = coordinator._timestamp() + c.requests = {unit: {lock: ts}} + self.assertFalse(c.granted(lock)) + + # Until the leader gets around to its duties. + c.grants = {unit: {lock: ts}} + self.assertTrue(c.granted(lock)) + + def test_requested(self): + c = coordinator.BaseCoordinator() + lock = 'mylock' + c.requests = {hookenv.local_unit(): {}} + c.grants = {} + + self.assertFalse(c.requested(lock)) + c.acquire(lock) + self.assertTrue(c.requested(lock)) + + def test_request_timestamp(self): + c = coordinator.BaseCoordinator() + lock = 'mylock' + unit = hookenv.local_unit() + + c.requests = {unit: {}} + c.grants = {} + self.assertIsNone(c.request_timestamp(lock)) + + now = datetime.utcnow() + fmt = coordinator._timestamp_format + c.requests = {hookenv.local_unit(): {lock: now.strftime(fmt)}} + + self.assertEqual(c.request_timestamp(lock), now) + + def test_handle_not_leader(self): + c = coordinator.BaseCoordinator() + # If we are not the leader, handle does nothing. We know this, + # because without mocks or initialization it would otherwise crash. + c.handle() + + def test_handle(self): + hookenv.is_leader.return_value = True + lock = 'mylock' + c = coordinator.BaseCoordinator() + c.relid = 'cluster:1' + + ts = coordinator._timestamp + ts1, ts2, ts3 = ts(), ts(), ts() + + # Grant one of these requests. + requests = {'foo/1': {lock: ts1}, + 'foo/2': {lock: ts2}, + 'foo/3': {lock: ts3}} + c.requests = requests.copy() + # Because the existing grant should be released. + c.grants = {'foo/2': {lock: ts()}} # No request, release. + + with patch.object(c, 'grant') as grant: + c.handle() + + # The requests are unchanged. This is normally state on the + # peer relation, and only the units themselves can change it. + self.assertDictEqual(requests, c.requests) + + # The grant without a corresponding requests was released. + self.assertDictEqual({'foo/2': {}}, c.grants) + + # A potential grant was made for each of the outstanding requests. + grant.assert_has_calls([call(lock, 'foo/1'), + call(lock, 'foo/2'), + call(lock, 'foo/3')], any_order=True) + + def test_grant_not_leader(self): + c = coordinator.BaseCoordinator() + c.grant(sentinel.whatever, sentinel.whatever) # Nothing happens. + + def test_grant(self): + hookenv.is_leader.return_value = True + c = coordinator.BaseCoordinator() + c.default_grant = MagicMock() + c.grant_other = MagicMock() + + ts = coordinator._timestamp + ts1, ts2 = ts(), ts() + + c.requests = {'foo/1': {'mylock': ts1, 'other': ts()}, + 'foo/2': {'mylock': ts2}, + 'foo/3': {'mylock': ts()}} + grants = {'foo/1': {'mylock': ts1}} + c.grants = grants.copy() + + # foo/1 already has a granted mylock, so returns True. 
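+        # Sketch of the fixture state this relies on (from the setup above):
+        #   c.requests['foo/1']['mylock'] == ts1 == c.grants['foo/1']['mylock']
+        # Matching timestamps mean the grant is current, so grant() can
+        # short-circuit without calling default_grant.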
+ self.assertTrue(c.grant('mylock', 'foo/1')) + + # foo/2 does not have a granted mylock. default_grant will + # be called to make a decision (no) + c.default_grant.return_value = False + self.assertFalse(c.grant('mylock', 'foo/2')) + self.assertDictEqual(grants, c.grants) + c.default_grant.assert_called_once_with('mylock', 'foo/2', + set(['foo/1']), + ['foo/2', 'foo/3']) + c.default_grant.reset_mock() + + # Lets say yes. + c.default_grant.return_value = True + self.assertTrue(c.grant('mylock', 'foo/2')) + grants = {'foo/1': {'mylock': ts1}, 'foo/2': {'mylock': ts2}} + self.assertDictEqual(grants, c.grants) + c.default_grant.assert_called_once_with('mylock', 'foo/2', + set(['foo/1']), + ['foo/2', 'foo/3']) + + # The other lock has custom logic, in the form of the overridden + # grant_other method. + c.grant_other.return_value = False + self.assertFalse(c.grant('other', 'foo/1')) + c.grant_other.assert_called_once_with('other', 'foo/1', + set(), ['foo/1']) + + # If there is no request, grant returns False + c.grant_other.return_value = True + self.assertFalse(c.grant('other', 'foo/2')) + + def test_released(self): + c = coordinator.BaseCoordinator() + with patch.object(c, 'msg') as msg: + c.released('foo/2', 'mylock', coordinator._utcnow()) + expected = 'Leader released mylock from foo/2, held 0:01:00' + msg.assert_called_once_with(expected) + + def test_require(self): + c = coordinator.BaseCoordinator() + c.acquire = MagicMock() + c.granted = MagicMock() + guard = MagicMock() + + wrapped = MagicMock() + + @c.require('mylock', guard) + def func(*args, **kw): + wrapped(*args, **kw) + + # If the lock is granted, the wrapped function is called. + c.granted.return_value = True + func(arg=True) + wrapped.assert_called_once_with(arg=True) + wrapped.reset_mock() + + # If the lock is not granted, and the guard returns False, + # the lock is not acquired. + c.acquire.return_value = False + c.granted.return_value = False + guard.return_value = False + func() + self.assertFalse(wrapped.called) + self.assertFalse(c.acquire.called) + + # If the lock is not granted, and the guard returns True, + # the lock is acquired. But the function still isn't called if + # it cannot be acquired immediately. + guard.return_value = True + func() + self.assertFalse(wrapped.called) + c.acquire.assert_called_once_with('mylock') + + # Finally, if the lock is not granted, and the guard returns True, + # and the lock acquired immediately, the function is called. + c.acquire.return_value = True + func(sentinel.arg) + wrapped.assert_called_once_with(sentinel.arg) + + def test_msg(self): + c = coordinator.BaseCoordinator() + # Just a wrapper around hookenv.log + c.msg('hi') + hookenv.log.assert_called_once_with('coordinator.BaseCoordinator hi', + level=hookenv.INFO) + + def test_name(self): + # We use the class name in a few places to avoid conflicts. + # We assume we won't be using multiple BaseCoordinator subclasses + # with the same name at the same time. + c = coordinator.BaseCoordinator() + self.assertEqual(c._name(), 'BaseCoordinator') + c = coordinator.Serial() + self.assertEqual(c._name(), 'Serial') + + @patch.object(hookenv, 'leader_get') + def test_load_state(self, leader_get): + c = coordinator.BaseCoordinator() + unit = hookenv.local_unit() + + # c.granted is just the leader_get decoded. + leader_get.return_value = '{"json": true}' + c._load_state() + self.assertDictEqual(c.grants, {'json': True}) + + # With no relid, there is no peer relation so request state + # is pulled from a local stash. 
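+        # (The stash is a JSON file named after the coordinator class --
+        #  '.charmhelpers.coordinator.BaseCoordinator' here; see
+        #  test_local_state_filename below.)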
+        with patch.object(c, '_load_local_state') as loc_state:
+            loc_state.return_value = {'local': True}
+            c._load_state()
+            self.assertDictEqual(c.requests, {unit: {'local': True}})
+
+        # With a relid, request details are pulled from the peer relation.
+        # If there is no data in the peer relation from the local unit,
+        # we still pull it from the local stash as it means this is the
+        # first time we have joined.
+        c.relid = 'cluster:1'
+        with patch.object(c, '_load_local_state') as loc_state, \
+                patch.object(c, '_load_peer_state') as peer_state:
+            loc_state.return_value = {'local': True}
+            peer_state.return_value = {'foo/2': {'mylock': 'whatever'}}
+            c._load_state()
+            self.assertDictEqual(c.requests, {unit: {'local': True},
+                                              'foo/2': {'mylock': 'whatever'}})
+
+        # If there are local details in the peer relation, the local
+        # stash is ignored.
+        with patch.object(c, '_load_local_state') as loc_state, \
+                patch.object(c, '_load_peer_state') as peer_state:
+            loc_state.return_value = {'local': True}
+            peer_state.return_value = {unit: {},
+                                       'foo/2': {'mylock': 'whatever'}}
+            c._load_state()
+            self.assertDictEqual(c.requests, {unit: {},
+                                              'foo/2': {'mylock': 'whatever'}})
+
+    def test_emit_state(self):
+        c = coordinator.BaseCoordinator()
+        unit = hookenv.local_unit()
+        c.requests = {unit: {'lock_a': sentinel.ts,
+                             'lock_b': sentinel.ts,
+                             'lock_c': sentinel.ts}}
+        c.grants = {unit: {'lock_a': sentinel.ts,
+                           'lock_b': sentinel.ts2}}
+        with patch.object(c, 'msg') as msg:
+            c._emit_state()
+            msg.assert_has_calls([call('Granted lock_a'),
+                                  call('Waiting on lock_b'),
+                                  call('Waiting on lock_c')],
+                                 any_order=True)
+
+    @patch.object(hookenv, 'relation_set')
+    @patch.object(hookenv, 'leader_set')
+    def test_save_state(self, leader_set, relation_set):
+        c = coordinator.BaseCoordinator()
+        unit = hookenv.local_unit()
+        c.grants = {'directdump': True}
+        c.requests = {unit: 'data1', 'foo/2': 'data2'}
+
+        # grants is dumped to leadership settings, if the unit is leader.
+        with patch.object(c, '_save_local_state') as save_loc:
+            c._save_state()
+            self.assertFalse(leader_set.called)
+            hookenv.is_leader.return_value = True
+            c._save_state()
+            leader_set.assert_called_once_with({c.key: '{"directdump": true}'})
+
+        # If there is no relation id, the local unit's requests are dumped
+        # to a local stash.
+        with patch.object(c, '_save_local_state') as save_loc:
+            c._save_state()
+            save_loc.assert_called_once_with('data1')
+
+        # If there is a relation id, the local unit's requests are dumped
+        # to the peer relation.
+        with patch.object(c, '_save_local_state') as save_loc:
+            c.relid = 'cluster:1'
+            c._save_state()
+            self.assertFalse(save_loc.called)
+            relation_set.assert_called_once_with(
+                c.relid, relation_settings={c.key: '"data1"'})  # JSON encoded
+
+    @patch.object(hookenv, 'relation_get')
+    @patch.object(hookenv, 'related_units')
+    def test_load_peer_state(self, related_units, relation_get):
+        # Standard relation-get loops, decoding results from JSON.
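+        # Illustrative shape of the result (a sketch -- _load_peer_state is
+        # a private helper): one JSON-decoded dict per unit, keyed by unit
+        # name, e.g. {'foo/2': {'foo/2': True}, 'foo/3': {'foo/3': True}}.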
+ c = coordinator.BaseCoordinator() + c.key = sentinel.key + c.relid = sentinel.relid + related_units.return_value = ['foo/2', 'foo/3'] + d = {'foo/1': {'foo/1': True}, + 'foo/2': {'foo/2': True}, + 'foo/3': {'foo/3': True}} + + def _get(key, unit, relid): + assert key == sentinel.key + assert relid == sentinel.relid + return json.dumps(d[unit]) + relation_get.side_effect = _get + + self.assertDictEqual(c._load_peer_state(), d) + + def test_local_state_filename(self): + c = coordinator.BaseCoordinator() + self.assertEqual(c._local_state_filename(), + '.charmhelpers.coordinator.BaseCoordinator') + + def test_load_local_state(self): + c = coordinator.BaseCoordinator() + with tempfile.NamedTemporaryFile(mode='w') as f: + with patch.object(c, '_local_state_filename') as fn: + fn.return_value = f.name + d = 'some data' + json.dump(d, f) + f.flush() + d2 = c._load_local_state() + self.assertEqual(d, d2) + + def test_save_local_state(self): + c = coordinator.BaseCoordinator() + with tempfile.NamedTemporaryFile(mode='r') as f: + with patch.object(c, '_local_state_filename') as fn: + fn.return_value = f.name + c._save_local_state('some data') + self.assertEqual(json.load(f), 'some data') + + def test_release_granted(self): + c = coordinator.BaseCoordinator() + unit = hookenv.local_unit() + c.requests = {unit: {'lock1': sentinel.ts, 'lock2': sentinel.ts}, + 'foo/2': {'lock1': sentinel.ts}} + c.grants = {unit: {'lock1': sentinel.ts}, + 'foo/2': {'lock1': sentinel.ts}} + # The granted lock for the local unit is released. + c._release_granted() + self.assertDictEqual(c.requests, {unit: {'lock2': sentinel.ts}, + 'foo/2': {'lock1': sentinel.ts}}) + + def test_implicit_peer_relation_name(self): + self.assertEqual(coordinator._implicit_peer_relation_name(), + 'cluster') + + def test_default_grant(self): + c = coordinator.Serial() + # Lock not granted. First in the queue. + self.assertTrue(c.default_grant(sentinel.lock, sentinel.u1, + set(), [sentinel.u1, sentinel.u2])) + + # Lock not granted. Later in the queue. 
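+        # default_grant is plain FIFO: it returns True only when no unit
+        # currently holds the lock and the requesting unit is first in the
+        # queue (the first case above); every other combination is refused.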
+ self.assertFalse(c.default_grant(sentinel.lock, sentinel.u1, + set(), [sentinel.u2, sentinel.u1])) + + # Lock already granted + self.assertFalse(c.default_grant(sentinel.lock, sentinel.u1, + set([sentinel.u2]), [sentinel.u1])) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/cloud_controller_ng.yml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/cloud_controller_ng.yml new file mode 100644 index 0000000000000000000000000000000000000000..7f72f899dc890e3604e7bac51659ad09915c6c6c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/cloud_controller_ng.yml @@ -0,0 +1,173 @@ +--- +# TODO cc_ip cc public ip +local_route: {{ domain }} +port: {{ cc_port }} +pid_filename: /var/vcap/sys/run/cloud_controller_ng/cloud_controller_ng.pid +development_mode: false + +message_bus_servers: + - nats://{{ nats['user'] }}:{{ nats['password'] }}@{{ nats['address'] }}:{{ nats['port'] }} + +external_domain: + - api.{{ domain }} + +system_domain_organization: {{ default_organization }} +system_domain: {{ domain }} +app_domains: [ {{ domain }} ] +srv_api_uri: http://api.{{ domain }} + +default_app_memory: 1024 + +cc_partition: default + +bootstrap_admin_email: admin@{{ default_organization }} + +bulk_api: + auth_user: bulk_api + auth_password: "Password" + +nginx: + use_nginx: false + instance_socket: "/var/vcap/sys/run/cloud_controller_ng/cloud_controller.sock" + +index: 1 +name: cloud_controller_ng + +info: + name: vcap + build: "2222" + version: 2 + support_address: http://support.cloudfoundry.com + description: Cloud Foundry sponsored by Pivotal + api_version: 2.0.0 + + +directories: + tmpdir: /var/vcap/data/cloud_controller_ng/tmp + + +logging: + file: /var/vcap/sys/log/cloud_controller_ng/cloud_controller_ng.log + + syslog: vcap.cloud_controller_ng + + level: debug2 + max_retries: 1 + + + + + +db: &db + database: sqlite:///var/lib/cloudfoundry/cfcloudcontroller/db/cc.db + max_connections: 25 + pool_timeout: 10 + log_level: debug2 + + +login: + url: http://uaa.{{ domain }} + +uaa: + url: http://uaa.{{ domain }} + resource_id: cloud_controller + #symmetric_secret: cc-secret + verification_key: | + -----BEGIN PUBLIC KEY----- + MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDHFr+KICms+tuT1OXJwhCUmR2d + KVy7psa8xzElSyzqx7oJyfJ1JZyOzToj9T5SfTIq396agbHJWVfYphNahvZ/7uMX + qHxf+ZH9BL1gk9Y6kCnbM5R60gfwjyW1/dQPjOzn9N394zd2FJoFHwdq9Qs0wBug + spULZVNRxq7veq/fzwIDAQAB + -----END PUBLIC KEY----- + +# App staging parameters +staging: + max_staging_runtime: 900 + auth: + user: + password: "Password" + +maximum_health_check_timeout: 180 + +runtimes_file: /var/lib/cloudfoundry/cfcloudcontroller/jobs/config/runtimes.yml +stacks_file: /var/lib/cloudfoundry/cfcloudcontroller/jobs/config/stacks.yml + +quota_definitions: + free: + non_basic_services_allowed: false + total_services: 2 + total_routes: 1000 + memory_limit: 1024 + paid: + non_basic_services_allowed: true + total_services: 32 + total_routes: 1000 + memory_limit: 204800 + runaway: + non_basic_services_allowed: true + total_services: 500 + total_routes: 1000 + memory_limit: 204800 + trial: + non_basic_services_allowed: false + total_services: 10 + memory_limit: 2048 + 
total_routes: 1000 + trial_db_allowed: true + +default_quota_definition: free + +resource_pool: + minimum_size: 65536 + maximum_size: 536870912 + resource_directory_key: cc-resources + + cdn: + uri: + key_pair_id: + private_key: "" + + fog_connection: {"provider":"Local","local_root":"/var/vcap/nfs/store"} + +packages: + app_package_directory_key: cc-packages + + cdn: + uri: + key_pair_id: + private_key: "" + + fog_connection: {"provider":"Local","local_root":"/var/vcap/nfs/store"} + +droplets: + droplet_directory_key: cc-droplets + + cdn: + uri: + key_pair_id: + private_key: "" + + fog_connection: {"provider":"Local","local_root":"/var/vcap/nfs/store"} + +buildpacks: + buildpack_directory_key: cc-buildpacks + + cdn: + uri: + key_pair_id: + private_key: "" + + fog_connection: {"provider":"Local","local_root":"/var/vcap/nfs/store"} + +db_encryption_key: Password + +trial_db: + guid: "78ad16cf-3c22-4427-a982-b9d35d746914" + +tasks_disabled: false +hm9000_noop: true +flapping_crash_count_threshold: 3 + +disable_custom_buildpacks: false + +broker_client_timeout_seconds: 60 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/fake_cc.yml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/fake_cc.yml new file mode 100644 index 0000000000000000000000000000000000000000..5e3f8b6cd17d1c0426c8d3ae11265da881796104 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/fake_cc.yml @@ -0,0 +1,3 @@ +host: {{nats['host']}} +port: {{nats['port']}} +domain: {{router['domain']}} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/nginx.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/nginx.conf new file mode 100644 index 0000000000000000000000000000000000000000..95e02bf3c85564e768e810e86995deb1a3bc359a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/nginx.conf @@ -0,0 +1,154 @@ +# deployment cloudcontroller nginx.conf +#user vcap vcap; + +error_log /var/vcap/sys/log/nginx_ccng/nginx.error.log; +pid /var/vcap/sys/run/nginx_ccng/nginx.pid; + +events { + worker_connections 8192; + use epoll; +} + +http { + include mime.types; + default_type text/html; + server_tokens off; + variables_hash_max_size 1024; + + log_format main '$host - [$time_local] ' + '"$request" $status $bytes_sent ' + '"$http_referer" "$http_#user_agent" ' + '$proxy_add_x_forwarded_for response_time:$upstream_response_time'; + + access_log /var/vcap/sys/log/nginx_ccng/nginx.access.log main; + + sendfile on; #enable use of sendfile() + tcp_nopush on; + tcp_nodelay on; #disable nagel's algorithm + + keepalive_timeout 75 20; #inherited from router + + client_max_body_size 256M; #already enforced upstream/but doesn't hurt. 
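+  # Requests are proxied to the Cloud Controller over a local unix socket
+  # (see the upstream block below) rather than a TCP backend.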
+ + upstream cloud_controller { + server unix:/var/vcap/sys/run/cloud_controller_ng/cloud_controller.sock; + } + + server { + listen {{ nginx_port }}; + server_name _; + server_name_in_redirect off; + proxy_send_timeout 300; + proxy_read_timeout 300; + + # proxy and log all CC traffic + location / { + access_log /var/vcap/sys/log/nginx_ccng/nginx.access.log main; + proxy_buffering off; + proxy_set_header Host $host; + proxy_set_header X-Real_IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_redirect off; + proxy_connect_timeout 10; + proxy_pass http://cloud_controller; + } + + + # used for x-accel-redirect uri://location/foo.txt + # nginx will serve the file root || location || foo.txt + location /droplets/ { + internal; + root /var/vcap/nfs/store; + } + + + + # used for x-accel-redirect uri://location/foo.txt + # nginx will serve the file root || location || foo.txt + location /cc-packages/ { + internal; + root /var/vcap/nfs/store; + } + + + # used for x-accel-redirect uri://location/foo.txt + # nginx will serve the file root || location || foo.txt + location /cc-droplets/ { + internal; + root /var/vcap/nfs/store; + } + + + location ~ (/apps/.*/application|/v2/apps/.*/bits|/services/v\d+/configurations/.*/serialized/data|/v2/buildpacks/.*/bits) { + # Pass altered request body to this location + upload_pass @cc_uploads; + upload_pass_args on; + + # Store files to this directory + upload_store /var/vcap/data/cloud_controller_ng/tmp/uploads; + + # No limit for output body forwarded to CC + upload_max_output_body_len 0; + + # Allow uploaded files to be read only by #user + #upload_store_access #user:r; + + # Set specified fields in request body + upload_set_form_field "${upload_field_name}_name" $upload_file_name; + upload_set_form_field "${upload_field_name}_path" $upload_tmp_path; + + #forward the following fields from existing body + upload_pass_form_field "^resources$"; + upload_pass_form_field "^_method$"; + + #on any error, delete uploaded files. + upload_cleanup 400-505; + } + + location ~ /staging/(buildpack_cache|droplets)/.*/upload { + + # Allow download the droplets and buildpacks + if ($request_method = GET){ + proxy_pass http://cloud_controller; + } + + # Pass along auth header + set $auth_header $upstream_http_x_auth; + proxy_set_header Authorization $auth_header; + + # Pass altered request body to this location + upload_pass @cc_uploads; + + # Store files to this directory + upload_store /var/vcap/data/cloud_controller_ng/tmp/staged_droplet_uploads; + + # Allow uploaded files to be read only by #user + upload_store_access user:r; + + # Set specified fields in request body + upload_set_form_field "droplet_path" $upload_tmp_path; + + #on any error, delete uploaded files. 
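+        # (backend responses with status 400-505 trigger removal of the
+        #  temporary upload files)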
+ upload_cleanup 400-505; + } + + # Pass altered request body to a backend + location @cc_uploads { + proxy_pass http://unix:/var/vcap/sys/run/cloud_controller_ng/cloud_controller.sock; + } + + location ~ ^/internal_redirect/(.*){ + # only allow internal redirects + internal; + + set $download_url $1; + + #have to manualy pass along auth header + set $auth_header $upstream_http_x_auth; + proxy_set_header Authorization $auth_header; + + # Download the file and send it to client + proxy_pass $download_url; + } + } +} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/test.conf b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/test.conf new file mode 100644 index 0000000000000000000000000000000000000000..bb02adc4a09a2dc7810351bf90a84f6a01d2ee4b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/templates/test.conf @@ -0,0 +1,3 @@ +something +listen {{nginx_port}} +something else diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_files.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_files.py new file mode 100644 index 0000000000000000000000000000000000000000..8b443a23d0bad1515f874e69734880932193778a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_files.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from charmhelpers.core import files + +import mock +import unittest +import tempfile +import os + + +class FileTests(unittest.TestCase): + + @mock.patch("subprocess.check_call") + def test_sed(self, check_call): + files.sed("/tmp/test-sed-file", "replace", "this") + check_call.assert_called_once_with( + ['sed', '-i', '-r', '-e', 's/replace/this/g', + '/tmp/test-sed-file'] + ) + + def test_sed_file(self): + tmp = tempfile.NamedTemporaryFile(mode='w', delete=False) + tmp.write("IPV6=yes") + tmp.close() + + files.sed(tmp.name, "IPV6=.*", "IPV6=no") + + with open(tmp.name) as tmp: + self.assertEquals(tmp.read(), "IPV6=no") + + os.unlink(tmp.name) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_fstab.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_fstab.py new file mode 100644 index 0000000000000000000000000000000000000000..c25844866926586e9caefcfb43b30e92ec9edf39 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_fstab.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from charmhelpers.core.fstab import Fstab +from nose.tools import (assert_is, + assert_is_not, + assert_equal) +import unittest +import tempfile +import os + +__author__ = 'Jorge Niedbalski R. ' + +DEFAULT_FSTAB_FILE = """/dev/sda /mnt/sda ext3 defaults 0 0 + # This is an indented comment, and the next line is entirely blank. 
+ +/dev/sdb /mnt/sdb ext3 defaults 0 0 +/dev/sdc /mnt/sdc ext3 defaults 0 0 +UUID=3af44368-c50b-4768-8e58-aff003cef8be / ext4 errors=remount-ro 0 1 +""" + +GENERATED_FSTAB_FILE = '\n'.join( + # Helper will writeback with spaces instead of tabs + line.replace('\t', ' ') for line in DEFAULT_FSTAB_FILE.splitlines() + if line.strip() and not line.strip().startswith('#')) + + +class FstabTest(unittest.TestCase): + + def setUp(self): + self.tempfile = tempfile.NamedTemporaryFile('w+', delete=False) + self.tempfile.write(DEFAULT_FSTAB_FILE) + self.tempfile.close() + self.fstab = Fstab(path=self.tempfile.name) + + def tearDown(self): + os.unlink(self.tempfile.name) + + def test_entries(self): + """Test if entries are correctly readed from fstab file""" + assert_equal(sorted(GENERATED_FSTAB_FILE.splitlines()), + sorted(str(entry) for entry in self.fstab.entries)) + + def test_get_entry_by_device_attr(self): + """Test if the get_entry_by_attr method works for device attr""" + for device in ('sda', 'sdb', 'sdc', ): + assert_is_not(self.fstab.get_entry_by_attr('device', + '/dev/%s' % device), + None) + + def test_get_entry_by_mountpoint_attr(self): + """Test if the get_entry_by_attr method works for mountpoint attr""" + for mnt in ('sda', 'sdb', 'sdc', ): + assert_is_not(self.fstab.get_entry_by_attr('mountpoint', + '/mnt/%s' % mnt), None) + + def test_add_entry(self): + """Test if add_entry works for a new entry""" + for device in ('sdf', 'sdg', 'sdh'): + entry = Fstab.Entry('/dev/%s' % device, '/mnt/%s' % device, 'ext3', + None) + assert_is_not(self.fstab.add_entry(entry), None) + assert_is_not(self.fstab.get_entry_by_attr( + 'device', '/dev/%s' % device), None) + + assert_is(self.fstab.add_entry(entry), False, + "Check if adding an existing entry returns false") + + def test_remove_entry(self): + """Test if remove entry works for already existing entries""" + for entry in self.fstab.entries: + assert_is(self.fstab.remove_entry(entry), True) + + assert_equal(len([entry for entry in self.fstab.entries]), 0) + assert_equal(self.fstab.add_entry(entry), entry) + assert_equal(len([entry for entry in self.fstab.entries]), 1) + + def test_assert_remove_add_all(self): + """Test if removing/adding all the entries works""" + for entry in self.fstab.entries: + assert_is(self.fstab.remove_entry(entry), True) + + for device in ('sda', 'sdb', 'sdc', ): + self.fstab.add_entry( + Fstab.Entry('/dev/%s' % device, '/mnt/%s' % device, 'ext3', + None)) + + self.fstab.add_entry(Fstab.Entry( + 'UUID=3af44368-c50b-4768-8e58-aff003cef8be', + '/', 'ext4', 'errors=remount-ro', 0, 1)) + + assert_equal(sorted(GENERATED_FSTAB_FILE.splitlines()), + sorted(str(entry) for entry in self.fstab.entries)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_hookenv.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_hookenv.py new file mode 100644 index 0000000000000000000000000000000000000000..6d83a7a946011584b373e7a0816dbbbe5434f5a2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_hookenv.py @@ -0,0 +1,2378 @@ +import os +import json +from subprocess import CalledProcessError +import shutil +import tempfile +import types +from mock import call, MagicMock, mock_open, patch, sentinel +from testtools import TestCase +from enum import Enum +import yaml + +import six +import io + +from charmhelpers.core import hookenv + +if six.PY3: + import pickle +else: + import cPickle as pickle + + +CHARM_METADATA = b"""name: testmock +summary: 
test mock summary +description: test mock description +requires: + testreqs: + interface: mock +provides: + testprov: + interface: mock +peers: + testpeer: + interface: mock +""" + + +def _clean_globals(): + hookenv.cache.clear() + del hookenv._atstart[:] + del hookenv._atexit[:] + + +class ConfigTest(TestCase): + def setUp(self): + super(ConfigTest, self).setUp() + + _clean_globals() + self.addCleanup(_clean_globals) + + self.charm_dir = tempfile.mkdtemp() + self.addCleanup(lambda: shutil.rmtree(self.charm_dir)) + + patcher = patch.object(hookenv, 'charm_dir', lambda: self.charm_dir) + self.addCleanup(patcher.stop) + patcher.start() + + def test_init(self): + d = dict(foo='bar') + c = hookenv.Config(d) + + self.assertEqual(c['foo'], 'bar') + self.assertEqual(c._prev_dict, None) + + def test_init_empty_state_file(self): + d = dict(foo='bar') + c = hookenv.Config(d) + + state_path = os.path.join(self.charm_dir, hookenv.Config.CONFIG_FILE_NAME) + with open(state_path, 'w') as f: + f.close() + + self.assertEqual(c['foo'], 'bar') + self.assertEqual(c._prev_dict, None) + self.assertEqual(c.path, state_path) + + def test_init_invalid_state_file(self): + d = dict(foo='bar') + + state_path = os.path.join(self.charm_dir, hookenv.Config.CONFIG_FILE_NAME) + with open(state_path, 'w') as f: + f.write('blah') + + c = hookenv.Config(d) + + self.assertEqual(c['foo'], 'bar') + self.assertEqual(c._prev_dict, None) + self.assertEqual(c.path, state_path) + + def test_load_previous(self): + d = dict(foo='bar') + c = hookenv.Config() + + with open(c.path, 'w') as f: + json.dump(d, f) + + c.load_previous() + self.assertEqual(c._prev_dict, d) + + def test_load_previous_alternate_path(self): + d = dict(foo='bar') + c = hookenv.Config() + + alt_path = os.path.join(self.charm_dir, '.alt-config') + with open(alt_path, 'w') as f: + json.dump(d, f) + + c.load_previous(path=alt_path) + self.assertEqual(c._prev_dict, d) + self.assertEqual(c.path, alt_path) + + def test_changed_without_prev_dict(self): + d = dict(foo='bar') + c = hookenv.Config(d) + + self.assertTrue(c.changed('foo')) + + def test_changed_with_prev_dict(self): + c = hookenv.Config(dict(foo='bar', a='b')) + c.save() + c = hookenv.Config(dict(foo='baz', a='b')) + + self.assertTrue(c.changed('foo')) + self.assertFalse(c.changed('a')) + + def test_previous_without_prev_dict(self): + c = hookenv.Config() + + self.assertEqual(c.previous('foo'), None) + + def test_previous_with_prev_dict(self): + c = hookenv.Config(dict(foo='bar')) + c.save() + c = hookenv.Config(dict(foo='baz', a='b')) + + self.assertEqual(c.previous('foo'), 'bar') + self.assertEqual(c.previous('a'), None) + + def test_save_without_prev_dict(self): + c = hookenv.Config(dict(foo='bar')) + c.save() + + with open(c.path, 'r') as f: + self.assertEqual(c, json.load(f)) + self.assertEqual(c, dict(foo='bar')) + self.assertEqual(os.stat(c.path).st_mode & 0o777, 0o600) + + def test_save_with_prev_dict(self): + c = hookenv.Config(dict(foo='bar')) + c.save() + c = hookenv.Config(dict(a='b')) + c.save() + + with open(c.path, 'r') as f: + self.assertEqual(c, json.load(f)) + self.assertEqual(c, dict(foo='bar', a='b')) + self.assertEqual(os.stat(c.path).st_mode & 0o777, 0o600) + + def test_deep_change(self): + # After loading stored data into our previous dictionary, + # it gets copied into our current dictionary. If this is not + # a deep copy, then mappings and lists will be shared instances + # and changes will not be detected. 
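+        # Illustrative failure mode with a shallow copy (not executed):
+        #   c._prev_dict['ll'] is c['ll']  ->  appending to c['ll'] would
+        #   also mutate the saved previous value, masking the change.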
+ c = hookenv.Config(dict(ll=[])) + c.save() + c = hookenv.Config() + c['ll'].append(42) + self.assertTrue(c.changed('ll'), 'load_previous() did not deepcopy') + + def test_getitem(self): + c = hookenv.Config(dict(foo='bar')) + c.save() + c = hookenv.Config(dict(baz='bam')) + + self.assertRaises(KeyError, lambda: c['missing']) + self.assertEqual(c['foo'], 'bar') + self.assertEqual(c['baz'], 'bam') + + def test_get(self): + c = hookenv.Config(dict(foo='bar')) + c.save() + c = hookenv.Config(dict(baz='bam')) + + self.assertIsNone(c.get('missing')) + self.assertIs(c.get('missing', sentinel.missing), sentinel.missing) + self.assertEqual(c.get('foo'), 'bar') + self.assertEqual(c.get('baz'), 'bam') + + def test_keys(self): + c = hookenv.Config(dict(foo='bar')) + c["baz"] = "bar" + self.assertEqual(sorted([six.u("foo"), "baz"]), sorted(c.keys())) + + def test_in(self): + # Test behavior of the in operator. + + prev_path = os.path.join(hookenv.charm_dir(), + hookenv.Config.CONFIG_FILE_NAME) + with open(prev_path, 'w') as f: + json.dump(dict(user='one'), f) + c = hookenv.Config(dict(charm='one')) + + # Items that exist in the dict exist. Items that don't don't. + self.assertTrue('user' in c) + self.assertTrue('charm' in c) + self.assertFalse('bar' in c) + + # Adding items works as expected. + c['user'] = 'two' + c['charm'] = 'two' + c['bar'] = 'two' + self.assertTrue('user' in c) + self.assertTrue('charm' in c) + self.assertTrue('bar' in c) + c.save() + self.assertTrue('user' in c) + self.assertTrue('charm' in c) + self.assertTrue('bar' in c) + + # Removing items works as expected. + del c['user'] + del c['charm'] + self.assertTrue('user' not in c) + self.assertTrue('charm' not in c) + c.save() + self.assertTrue('user' not in c) + self.assertTrue('charm' not in c) + + +class SerializableTest(TestCase): + def test_serializes_object_to_json(self): + foo = { + 'bar': 'baz', + } + wrapped = hookenv.Serializable(foo) + self.assertEqual(wrapped.json(), json.dumps(foo)) + + def test_serializes_object_to_yaml(self): + foo = { + 'bar': 'baz', + } + wrapped = hookenv.Serializable(foo) + self.assertEqual(wrapped.yaml(), yaml.dump(foo)) + + def test_gets_attribute_from_inner_object_as_dict(self): + foo = { + 'bar': 'baz', + } + wrapped = hookenv.Serializable(foo) + + self.assertEqual(wrapped.bar, 'baz') + + def test_raises_error_from_inner_object_as_dict(self): + foo = { + 'bar': 'baz', + } + wrapped = hookenv.Serializable(foo) + + self.assertRaises(AttributeError, getattr, wrapped, 'baz') + + def test_dict_methods_from_inner_object(self): + foo = { + 'bar': 'baz', + } + wrapped = hookenv.Serializable(foo) + for meth in ('keys', 'values', 'items'): + self.assertEqual(sorted(list(getattr(wrapped, meth)())), + sorted(list(getattr(foo, meth)()))) + + self.assertEqual(wrapped.get('bar'), foo.get('bar')) + self.assertEqual(wrapped.get('baz', 42), foo.get('baz', 42)) + self.assertIn('bar', wrapped) + + def test_get_gets_from_inner_object(self): + foo = { + 'bar': 'baz', + } + wrapped = hookenv.Serializable(foo) + + self.assertEqual(wrapped.get('foo'), None) + self.assertEqual(wrapped.get('bar'), 'baz') + self.assertEqual(wrapped.get('zoo', 'bla'), 'bla') + + def test_gets_inner_object(self): + foo = { + 'bar': 'baz', + } + wrapped = hookenv.Serializable(foo) + + self.assertIs(wrapped.data, foo) + + def test_pickle(self): + foo = {'bar': 'baz'} + wrapped = hookenv.Serializable(foo) + pickled = pickle.dumps(wrapped) + unpickled = pickle.loads(pickled) + + self.assert_(isinstance(unpickled, hookenv.Serializable)) + 
self.assertEqual(unpickled, foo) + + def test_boolean(self): + true_dict = {'foo': 'bar'} + false_dict = {} + + self.assertIs(bool(hookenv.Serializable(true_dict)), True) + self.assertIs(bool(hookenv.Serializable(false_dict)), False) + + def test_equality(self): + foo = {'bar': 'baz'} + bar = {'baz': 'bar'} + wrapped_foo = hookenv.Serializable(foo) + + self.assertEqual(wrapped_foo, foo) + self.assertEqual(wrapped_foo, wrapped_foo) + self.assertNotEqual(wrapped_foo, bar) + + +class HelpersTest(TestCase): + def setUp(self): + super(HelpersTest, self).setUp() + _clean_globals() + self.addCleanup(_clean_globals) + + @patch('subprocess.call') + def test_logs_messages_to_juju_with_default_level(self, mock_call): + hookenv.log('foo') + + mock_call.assert_called_with(['juju-log', 'foo']) + + @patch('subprocess.call') + def test_logs_messages_object(self, mock_call): + hookenv.log(object) + mock_call.assert_called_with(['juju-log', repr(object)]) + + @patch('subprocess.call') + def test_logs_messages_with_alternative_levels(self, mock_call): + alternative_levels = [ + hookenv.CRITICAL, + hookenv.ERROR, + hookenv.WARNING, + hookenv.INFO, + ] + + for level in alternative_levels: + hookenv.log('foo', level) + mock_call.assert_called_with(['juju-log', '-l', level, 'foo']) + + @patch('subprocess.call') + def test_function_log_message(self, mock_call): + hookenv.function_log('foo') + mock_call.assert_called_with(['function-log', 'foo']) + + @patch('subprocess.call') + def test_function_log_message_object(self, mock_call): + hookenv.function_log(object) + mock_call.assert_called_with(['function-log', repr(object)]) + + @patch('charmhelpers.core.hookenv._cache_config', None) + @patch('charmhelpers.core.hookenv.charm_dir') + @patch('subprocess.check_output') + def test_gets_charm_config_with_scope(self, check_output, charm_dir): + check_output.return_value = json.dumps(dict(baz='bar')).encode('UTF-8') + charm_dir.return_value = '/nonexistent' + + result = hookenv.config(scope='baz') + + self.assertEqual(result, 'bar') + check_output.assert_called_with(['config-get', '--all', + '--format=json']) + + # The result can be used like a string + self.assertEqual(result[1], 'a') + + # ... 
because the result is actually a string + self.assert_(isinstance(result, six.string_types)) + + @patch('charmhelpers.core.hookenv.log', lambda *args, **kwargs: None) + @patch('charmhelpers.core.hookenv._cache_config', None) + @patch('subprocess.check_output') + def test_gets_missing_charm_config_with_scope(self, check_output): + check_output.return_value = b'' + + result = hookenv.config(scope='baz') + + self.assertEqual(result, None) + check_output.assert_called_with(['config-get', '--all', + '--format=json']) + + @patch('charmhelpers.core.hookenv._cache_config', None) + @patch('charmhelpers.core.hookenv.charm_dir') + @patch('subprocess.check_output') + def test_gets_config_without_scope(self, check_output, charm_dir): + check_output.return_value = json.dumps(dict(baz='bar')).encode('UTF-8') + charm_dir.return_value = '/nonexistent' + + result = hookenv.config() + + self.assertIsInstance(result, hookenv.Config) + self.assertEqual(result['baz'], 'bar') + check_output.assert_called_with(['config-get', '--all', + '--format=json']) + + @patch('charmhelpers.core.hookenv.log') + @patch('charmhelpers.core.hookenv._cache_config', None) + @patch('charmhelpers.core.hookenv.charm_dir') + @patch('subprocess.check_output') + def test_gets_charm_config_invalid_json_with_scope(self, + check_output, + charm_dir, + log): + check_output.return_value = '{"invalid: "json"}'.encode('UTF-8') + charm_dir.return_value = '/nonexistent' + + result = hookenv.config(scope='invalid') + + self.assertEqual(result, None) + cmd_line = ['config-get', '--all', '--format=json'] + check_output.assert_called_with(cmd_line) + log.assert_called_with( + 'Unable to parse output from config-get: ' + 'config_cmd_line="{}" message="{}"' + .format(str(cmd_line), + "Expecting ':' delimiter: line 1 column 13 (char 12)"), + level=hookenv.ERROR, + ) + + @patch('charmhelpers.core.hookenv.log') + @patch('charmhelpers.core.hookenv._cache_config', None) + @patch('charmhelpers.core.hookenv.charm_dir') + @patch('subprocess.check_output') + def test_gets_charm_config_invalid_utf8_with_scope(self, + check_output, + charm_dir, + log): + check_output.return_value = b'{"invalid: "json"}\x9D' + charm_dir.return_value = '/nonexistent' + + result = hookenv.config(scope='invalid') + + self.assertEqual(result, None) + cmd_line = ['config-get', '--all', '--format=json'] + check_output.assert_called_with(cmd_line) + try: + # Python3 + log.assert_called_with( + 'Unable to parse output from config-get: ' + 'config_cmd_line="{}" message="{}"' + .format(str(cmd_line), + "'utf-8' codec can't decode byte 0x9d in position " + "18: invalid start byte"), + level=hookenv.ERROR, + ) + except AssertionError: + # Python2.7 + log.assert_called_with( + 'Unable to parse output from config-get: ' + 'config_cmd_line="{}" message="{}"' + .format(str(cmd_line), + "'utf8' codec can't decode byte 0x9d in position " + "18: invalid start byte"), + level=hookenv.ERROR, + ) + + @patch('charmhelpers.core.hookenv._cache_config', {'baz': 'bar'}) + @patch('charmhelpers.core.hookenv.charm_dir') + @patch('subprocess.check_output') + def test_gets_config_from_cache_without_scope(self, + check_output, + charm_dir): + charm_dir.return_value = '/nonexistent' + + result = hookenv.config() + + self.assertEqual(result['baz'], 'bar') + self.assertFalse(check_output.called) + + @patch('charmhelpers.core.hookenv._cache_config', {'baz': 'bar'}) + @patch('charmhelpers.core.hookenv.charm_dir') + @patch('subprocess.check_output') + def test_gets_config_from_cache_with_scope(self, + check_output, +
charm_dir): + charm_dir.return_value = '/nonexistent' + + result = hookenv.config('baz') + + self.assertEqual(result, 'bar') + + # The result can be used like a string + self.assertEqual(result[1], 'a') + + # ... because the result is actually a string + self.assert_(isinstance(result, six.string_types)) + + self.assertFalse(check_output.called) + + @patch('charmhelpers.core.hookenv._cache_config', {'foo': 'bar'}) + @patch('subprocess.check_output') + def test_gets_missing_charm_config_from_cache_with_scope(self, + check_output): + + result = hookenv.config(scope='baz') + + self.assertEqual(result, None) + self.assertFalse(check_output.called) + + @patch('charmhelpers.core.hookenv.os') + def test_gets_the_local_unit(self, os_): + os_.environ = { + 'JUJU_UNIT_NAME': 'foo', + } + + self.assertEqual(hookenv.local_unit(), 'foo') + + @patch('charmhelpers.core.hookenv.unit_get') + def test_gets_unit_public_ip(self, _unitget): + _unitget.return_value = sentinel.public_ip + self.assertEqual(sentinel.public_ip, hookenv.unit_public_ip()) + _unitget.assert_called_once_with('public-address') + + @patch('charmhelpers.core.hookenv.unit_get') + def test_gets_unit_private_ip(self, _unitget): + _unitget.return_value = sentinel.private_ip + self.assertEqual(sentinel.private_ip, hookenv.unit_private_ip()) + _unitget.assert_called_once_with('private-address') + + @patch('charmhelpers.core.hookenv.os') + def test_checks_that_is_running_in_relation_hook(self, os_): + os_.environ = { + 'JUJU_RELATION': 'foo', + } + + self.assertTrue(hookenv.in_relation_hook()) + + @patch('charmhelpers.core.hookenv.os') + def test_checks_that_is_not_running_in_relation_hook(self, os_): + os_.environ = { + 'bar': 'foo', + } + + self.assertFalse(hookenv.in_relation_hook()) + + @patch('charmhelpers.core.hookenv.os') + def test_gets_the_relation_type(self, os_): + os_.environ = { + 'JUJU_RELATION': 'foo', + } + + self.assertEqual(hookenv.relation_type(), 'foo') + + @patch('charmhelpers.core.hookenv.os') + def test_relation_type_none_if_not_in_environment(self, os_): + os_.environ = {} + self.assertEqual(hookenv.relation_type(), None) + + @patch('subprocess.check_output') + @patch('charmhelpers.core.hookenv.relation_type') + def test_gets_relation_ids(self, relation_type, check_output): + ids = [1, 2, 3] + check_output.return_value = json.dumps(ids).encode('UTF-8') + reltype = 'foo' + relation_type.return_value = reltype + + result = hookenv.relation_ids() + + self.assertEqual(result, ids) + check_output.assert_called_with(['relation-ids', '--format=json', + reltype]) + + @patch('subprocess.check_output') + @patch('charmhelpers.core.hookenv.relation_type') + def test_gets_relation_ids_empty_array(self, relation_type, check_output): + ids = [] + check_output.return_value = json.dumps(None).encode('UTF-8') + reltype = 'foo' + relation_type.return_value = reltype + + result = hookenv.relation_ids() + + self.assertEqual(result, ids) + check_output.assert_called_with(['relation-ids', '--format=json', + reltype]) + + @patch('subprocess.check_output') + @patch('charmhelpers.core.hookenv.relation_type') + def test_relation_ids_no_relation_type(self, relation_type, check_output): + ids = [1, 2, 3] + check_output.return_value = json.dumps(ids).encode('UTF-8') + relation_type.return_value = None + + result = hookenv.relation_ids() + + self.assertEqual(result, []) + + @patch('subprocess.check_output') + @patch('charmhelpers.core.hookenv.relation_type') + def test_gets_relation_ids_for_type(self, relation_type, check_output): + ids = [1, 2, 3] + 
check_output.return_value = json.dumps(ids).encode('UTF-8') + reltype = 'foo' + + result = hookenv.relation_ids(reltype) + + self.assertEqual(result, ids) + check_output.assert_called_with(['relation-ids', '--format=json', + reltype]) + self.assertFalse(relation_type.called) + + @patch('subprocess.check_output') + @patch('charmhelpers.core.hookenv.relation_id') + def test_gets_related_units(self, relation_id, check_output): + relid = 123 + units = ['foo', 'bar'] + relation_id.return_value = relid + check_output.return_value = json.dumps(units).encode('UTF-8') + + result = hookenv.related_units() + + self.assertEqual(result, units) + check_output.assert_called_with(['relation-list', '--format=json', + '-r', relid]) + + @patch('subprocess.check_output') + @patch('charmhelpers.core.hookenv.relation_id') + def test_gets_related_units_empty_array(self, relation_id, check_output): + relid = str(123) + units = [] + relation_id.return_value = relid + check_output.return_value = json.dumps(None).encode('UTF-8') + + result = hookenv.related_units() + + self.assertEqual(result, units) + check_output.assert_called_with(['relation-list', '--format=json', + '-r', relid]) + + @patch('subprocess.check_output') + @patch('charmhelpers.core.hookenv.relation_id') + def test_related_units_no_relation(self, relation_id, check_output): + units = ['foo', 'bar'] + relation_id.return_value = None + check_output.return_value = json.dumps(units).encode('UTF-8') + + result = hookenv.related_units() + + self.assertEqual(result, units) + check_output.assert_called_with(['relation-list', '--format=json']) + + @patch('subprocess.check_output') + @patch('charmhelpers.core.hookenv.relation_id') + def test_gets_related_units_for_id(self, relation_id, check_output): + relid = 123 + units = ['foo', 'bar'] + check_output.return_value = json.dumps(units).encode('UTF-8') + + result = hookenv.related_units(relid) + + self.assertEqual(result, units) + check_output.assert_called_with(['relation-list', '--format=json', + '-r', relid]) + self.assertFalse(relation_id.called) + + @patch('charmhelpers.core.hookenv.local_unit') + @patch('charmhelpers.core.hookenv.goal_state') + @patch('charmhelpers.core.hookenv.has_juju_version') + def test_gets_expected_peer_units(self, has_juju_version, goal_state, + local_unit): + has_juju_version.return_value = True + goal_state.return_value = { + 'units': { + 'keystone/0': { + 'status': 'active', + 'since': '2018-09-27 11:38:28Z', + }, + 'keystone/1': { + 'status': 'active', + 'since': '2018-09-27 11:39:23Z', + }, + }, + } + local_unit.return_value = 'keystone/0' + + result = hookenv.expected_peer_units() + + self.assertIsInstance(result, types.GeneratorType) + self.assertEqual(sorted(result), ['keystone/1']) + has_juju_version.assertCalledOnceWith("2.4.0") + local_unit.assertCalledOnceWith() + + @patch('charmhelpers.core.hookenv.has_juju_version') + def test_gets_expected_peer_units_wrong_version(self, has_juju_version): + has_juju_version.return_value = False + + def x(): + # local helper function to make testtools.TestCase.assertRaises + # work with generator + list(hookenv.expected_peer_units()) + + self.assertRaises(NotImplementedError, x) + has_juju_version.assertCalledOnceWith("2.4.0") + + @patch('charmhelpers.core.hookenv.goal_state') + @patch('charmhelpers.core.hookenv.relation_type') + @patch('charmhelpers.core.hookenv.has_juju_version') + def test_gets_expected_related_units(self, has_juju_version, relation_type, + goal_state): + has_juju_version.return_value = True + 
relation_type.return_value = 'identity-service' + goal_state.return_value = { + 'relations': { + 'identity-service': { + 'glance': { + 'status': 'joined', + 'since': '2018-09-27 11:37:16Z' + }, + 'glance/0': { + 'status': 'active', + 'since': '2018-09-27 11:27:19Z' + }, + 'glance/1': { + 'status': 'active', + 'since': '2018-09-27 11:27:34Z' + }, + }, + }, + } + + result = hookenv.expected_related_units() + + self.assertIsInstance(result, types.GeneratorType) + self.assertEqual(sorted(result), ['glance/0', 'glance/1']) + + @patch('charmhelpers.core.hookenv.goal_state') + @patch('charmhelpers.core.hookenv.has_juju_version') + def test_gets_expected_related_units_for_type(self, has_juju_version, + goal_state): + has_juju_version.return_value = True + goal_state.return_value = { + 'relations': { + 'identity-service': { + 'glance': { + 'status': 'joined', + 'since': '2018-09-27 11:37:16Z' + }, + 'glance/0': { + 'status': 'active', + 'since': '2018-09-27 11:27:19Z' + }, + 'glance/1': { + 'status': 'active', + 'since': '2018-09-27 11:27:34Z' + }, + }, + }, + } + + result = hookenv.expected_related_units('identity-service') + + self.assertIsInstance(result, types.GeneratorType) + self.assertEqual(sorted(result), ['glance/0', 'glance/1']) + + @patch('charmhelpers.core.hookenv.has_juju_version') + def test_gets_expected_related_units_wrong_version(self, has_juju_version): + has_juju_version.return_value = False + + def x(): + # local helper function to make testtools.TestCase.assertRaises + # work with generator + list(hookenv.expected_related_units()) + + self.assertRaises(NotImplementedError, x) + has_juju_version.assertCalledOnceWith("2.4.4") + + @patch('charmhelpers.core.hookenv.os') + def test_gets_the_remote_unit(self, os_): + os_.environ = { + 'JUJU_REMOTE_UNIT': 'foo', + } + + self.assertEqual(hookenv.remote_unit(), 'foo') + + @patch('charmhelpers.core.hookenv.os') + def test_no_remote_unit(self, os_): + os_.environ = {} + self.assertEqual(hookenv.remote_unit(), None) + + @patch('charmhelpers.core.hookenv.remote_unit') + @patch('charmhelpers.core.hookenv.relation_get') + def test_gets_relation_for_unit(self, relation_get, remote_unit): + unit = 'foo-unit' + raw_relation = { + 'foo': 'bar', + } + remote_unit.return_value = unit + relation_get.return_value = raw_relation + + result = hookenv.relation_for_unit() + + self.assertEqual(result['__unit__'], unit) + self.assertEqual(result['foo'], 'bar') + relation_get.assert_called_with(unit=unit, rid=None) + + @patch('charmhelpers.core.hookenv.remote_unit') + @patch('charmhelpers.core.hookenv.relation_get') + def test_gets_relation_for_unit_with_list(self, relation_get, remote_unit): + unit = 'foo-unit' + raw_relation = { + 'foo-list': 'one two three', + } + remote_unit.return_value = unit + relation_get.return_value = raw_relation + + result = hookenv.relation_for_unit() + + self.assertEqual(result['__unit__'], unit) + self.assertEqual(result['foo-list'], ['one', 'two', 'three']) + relation_get.assert_called_with(unit=unit, rid=None) + + @patch('charmhelpers.core.hookenv.remote_unit') + @patch('charmhelpers.core.hookenv.relation_get') + def test_gets_relation_for_specific_unit(self, relation_get, remote_unit): + unit = 'foo-unit' + raw_relation = { + 'foo': 'bar', + } + relation_get.return_value = raw_relation + + result = hookenv.relation_for_unit(unit) + + self.assertEqual(result['__unit__'], unit) + self.assertEqual(result['foo'], 'bar') + relation_get.assert_called_with(unit=unit, rid=None) + self.assertFalse(remote_unit.called) + + 
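# The next tests exercise relations_for_id(), which builds on + # relation_for_unit(): it gathers each related unit's settings and tags + # every dict with '__relid__' so callers can tell which relation + # instance the data came from. +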
@patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.relation_for_unit') + def test_gets_relations_for_id(self, relation_for_unit, related_units, + relation_ids): + relid = 123 + units = ['foo', 'bar'] + unit_data = [ + {'foo-item': 'bar-item'}, + {'foo-item2': 'bar-item2'}, + ] + relation_ids.return_value = relid + related_units.return_value = units + relation_for_unit.side_effect = unit_data + + result = hookenv.relations_for_id() + + self.assertEqual(result[0]['__relid__'], relid) + self.assertEqual(result[0]['foo-item'], 'bar-item') + self.assertEqual(result[1]['__relid__'], relid) + self.assertEqual(result[1]['foo-item2'], 'bar-item2') + related_units.assert_called_with(relid) + self.assertEqual(relation_for_unit.mock_calls, [ + call('foo', relid), + call('bar', relid), + ]) + + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.relation_for_unit') + def test_gets_relations_for_specific_id(self, relation_for_unit, + related_units, relation_ids): + relid = 123 + units = ['foo', 'bar'] + unit_data = [ + {'foo-item': 'bar-item'}, + {'foo-item2': 'bar-item2'}, + ] + related_units.return_value = units + relation_for_unit.side_effect = unit_data + + result = hookenv.relations_for_id(relid) + + self.assertEqual(result[0]['__relid__'], relid) + self.assertEqual(result[0]['foo-item'], 'bar-item') + self.assertEqual(result[1]['__relid__'], relid) + self.assertEqual(result[1]['foo-item2'], 'bar-item2') + related_units.assert_called_with(relid) + self.assertEqual(relation_for_unit.mock_calls, [ + call('foo', relid), + call('bar', relid), + ]) + self.assertFalse(relation_ids.called) + + @patch('charmhelpers.core.hookenv.in_relation_hook') + @patch('charmhelpers.core.hookenv.relation_type') + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.relations_for_id') + def test_gets_relations_for_type(self, relations_for_id, relation_ids, + relation_type, in_relation_hook): + reltype = 'foo-type' + relids = [123, 234] + relations = [ + [ + {'foo': 'bar'}, + {'foo2': 'bar2'}, + ], + [ + {'FOO': 'BAR'}, + {'FOO2': 'BAR2'}, + ], + ] + is_in_relation = True + + relation_type.return_value = reltype + relation_ids.return_value = relids + relations_for_id.side_effect = relations + in_relation_hook.return_value = is_in_relation + + result = hookenv.relations_of_type() + + self.assertEqual(result[0]['__relid__'], 123) + self.assertEqual(result[0]['foo'], 'bar') + self.assertEqual(result[1]['__relid__'], 123) + self.assertEqual(result[1]['foo2'], 'bar2') + self.assertEqual(result[2]['__relid__'], 234) + self.assertEqual(result[2]['FOO'], 'BAR') + self.assertEqual(result[3]['__relid__'], 234) + self.assertEqual(result[3]['FOO2'], 'BAR2') + relation_ids.assert_called_with(reltype) + self.assertEqual(relations_for_id.mock_calls, [ + call(123), + call(234), + ]) + + @patch('charmhelpers.core.hookenv.local_unit') + @patch('charmhelpers.core.hookenv.relation_types') + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.relation_get') + def test_gets_relations(self, relation_get, related_units, + relation_ids, relation_types, local_unit): + local_unit.return_value = 'u0' + relation_types.return_value = ['t1', 't2'] + relation_ids.return_value = ['i1'] + related_units.return_value = ['u1', 'u2'] + relation_get.return_value = {'key': 
'val'} + + result = hookenv.relations() + + self.assertEqual(result, { + 't1': { + 'i1': { + 'u0': {'key': 'val'}, + 'u1': {'key': 'val'}, + 'u2': {'key': 'val'}, + }, + }, + 't2': { + 'i1': { + 'u0': {'key': 'val'}, + 'u1': {'key': 'val'}, + 'u2': {'key': 'val'}, + }, + }, + }) + + @patch('charmhelpers.core.hookenv.relation_set') + @patch('charmhelpers.core.hookenv.relation_get') + @patch('charmhelpers.core.hookenv.local_unit') + def test_relation_clear(self, local_unit, + relation_get, + relation_set): + local_unit.return_value = 'local-unit' + relation_get.return_value = { + 'private-address': '10.5.0.1', + 'foo': 'bar', + 'public-address': '146.192.45.6' + } + hookenv.relation_clear('relation:1') + relation_get.assert_called_with(rid='relation:1', + unit='local-unit') + relation_set.assert_called_with( + relation_id='relation:1', + **{'private-address': '10.5.0.1', + 'foo': None, + 'public-address': '146.192.45.6'}) + + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.relation_get') + def test_is_relation_made(self, relation_get, related_units, + relation_ids): + relation_get.return_value = 'hostname' + related_units.return_value = ['test/1'] + relation_ids.return_value = ['test:0'] + self.assertTrue(hookenv.is_relation_made('test')) + relation_get.assert_called_with('private-address', + rid='test:0', unit='test/1') + + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.relation_get') + def test_is_relation_made_multi_unit(self, relation_get, related_units, + relation_ids): + relation_get.side_effect = [None, 'hostname'] + related_units.return_value = ['test/1', 'test/2'] + relation_ids.return_value = ['test:0'] + self.assertTrue(hookenv.is_relation_made('test')) + + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.relation_get') + def test_is_relation_made_different_key(self, + relation_get, related_units, + relation_ids): + relation_get.return_value = 'hostname' + related_units.return_value = ['test/1'] + relation_ids.return_value = ['test:0'] + self.assertTrue(hookenv.is_relation_made('test', keys='auth')) + relation_get.assert_called_with('auth', + rid='test:0', unit='test/1') + + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.relation_get') + def test_is_relation_made_multiple_keys(self, + relation_get, related_units, + relation_ids): + relation_get.side_effect = ['password', 'hostname'] + related_units.return_value = ['test/1'] + relation_ids.return_value = ['test:0'] + self.assertTrue(hookenv.is_relation_made('test', + keys=['auth', 'host'])) + relation_get.assert_has_calls( + [call('auth', rid='test:0', unit='test/1'), + call('host', rid='test:0', unit='test/1')] + ) + + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.relation_get') + def test_is_relation_made_not_made(self, + relation_get, related_units, + relation_ids): + relation_get.return_value = None + related_units.return_value = ['test/1'] + relation_ids.return_value = ['test:0'] + self.assertFalse(hookenv.is_relation_made('test')) + + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.relation_get') + 
def test_is_relation_made_not_made_multiple_keys(self, + relation_get, + related_units, + relation_ids): + relation_get.side_effect = ['password', None] + related_units.return_value = ['test/1'] + relation_ids.return_value = ['test:0'] + self.assertFalse(hookenv.is_relation_made('test', + keys=['auth', 'host'])) + relation_get.assert_has_calls( + [call('auth', rid='test:0', unit='test/1'), + call('host', rid='test:0', unit='test/1')] + ) + + @patch('charmhelpers.core.hookenv.config') + @patch('charmhelpers.core.hookenv.relation_type') + @patch('charmhelpers.core.hookenv.local_unit') + @patch('charmhelpers.core.hookenv.relation_id') + @patch('charmhelpers.core.hookenv.relations') + @patch('charmhelpers.core.hookenv.relation_get') + @patch('charmhelpers.core.hookenv.os') + def test_gets_execution_environment(self, os_, relations_get, + relations, relation_id, local_unit, + relation_type, config): + config.return_value = 'some-config' + relation_type.return_value = 'some-type' + local_unit.return_value = 'some-unit' + relation_id.return_value = 'some-id' + relations.return_value = 'all-relations' + relations_get.return_value = 'some-relations' + os_.environ = 'some-environment' + + result = hookenv.execution_environment() + + self.assertEqual(result, { + 'conf': 'some-config', + 'reltype': 'some-type', + 'unit': 'some-unit', + 'relid': 'some-id', + 'rel': 'some-relations', + 'rels': 'all-relations', + 'env': 'some-environment', + }) + + @patch('charmhelpers.core.hookenv.config') + @patch('charmhelpers.core.hookenv.relation_type') + @patch('charmhelpers.core.hookenv.local_unit') + @patch('charmhelpers.core.hookenv.relation_id') + @patch('charmhelpers.core.hookenv.relations') + @patch('charmhelpers.core.hookenv.relation_get') + @patch('charmhelpers.core.hookenv.os') + def test_gets_execution_environment_no_relation( + self, os_, relations_get, relations, relation_id, + local_unit, relation_type, config): + config.return_value = 'some-config' + relation_type.return_value = 'some-type' + local_unit.return_value = 'some-unit' + relation_id.return_value = None + relations.return_value = 'all-relations' + relations_get.return_value = 'some-relations' + os_.environ = 'some-environment' + + result = hookenv.execution_environment() + + self.assertEqual(result, { + 'conf': 'some-config', + 'unit': 'some-unit', + 'rels': 'all-relations', + 'env': 'some-environment', + }) + + @patch('charmhelpers.core.hookenv.remote_service_name') + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.os') + def test_gets_the_relation_id(self, os_, relation_ids, remote_service_name): + os_.environ = { + 'JUJU_RELATION_ID': 'foo', + } + + self.assertEqual(hookenv.relation_id(), 'foo') + + relation_ids.return_value = ['r:1', 'r:2'] + remote_service_name.side_effect = ['other', 'service'] + self.assertEqual(hookenv.relation_id('rel', 'service/0'), 'r:2') + relation_ids.assert_called_once_with('rel') + self.assertEqual(remote_service_name.call_args_list, [ + call('r:1'), + call('r:2'), + ]) + remote_service_name.side_effect = ['other', 'service'] + self.assertEqual(hookenv.relation_id('rel', 'service'), 'r:2') + + @patch('charmhelpers.core.hookenv.os') + def test_relation_id_none_if_no_env(self, os_): + os_.environ = {} + self.assertEqual(hookenv.relation_id(), None) + + @patch('subprocess.check_output') + def test_gets_relation(self, check_output): + data = {"foo": "BAR"} + check_output.return_value = json.dumps(data).encode('UTF-8') + result = hookenv.relation_get() + + 
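# With no attribute, unit, or rid given, relation_get() returns the + # full settings dict, reading it via `relation-get --format=json -`. +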
self.assertEqual(result['foo'], 'BAR') + check_output.assert_called_with(['relation-get', '--format=json', '-']) + + @patch('charmhelpers.core.hookenv.subprocess') + def test_relation_get_none(self, mock_subprocess): + mock_subprocess.check_output.return_value = b'null' + + result = hookenv.relation_get() + + self.assertIsNone(result) + + @patch('charmhelpers.core.hookenv.subprocess') + def test_relation_get_calledprocesserror(self, mock_subprocess): + """relation-get called outside a relation will error without a relation id.""" + mock_subprocess.check_output.side_effect = CalledProcessError( + 2, '/foo/bin/relation-get ' + 'no relation id specified') + + result = hookenv.relation_get() + + self.assertIsNone(result) + + @patch('charmhelpers.core.hookenv.subprocess') + def test_relation_get_calledprocesserror_other(self, mock_subprocess): + """relation-get can fail for other more serious errors.""" + mock_subprocess.check_output.side_effect = CalledProcessError( + 1, '/foo/bin/relation-get ' + 'connection refused') + + self.assertRaises(CalledProcessError, hookenv.relation_get) + + @patch('subprocess.check_output') + def test_gets_relation_with_scope(self, check_output): + check_output.return_value = json.dumps('bar').encode('UTF-8') + + result = hookenv.relation_get(attribute='baz-scope') + + self.assertEqual(result, 'bar') + check_output.assert_called_with(['relation-get', '--format=json', + 'baz-scope']) + + @patch('subprocess.check_output') + def test_gets_missing_relation_with_scope(self, check_output): + check_output.return_value = b"" + + result = hookenv.relation_get(attribute='baz-scope') + + self.assertEqual(result, None) + check_output.assert_called_with(['relation-get', '--format=json', + 'baz-scope']) + + @patch('subprocess.check_output') + def test_gets_relation_with_unit_name(self, check_output): + check_output.return_value = json.dumps('BAR').encode('UTF-8') + + result = hookenv.relation_get(attribute='baz-scope', unit='baz-unit') + + self.assertEqual(result, 'BAR') + check_output.assert_called_with(['relation-get', '--format=json', + 'baz-scope', 'baz-unit']) + + @patch('charmhelpers.core.hookenv.local_unit') + @patch('subprocess.check_call') + @patch('subprocess.check_output') + def test_relation_set_flushes_local_unit_cache(self, check_output, + check_call, local_unit): + check_output.return_value = json.dumps('BAR').encode('UTF-8') + local_unit.return_value = 'baz_unit' + hookenv.relation_get(attribute='baz_scope', unit='baz_unit') + hookenv.relation_get(attribute='bar_scope') + self.assertTrue(len(hookenv.cache) == 2) + check_output.return_value = "" + hookenv.relation_set(baz_scope='hello') + # relation_set should flush any entries for local_unit + self.assertTrue(len(hookenv.cache) == 1) + + @patch('subprocess.check_output') + def test_gets_relation_with_relation_id(self, check_output): + check_output.return_value = json.dumps('BAR').encode('UTF-8') + + result = hookenv.relation_get(attribute='baz-scope', unit='baz-unit', + rid=123) + + self.assertEqual(result, 'BAR') + check_output.assert_called_with(['relation-get', '--format=json', '-r', + 123, 'baz-scope', 'baz-unit']) + + @patch('charmhelpers.core.hookenv.local_unit') + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_sets_relation_with_kwargs(self, check_call_, check_output, + local_unit): + hookenv.relation_set(foo="bar") + check_call_.assert_called_with(['relation-set', 'foo=bar']) + + @patch('charmhelpers.core.hookenv.local_unit') + @patch('subprocess.check_output') +
@patch('subprocess.check_call') + def test_sets_relation_with_dict(self, check_call_, check_output, + local_unit): + hookenv.relation_set(relation_settings={"foo": "bar"}) + check_call_.assert_called_with(['relation-set', 'foo=bar']) + + @patch('charmhelpers.core.hookenv.local_unit') + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_sets_relation_with_relation_id(self, check_call_, check_output, + local_unit): + hookenv.relation_set(relation_id="foo", bar="baz") + check_call_.assert_called_with(['relation-set', '-r', 'foo', + 'bar=baz']) + + @patch('charmhelpers.core.hookenv.local_unit') + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_sets_relation_with_missing_value(self, check_call_, check_output, + local_unit): + hookenv.relation_set(foo=None) + check_call_.assert_called_with(['relation-set', 'foo=']) + + @patch('charmhelpers.core.hookenv.local_unit', MagicMock()) + @patch('os.remove') + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_relation_set_file(self, check_call, check_output, remove): + """If relation-set accepts a --file parameter, it's used. + + Juju 1.23.2 introduced a --file parameter, which means you can + pass the data through a file. Not using --file would make + relation_set break if the relation data is too big. + """ + # check_output(["relation-set", "--help"]) is used to determine + # whether we can pass --file to it. + check_output.return_value = "--file" + hookenv.relation_set(foo="bar") + check_output.assert_called_with( + ["relation-set", "--help"], universal_newlines=True) + # relation-set is called with relation-set --file + # with data as YAML and the temp_file is then removed. + self.assertEqual(1, len(check_call.call_args[0])) + command = check_call.call_args[0][0] + self.assertEqual(3, len(command)) + self.assertEqual("relation-set", command[0]) + self.assertEqual("--file", command[1]) + temp_file = command[2] + with open(temp_file, "r") as f: + self.assertEqual("foo: bar", f.read().strip()) + remove.assert_called_with(temp_file) + + @patch('charmhelpers.core.hookenv.local_unit', MagicMock()) + @patch('os.remove') + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_relation_set_file_non_str(self, check_call, check_output, remove): + """If relation-set accepts a --file parameter, it's used. + + Any value that is not a string is converted to a string before encoding + the settings to YAML. + """ + # check_output(["relation-set", "--help"]) is used to determine + # whether we can pass --file to it. + check_output.return_value = "--file" + hookenv.relation_set(foo={"bar": 1}) + check_output.assert_called_with( + ["relation-set", "--help"], universal_newlines=True) + # relation-set is called with relation-set --file + # with data as YAML and the temp_file is then removed. 
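+ # Here the non-string value {"bar": 1} is stringified first, so the + # YAML written to the temp file is foo: '{''bar'': 1}' (a quoted + # string, not a nested mapping), as asserted below.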
+ self.assertEqual(1, len(check_call.call_args[0])) + command = check_call.call_args[0][0] + self.assertEqual(3, len(command)) + self.assertEqual("relation-set", command[0]) + self.assertEqual("--file", command[1]) + temp_file = command[2] + with open(temp_file, "r") as f: + self.assertEqual("foo: '{''bar'': 1}'", f.read().strip()) + remove.assert_called_with(temp_file) + + def test_lists_relation_types(self): + open_ = mock_open() + open_.return_value = io.BytesIO(CHARM_METADATA) + + with patch('charmhelpers.core.hookenv.open', open_, create=True): + with patch.dict('os.environ', {'CHARM_DIR': '/var/empty'}): + reltypes = set(hookenv.relation_types()) + open_.assert_called_once_with('/var/empty/metadata.yaml') + self.assertEqual(set(('testreqs', 'testprov', 'testpeer')), reltypes) + + def test_metadata(self): + open_ = mock_open() + open_.return_value = io.BytesIO(CHARM_METADATA) + + with patch('charmhelpers.core.hookenv.open', open_, create=True): + with patch.dict('os.environ', {'CHARM_DIR': '/var/empty'}): + metadata = hookenv.metadata() + self.assertEqual(metadata, yaml.safe_load(CHARM_METADATA)) + + @patch('charmhelpers.core.hookenv.relation_ids') + @patch('charmhelpers.core.hookenv.metadata') + def test_peer_relation_id(self, metadata, relation_ids): + metadata.return_value = {'peers': {sentinel.peer_relname: {}}} + relation_ids.return_value = [sentinel.pid1, sentinel.pid2] + self.assertEqual(hookenv.peer_relation_id(), sentinel.pid1) + relation_ids.assert_called_once_with(sentinel.peer_relname) + + def test_charm_name(self): + open_ = mock_open() + open_.return_value = io.BytesIO(CHARM_METADATA) + + with patch('charmhelpers.core.hookenv.open', open_, create=True): + with patch.dict('os.environ', {'CHARM_DIR': '/var/empty'}): + charm_name = hookenv.charm_name() + self.assertEqual("testmock", charm_name) + + @patch('subprocess.check_call') + def test_opens_port(self, check_call_): + hookenv.open_port(443, "TCP") + hookenv.open_port(80) + hookenv.open_port(100, "UDP") + hookenv.open_port(0, "ICMP") + calls = [ + call(['open-port', '443/TCP']), + call(['open-port', '80/TCP']), + call(['open-port', '100/UDP']), + call(['open-port', 'ICMP']), + ] + check_call_.assert_has_calls(calls) + + @patch('subprocess.check_call') + def test_closes_port(self, check_call_): + hookenv.close_port(443, "TCP") + hookenv.close_port(80) + hookenv.close_port(100, "UDP") + hookenv.close_port(0, "ICMP") + calls = [ + call(['close-port', '443/TCP']), + call(['close-port', '80/TCP']), + call(['close-port', '100/UDP']), + call(['close-port', 'ICMP']), + ] + check_call_.assert_has_calls(calls) + + @patch('subprocess.check_call') + def test_opens_ports(self, check_call_): + hookenv.open_ports(443, 447, "TCP") + hookenv.open_ports(80, 91) + hookenv.open_ports(100, 200, "UDP") + calls = [ + call(['open-port', '443-447/TCP']), + call(['open-port', '80-91/TCP']), + call(['open-port', '100-200/UDP']), + ] + check_call_.assert_has_calls(calls) + + @patch('subprocess.check_call') + def test_closes_ports(self, check_call_): + hookenv.close_ports(443, 447, "TCP") + hookenv.close_ports(80, 91) + hookenv.close_ports(100, 200, "UDP") + calls = [ + call(['close-port', '443-447/TCP']), + call(['close-port', '80-91/TCP']), + call(['close-port', '100-200/UDP']), + ] + check_call_.assert_has_calls(calls) + + @patch('subprocess.check_output') + def test_gets_opened_ports(self, check_output_): + prts = ['8080/tcp', '8081-8083/tcp'] + check_output_.return_value = json.dumps(prts).encode('UTF-8') + 
self.assertEqual(hookenv.opened_ports(), prts) + check_output_.assert_called_with(['opened-ports', '--format=json']) + + @patch('subprocess.check_output') + def test_gets_unit_attribute(self, check_output_): + check_output_.return_value = json.dumps('bar').encode('UTF-8') + self.assertEqual(hookenv.unit_get('foo'), 'bar') + check_output_.assert_called_with(['unit-get', '--format=json', 'foo']) + + @patch('subprocess.check_output') + def test_gets_missing_unit_attribute(self, check_output_): + check_output_.return_value = b"" + self.assertEqual(hookenv.unit_get('foo'), None) + check_output_.assert_called_with(['unit-get', '--format=json', 'foo']) + + def test_cached_decorator(self): + class Unserializable(object): + def __str__(self): + return 'unserializable' + + unserializable = Unserializable() + calls = [] + values = { + 'hello': 'world', + 'foo': 'bar', + 'baz': None, + unserializable: 'qux', + } + + @hookenv.cached + def cache_function(attribute): + calls.append(attribute) + return values[attribute] + + self.assertEquals(cache_function('hello'), 'world') + self.assertEquals(cache_function('hello'), 'world') + self.assertEquals(cache_function('foo'), 'bar') + self.assertEquals(cache_function('baz'), None) + self.assertEquals(cache_function('baz'), None) + self.assertEquals(cache_function(unserializable), 'qux') + self.assertEquals(calls, ['hello', 'foo', 'baz', unserializable]) + + def test_gets_charm_dir(self): + with patch.dict('os.environ', {}): + self.assertEqual(hookenv.charm_dir(), None) + with patch.dict('os.environ', {'CHARM_DIR': '/var/empty'}): + self.assertEqual(hookenv.charm_dir(), '/var/empty') + with patch.dict('os.environ', {'JUJU_CHARM_DIR': '/var/another'}): + self.assertEqual(hookenv.charm_dir(), '/var/another') + + @patch('subprocess.check_output') + def test_resource_get_unsupported(self, check_output_): + check_output_.side_effect = OSError(2, 'resource-get') + self.assertRaises(NotImplementedError, hookenv.resource_get, 'foo') + + @patch('subprocess.check_output') + def test_resource_get(self, check_output_): + check_output_.return_value = b'/tmp/file' + self.assertEqual(hookenv.resource_get('file'), '/tmp/file') + check_output_.side_effect = CalledProcessError( + 1, '/foo/bin/resource-get', + 'error: could not download resource: resource#foo/file not found') + self.assertFalse(hookenv.resource_get('no-file')) + self.assertFalse(hookenv.resource_get(None)) + + @patch('subprocess.check_output') + def test_goal_state_unsupported(self, check_output_): + check_output_.side_effect = OSError(2, 'goal-state') + self.assertRaises(NotImplementedError, hookenv.goal_state) + + @patch('subprocess.check_output') + def test_goal_state(self, check_output_): + expect = { + 'units': {}, + 'relations': {}, + } + check_output_.return_value = json.dumps(expect).encode('UTF-8') + result = hookenv.goal_state() + + self.assertEqual(result, expect) + check_output_.assert_called_with(['goal-state', '--format=json']) + + @patch('subprocess.check_output') + def test_is_leader_unsupported(self, check_output_): + check_output_.side_effect = OSError(2, 'is-leader') + self.assertRaises(NotImplementedError, hookenv.is_leader) + + @patch('subprocess.check_output') + def test_is_leader(self, check_output_): + check_output_.return_value = b'false' + self.assertFalse(hookenv.is_leader()) + check_output_.return_value = b'true' + self.assertTrue(hookenv.is_leader()) + + @patch('subprocess.check_call') + def test_payload_register(self, check_call_): + hookenv.payload_register('monitoring', 'docker', 
'abc123') + check_call_.assert_called_with(['payload-register', 'monitoring', + 'docker', 'abc123']) + + @patch('subprocess.check_call') + def test_payload_unregister(self, check_call_): + hookenv.payload_unregister('monitoring', 'abc123') + check_call_.assert_called_with(['payload-unregister', 'monitoring', + 'abc123']) + + @patch('subprocess.check_call') + def test_payload_status_set(self, check_call_): + hookenv.payload_status_set('monitoring', 'abc123', 'Running') + check_call_.assert_called_with(['payload-status-set', 'monitoring', + 'abc123', 'Running']) + + @patch('subprocess.check_call') + def test_application_version_set(self, check_call_): + hookenv.application_version_set('v1.2.3') + check_call_.assert_called_with(['application-version-set', 'v1.2.3']) + + @patch.object(os, 'getenv') + @patch.object(hookenv, 'log') + def test_env_proxy_settings_juju_charm_all_selected(self, faux_log, + get_env): + expected_settings = { + 'HTTP_PROXY': 'http://squid.internal:3128', + 'http_proxy': 'http://squid.internal:3128', + 'HTTPS_PROXY': 'https://squid.internals:3128', + 'https_proxy': 'https://squid.internals:3128', + 'NO_PROXY': '192.0.2.0/24,198.51.100.0/24,.bar.com', + 'no_proxy': '192.0.2.0/24,198.51.100.0/24,.bar.com', + 'FTP_PROXY': 'ftp://ftp.internal:21', + 'ftp_proxy': 'ftp://ftp.internal:21', + } + + def get_env_side_effect(var): + return { + 'HTTP_PROXY': None, + 'HTTPS_PROXY': None, + 'NO_PROXY': None, + 'FTP_PROXY': None, + 'JUJU_CHARM_HTTP_PROXY': 'http://squid.internal:3128', + 'JUJU_CHARM_HTTPS_PROXY': 'https://squid.internals:3128', + 'JUJU_CHARM_FTP_PROXY': 'ftp://ftp.internal:21', + 'JUJU_CHARM_NO_PROXY': '192.0.2.0/24,198.51.100.0/24,.bar.com' + }[var] + get_env.side_effect = get_env_side_effect + + proxy_settings = hookenv.env_proxy_settings() + get_env.assert_has_calls([call("HTTP_PROXY"), + call("HTTPS_PROXY"), + call("NO_PROXY"), + call("FTP_PROXY"), + call("JUJU_CHARM_HTTP_PROXY"), + call("JUJU_CHARM_HTTPS_PROXY"), + call("JUJU_CHARM_FTP_PROXY"), + call("JUJU_CHARM_NO_PROXY")], + any_order=True) + self.assertEqual(expected_settings, proxy_settings) + # Verify that we logged a warning about the cidr in NO_PROXY. 
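+ # (RANGE_WARNING presumably exists because CIDR notation in no_proxy + # is not understood by all software that consumes the variable.)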
+ faux_log.assert_called_with(hookenv.RANGE_WARNING, + level=hookenv.WARNING) + + @patch.object(os, 'getenv') + def test_env_proxy_settings_legacy_https(self, get_env): + expected_settings = { + 'HTTPS_PROXY': 'http://squid.internal:3128', + 'https_proxy': 'http://squid.internal:3128', + } + + def get_env_side_effect(var): + return { + 'HTTPS_PROXY': 'http://squid.internal:3128', + 'JUJU_CHARM_HTTPS_PROXY': None, + }[var] + get_env.side_effect = get_env_side_effect + + proxy_settings = hookenv.env_proxy_settings(['https']) + get_env.assert_has_calls([call("HTTPS_PROXY"), + call("JUJU_CHARM_HTTPS_PROXY")], + any_order=True) + self.assertEqual(expected_settings, proxy_settings) + + @patch.object(os, 'getenv') + def test_env_proxy_settings_juju_charm_https(self, get_env): + expected_settings = { + 'HTTPS_PROXY': 'http://squid.internal:3128', + 'https_proxy': 'http://squid.internal:3128', + } + + def get_env_side_effect(var): + return { + 'HTTPS_PROXY': None, + 'JUJU_CHARM_HTTPS_PROXY': 'http://squid.internal:3128', + }[var] + get_env.side_effect = get_env_side_effect + + proxy_settings = hookenv.env_proxy_settings(['https']) + get_env.assert_has_calls([call("HTTPS_PROXY"), + call("JUJU_CHARM_HTTPS_PROXY")], + any_order=True) + self.assertEqual(expected_settings, proxy_settings) + + @patch.object(os, 'getenv') + def test_env_proxy_settings_legacy_http(self, get_env): + expected_settings = { + 'HTTP_PROXY': 'http://squid.internal:3128', + 'http_proxy': 'http://squid.internal:3128', + } + + def get_env_side_effect(var): + return { + 'HTTP_PROXY': 'http://squid.internal:3128', + 'JUJU_CHARM_HTTP_PROXY': None, + }[var] + get_env.side_effect = get_env_side_effect + + proxy_settings = hookenv.env_proxy_settings(['http']) + get_env.assert_has_calls([call("HTTP_PROXY"), + call("JUJU_CHARM_HTTP_PROXY")], + any_order=True) + self.assertEqual(expected_settings, proxy_settings) + + @patch.object(os, 'getenv') + def test_env_proxy_settings_juju_charm_http(self, get_env): + expected_settings = { + 'HTTP_PROXY': 'http://squid.internal:3128', + 'http_proxy': 'http://squid.internal:3128', + } + + def get_env_side_effect(var): + return { + 'HTTP_PROXY': None, + 'JUJU_CHARM_HTTP_PROXY': 'http://squid.internal:3128', + }[var] + get_env.side_effect = get_env_side_effect + + proxy_settings = hookenv.env_proxy_settings(['http']) + get_env.assert_has_calls([call("HTTP_PROXY"), + call("JUJU_CHARM_HTTP_PROXY")], + any_order=True) + self.assertEqual(expected_settings, proxy_settings) + + +class HooksTest(TestCase): + def setUp(self): + super(HooksTest, self).setUp() + + _clean_globals() + self.addCleanup(_clean_globals) + + charm_dir = tempfile.mkdtemp() + self.addCleanup(lambda: shutil.rmtree(charm_dir)) + patcher = patch.object(hookenv, 'charm_dir', lambda: charm_dir) + self.addCleanup(patcher.stop) + patcher.start() + + config = hookenv.Config({}) + + def _mock_config(scope=None): + return config if scope is None else config[scope] + patcher = patch.object(hookenv, 'config', _mock_config) + self.addCleanup(patcher.stop) + patcher.start() + + def test_config_saved_after_execute(self): + config = hookenv.config() + config.implicit_save = True + + foo = MagicMock() + hooks = hookenv.Hooks() + hooks.register('foo', foo) + hooks.execute(['foo', 'some', 'other', 'args']) + self.assertTrue(os.path.exists(config.path)) + + def test_config_not_saved_after_execute(self): + config = hookenv.config() + config.implicit_save = False + + foo = MagicMock() + hooks = hookenv.Hooks() + hooks.register('foo', foo) + 
hooks.execute(['foo', 'some', 'other', 'args']) + self.assertFalse(os.path.exists(config.path)) + + def test_config_save_disabled(self): + config = hookenv.config() + config.implicit_save = True + + foo = MagicMock() + hooks = hookenv.Hooks(config_save=False) + hooks.register('foo', foo) + hooks.execute(['foo', 'some', 'other', 'args']) + self.assertFalse(os.path.exists(config.path)) + + def test_runs_a_registered_function(self): + foo = MagicMock() + hooks = hookenv.Hooks() + hooks.register('foo', foo) + + hooks.execute(['foo', 'some', 'other', 'args']) + + foo.assert_called_with() + + def test_cannot_run_unregistered_function(self): + foo = MagicMock() + hooks = hookenv.Hooks() + hooks.register('foo', foo) + + self.assertRaises(hookenv.UnregisteredHookError, hooks.execute, + ['bar']) + + def test_can_run_a_decorated_function_as_one_or_more_hooks(self): + execs = [] + hooks = hookenv.Hooks() + + @hooks.hook('bar', 'baz') + def func(): + execs.append(True) + + hooks.execute(['bar']) + hooks.execute(['baz']) + self.assertRaises(hookenv.UnregisteredHookError, hooks.execute, + ['brew']) + self.assertEqual(execs, [True, True]) + + def test_can_run_a_decorated_function_as_itself(self): + execs = [] + hooks = hookenv.Hooks() + + @hooks.hook() + def func(): + execs.append(True) + + hooks.execute(['func']) + self.assertRaises(hookenv.UnregisteredHookError, hooks.execute, + ['brew']) + self.assertEqual(execs, [True]) + + def test_magic_underscores(self): + # Juju hook names use hyphens as separators. Python functions use + # underscores. If explicit names have not been provided, hooks + # are registered with both the function name and the function + # name with underscores replaced with hyphens for convenience. + execs = [] + hooks = hookenv.Hooks() + + @hooks.hook() + def call_me_maybe(): + execs.append(True) + + hooks.execute(['call-me-maybe']) + hooks.execute(['call_me_maybe']) + self.assertEqual(execs, [True, True]) + + @patch('charmhelpers.core.hookenv.local_unit') + def test_gets_service_name(self, _unit): + _unit.return_value = 'mysql/3' + self.assertEqual(hookenv.service_name(), 'mysql') + + @patch('charmhelpers.core.hookenv.related_units') + @patch('charmhelpers.core.hookenv.remote_unit') + def test_gets_remote_service_name(self, remote_unit, related_units): + remote_unit.return_value = 'mysql/3' + related_units.return_value = ['pgsql/0', 'pgsql/1'] + self.assertEqual(hookenv.remote_service_name(), 'mysql') + self.assertEqual(hookenv.remote_service_name('pgsql:1'), 'pgsql') + + def test_gets_hook_name(self): + with patch.dict(os.environ, JUJU_HOOK_NAME='hook'): + self.assertEqual(hookenv.hook_name(), 'hook') + with patch('sys.argv', ['other-hook']): + self.assertEqual(hookenv.hook_name(), 'other-hook') + + @patch('subprocess.check_output') + def test_action_get_with_key(self, check_output): + action_data = 'bar' + check_output.return_value = json.dumps(action_data).encode('UTF-8') + + result = hookenv.action_get(key='foo') + + self.assertEqual(result, 'bar') + check_output.assert_called_with(['action-get', 'foo', '--format=json']) + + @patch('subprocess.check_output') + def test_action_get_without_key(self, check_output): + check_output.return_value = json.dumps(dict(foo='bar')).encode('UTF-8') + + result = hookenv.action_get() + + self.assertEqual(result['foo'], 'bar') + check_output.assert_called_with(['action-get', '--format=json']) + + @patch('subprocess.check_call') + def test_action_set(self, check_call): + values = {'foo': 'bar', 'fooz': 'barz'} + hookenv.action_set(values) + # The 
order of the key/value pairs can change, so sort them before comparing + called_args = check_call.call_args_list[0][0][0] + called_args.pop(0) + called_args.sort() + self.assertEqual(called_args, ['foo=bar', 'fooz=barz']) + + @patch('subprocess.check_call') + def test_action_fail(self, check_call): + message = "Ooops, the action failed" + hookenv.action_fail(message) + check_call.assert_called_with(['action-fail', message]) + + @patch('charmhelpers.core.hookenv.cmd_exists') + @patch('subprocess.check_output') + def test_function_get_with_key(self, check_output, cmd_exists): + function_data = 'bar' + check_output.return_value = json.dumps(function_data).encode('UTF-8') + cmd_exists.return_value = True + + result = hookenv.function_get(key='foo') + + self.assertEqual(result, 'bar') + check_output.assert_called_with(['function-get', 'foo', '--format=json']) + + @patch('charmhelpers.core.hookenv.cmd_exists') + @patch('subprocess.check_output') + def test_function_get_without_key(self, check_output, cmd_exists): + check_output.return_value = json.dumps(dict(foo='bar')).encode('UTF-8') + cmd_exists.return_value = True + + result = hookenv.function_get() + + self.assertEqual(result['foo'], 'bar') + check_output.assert_called_with(['function-get', '--format=json']) + + @patch('subprocess.check_call') + def test_function_set(self, check_call): + values = {'foo': 'bar', 'fooz': 'barz'} + hookenv.function_set(values) + # The order of the key/value pairs can change, so sort them before comparing + called_args = check_call.call_args_list[0][0][0] + called_args.pop(0) + called_args.sort() + self.assertEqual(called_args, ['foo=bar', 'fooz=barz']) + + @patch('charmhelpers.core.hookenv.cmd_exists') + @patch('subprocess.check_call') + def test_function_fail(self, check_call, cmd_exists): + cmd_exists.return_value = True + + message = "Ooops, the function failed" + hookenv.function_fail(message) + check_call.assert_called_with(['function-fail', message]) + + def test_status_set_invalid_state(self): + self.assertRaises(ValueError, hookenv.status_set, 'random', 'message') + + def test_status_set_invalid_state_enum(self): + + class RandomEnum(Enum): + FOO = 1 + self.assertRaises( + ValueError, + hookenv.status_set, + RandomEnum.FOO, + 'message') + + @patch('subprocess.call') + def test_status(self, call): + call.return_value = 0 + hookenv.status_set('active', 'Everything is Awesome!') + call.assert_called_with(['status-set', 'active', 'Everything is Awesome!']) + + @patch('subprocess.call') + def test_status_enum(self, call): + call.return_value = 0 + hookenv.status_set( + hookenv.WORKLOAD_STATES.ACTIVE, + 'Everything is Awesome!') + call.assert_called_with(['status-set', 'active', 'Everything is Awesome!']) + + @patch('subprocess.call') + def test_status_app(self, call): + call.return_value = 0 + hookenv.status_set( + 'active', + 'Everything is Awesome!', + application=True) + call.assert_called_with([ + 'status-set', + '--application', + 'active', + 'Everything is Awesome!']) + + @patch('subprocess.call') + @patch.object(hookenv, 'log') + def test_status_enoent(self, log, call): + call.side_effect = OSError(2, 'fail') + hookenv.status_set('active', 'Everything is Awesome!') + log.assert_called_with('status-set failed: active Everything is Awesome!', level='INFO') + + @patch('subprocess.call') + @patch.object(hookenv, 'log') + def test_status_statuscmd_fail(self, log, call): + call.side_effect = OSError(3, 'fail') + self.assertRaises(OSError, hookenv.status_set, 'active', 'msg') + call.assert_called_with(['status-set', 
'active', 'msg']) + + @patch('subprocess.check_output') + def test_status_get(self, check_output): + check_output.return_value = json.dumps( + {"message": "foo", + "status": "active", + "status-data": {}}).encode("UTF-8") + result = hookenv.status_get() + self.assertEqual(result, ('active', 'foo')) + check_output.assert_called_with( + ['status-get', "--format=json", "--include-data"]) + + @patch('subprocess.check_output') + def test_status_get_nostatus(self, check_output): + check_output.side_effect = OSError(2, 'fail') + result = hookenv.status_get() + self.assertEqual(result, ('unknown', '')) + + @patch('subprocess.check_output') + def test_status_get_status_error(self, check_output): + check_output.side_effect = OSError(3, 'fail') + self.assertRaises(OSError, hookenv.status_get) + + @patch('subprocess.check_output') + @patch('glob.glob') + def test_juju_version(self, glob, check_output): + glob.return_value = [sentinel.jujud] + check_output.return_value = '1.23.3.1-trusty-amd64\n' + self.assertEqual(hookenv.juju_version(), '1.23.3.1-trusty-amd64') + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + glob.assert_called_once_with('/var/lib/juju/tools/machine-*/jujud') + check_output.assert_called_once_with([sentinel.jujud, 'version'], + universal_newlines=True) + + @patch('charmhelpers.core.hookenv.juju_version') + def test_has_juju_version(self, juju_version): + juju_version.return_value = '1.23.1.2.3.4.5-with-a-cherry-on-top.amd64' + self.assertTrue(hookenv.has_juju_version('1.23')) + self.assertTrue(hookenv.has_juju_version('1.23.1')) + self.assertTrue(hookenv.has_juju_version('1.23.1.1')) + self.assertFalse(hookenv.has_juju_version('1.23.2.1')) + self.assertFalse(hookenv.has_juju_version('1.24')) + + juju_version.return_value = '1.24-beta5.1-trusty-amd64' + self.assertTrue(hookenv.has_juju_version('1.23')) + self.assertTrue(hookenv.has_juju_version('1.24')) # Better if this was false! 
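+ # ('1.24-beta5' predates the final 1.24 release, but the loose version + # comparison used here cannot tell a beta from the released version.)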
+ self.assertTrue(hookenv.has_juju_version('1.24-beta5')) + self.assertTrue(hookenv.has_juju_version('1.24-beta5.1')) + self.assertFalse(hookenv.has_juju_version('1.25')) + self.assertTrue(hookenv.has_juju_version('1.18-backport6')) + + @patch.object(hookenv, 'relation_to_role_and_interface') + def test_relation_to_interface(self, rtri): + rtri.return_value = (None, 'foo') + self.assertEqual(hookenv.relation_to_interface('rel'), 'foo') + + @patch.object(hookenv, 'metadata') + def test_relation_to_role_and_interface(self, metadata): + metadata.return_value = { + 'provides': { + 'pro-rel': { + 'interface': 'pro-int', + }, + 'pro-rel2': { + 'interface': 'pro-int', + }, + }, + 'requires': { + 'req-rel': { + 'interface': 'req-int', + }, + }, + 'peers': { + 'pee-rel': { + 'interface': 'pee-int', + }, + }, + } + rtri = hookenv.relation_to_role_and_interface + self.assertEqual(rtri('pro-rel'), ('provides', 'pro-int')) + self.assertEqual(rtri('req-rel'), ('requires', 'req-int')) + self.assertEqual(rtri('pee-rel'), ('peers', 'pee-int')) + + @patch.object(hookenv, 'metadata') + def test_role_and_interface_to_relations(self, metadata): + metadata.return_value = { + 'provides': { + 'pro-rel': { + 'interface': 'pro-int', + }, + 'pro-rel2': { + 'interface': 'pro-int', + }, + }, + 'requires': { + 'req-rel': { + 'interface': 'int', + }, + }, + 'peers': { + 'pee-rel': { + 'interface': 'int', + }, + }, + } + ritr = hookenv.role_and_interface_to_relations + assertItemsEqual = getattr(self, 'assertItemsEqual', getattr(self, 'assertCountEqual', None)) + assertItemsEqual(ritr('provides', 'pro-int'), ['pro-rel', 'pro-rel2']) + assertItemsEqual(ritr('requires', 'int'), ['req-rel']) + assertItemsEqual(ritr('peers', 'int'), ['pee-rel']) + + @patch.object(hookenv, 'metadata') + def test_interface_to_relations(self, metadata): + metadata.return_value = { + 'provides': { + 'pro-rel': { + 'interface': 'pro-int', + }, + 'pro-rel2': { + 'interface': 'pro-int', + }, + }, + 'requires': { + 'req-rel': { + 'interface': 'req-int', + }, + }, + 'peers': { + 'pee-rel': { + 'interface': 'pee-int', + }, + }, + } + itr = hookenv.interface_to_relations + assertItemsEqual = getattr(self, 'assertItemsEqual', getattr(self, 'assertCountEqual', None)) + assertItemsEqual(itr('pro-int'), ['pro-rel', 'pro-rel2']) + assertItemsEqual(itr('req-int'), ['req-rel']) + assertItemsEqual(itr('pee-int'), ['pee-rel']) + + def test_action_name(self): + with patch.dict('os.environ', JUJU_ACTION_NAME='action-jack'): + self.assertEqual(hookenv.action_name(), 'action-jack') + + def test_action_uuid(self): + with patch.dict('os.environ', JUJU_ACTION_UUID='action-jack'): + self.assertEqual(hookenv.action_uuid(), 'action-jack') + + def test_action_tag(self): + with patch.dict('os.environ', JUJU_ACTION_TAG='action-jack'): + self.assertEqual(hookenv.action_tag(), 'action-jack') + + @patch('subprocess.check_output') + def test_storage_list(self, check_output): + ids = ['data/0', 'data/1', 'data/2'] + check_output.return_value = json.dumps(ids).encode('UTF-8') + + storage_name = 'arbitrary' + result = hookenv.storage_list(storage_name) + + self.assertEqual(result, ids) + check_output.assert_called_with(['storage-list', '--format=json', + storage_name]) + + @patch('subprocess.check_output') + def test_storage_list_notexist(self, check_output): + import errno + e = OSError() + e.errno = errno.ENOENT + check_output.side_effect = e + + result = hookenv.storage_list() + + self.assertEqual(result, []) + check_output.assert_called_with(['storage-list', '--format=json']) 
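+ # Note the contrast with storage_get below: storage_list() treats + # ENOENT (presumably a Juju too old to ship the storage-list tool) as + # "no storage" and returns [] rather than raising.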
+
+    @patch('subprocess.check_output')
+    def test_storage_get_notexist(self, check_output):
+        # storage_get does not catch ENOENT, because there's no reason why you
+        # should be calling storage_get except from a storage hook, or with
+        # the result of storage_list (which will return [] as shown above).
+
+        import errno
+        e = OSError()
+        e.errno = errno.ENOENT
+        check_output.side_effect = e
+        self.assertRaises(OSError, hookenv.storage_get)
+
+    @patch('subprocess.check_output')
+    def test_storage_get(self, check_output):
+        expect = {
+            'location': '/dev/sda',
+            'kind': 'block',
+        }
+        check_output.return_value = json.dumps(expect).encode('UTF-8')
+
+        result = hookenv.storage_get()
+
+        self.assertEqual(result, expect)
+        check_output.assert_called_with(['storage-get', '--format=json'])
+
+    @patch('subprocess.check_output')
+    def test_storage_get_attr(self, check_output):
+        expect = '/dev/sda'
+        check_output.return_value = json.dumps(expect).encode('UTF-8')
+
+        attribute = 'location'
+        result = hookenv.storage_get(attribute)
+
+        self.assertEqual(result, expect)
+        check_output.assert_called_with(['storage-get', '--format=json',
+                                         attribute])
+
+    @patch('subprocess.check_output')
+    def test_storage_get_with_id(self, check_output):
+        expect = {
+            'location': '/dev/sda',
+            'kind': 'block',
+        }
+        check_output.return_value = json.dumps(expect).encode('UTF-8')
+
+        storage_id = 'data/0'
+        result = hookenv.storage_get(storage_id=storage_id)
+
+        self.assertEqual(result, expect)
+        check_output.assert_called_with(['storage-get', '--format=json',
+                                         '-s', storage_id])
+
+    @patch('subprocess.check_output')
+    def test_network_get_primary(self, check_output):
+        """Ensure that network-get is called correctly and output is returned"""
+        check_output.return_value = b'192.168.22.1'
+        ip = hookenv.network_get_primary_address('mybinding')
+        check_output.assert_called_with(
+            ['network-get', '--primary-address', 'mybinding'], stderr=-2)
+        self.assertEqual(ip, '192.168.22.1')
+
+    @patch('subprocess.check_output')
+    def test_network_get_primary_unsupported(self, check_output):
+        """Ensure that NotImplementedError is thrown when run on Juju < 2.0"""
+        check_output.side_effect = OSError(2, 'network-get')
+        self.assertRaises(NotImplementedError, hookenv.network_get_primary_address,
+                          'mybinding')
+
+    @patch('subprocess.check_output')
+    def test_network_get_primary_no_binding_found(self, check_output):
+        """Ensure that NoNetworkBinding is raised when no binding is found"""
+        check_output.side_effect = CalledProcessError(
+            1, 'network-get',
+            output='no network config found for binding'.encode('UTF-8'))
+        self.assertRaises(hookenv.NoNetworkBinding,
+                          hookenv.network_get_primary_address,
+                          'doesnotexist')
+        check_output.assert_called_with(
+            ['network-get', '--primary-address', 'doesnotexist'], stderr=-2)
+
+    @patch('subprocess.check_output')
+    def test_network_get_primary_other_exception(self, check_output):
+        """Ensure that CalledProcessError is still raised when the error is
+        not a missing binding"""
+        check_output.side_effect = CalledProcessError(
+            1, 'network-get',
+            output='any other message'.encode('UTF-8'))
+        self.assertRaises(CalledProcessError,
+                          hookenv.network_get_primary_address,
+                          'mybinding')
+
+    @patch('charmhelpers.core.hookenv.juju_version')
+    @patch('subprocess.check_output')
+    def test_network_get(self, check_output, juju_version):
+        """Ensure that network-get is called correctly"""
+        juju_version.return_value = '2.2.0'
+        check_output.return_value = b'result'
+        hookenv.network_get('endpoint')
+        check_output.assert_called_with(
+ ['network-get', 'endpoint', '--format', 'yaml'], stderr=-2) + + @patch('charmhelpers.core.hookenv.juju_version') + @patch('subprocess.check_output') + def test_network_get_primary_required(self, check_output, juju_version): + """Ensure that NotImplementedError is thrown with Juju < 2.2.0""" + check_output.return_value = b'result' + + juju_version.return_value = '2.1.4' + self.assertRaises(NotImplementedError, hookenv.network_get, 'binding') + juju_version.return_value = '2.2.0' + self.assertEquals(hookenv.network_get('endpoint'), 'result') + + @patch('charmhelpers.core.hookenv.juju_version') + @patch('subprocess.check_output') + def test_network_get_relation_bound(self, check_output, juju_version): + """Ensure that network-get supports relation context, requires Juju 2.3""" + juju_version.return_value = '2.3.0' + check_output.return_value = b'result' + hookenv.network_get('endpoint', 'db') + check_output.assert_called_with( + ['network-get', 'endpoint', '--format', 'yaml', '-r', 'db'], + stderr=-2) + juju_version.return_value = '2.2.8' + self.assertRaises(NotImplementedError, hookenv.network_get, 'endpoint', 'db') + + @patch('charmhelpers.core.hookenv.juju_version') + @patch('subprocess.check_output') + def test_network_get_parses_yaml(self, check_output, juju_version): + """network-get returns loaded YAML output.""" + juju_version.return_value = '2.3.0' + check_output.return_value = b""" +bind-addresses: +- macaddress: "" + interfacename: "" + addresses: + - address: 10.136.107.33 + cidr: "" +ingress-addresses: +- 10.136.107.33 + """ + ip = hookenv.network_get('mybinding') + self.assertEqual(len(ip['bind-addresses']), 1) + self.assertEqual(ip['ingress-addresses'], ['10.136.107.33']) + + @patch('subprocess.check_call') + def test_add_metric(self, check_call_): + hookenv.add_metric(flips='1.5', flops='2.1') + hookenv.add_metric('juju-units=6') + hookenv.add_metric('foo-bar=3.333', 'baz-quux=8', users='2') + calls = [ + call(['add-metric', 'flips=1.5', 'flops=2.1']), + call(['add-metric', 'juju-units=6']), + call(['add-metric', 'baz-quux=8', 'foo-bar=3.333', 'users=2']), + ] + check_call_.assert_has_calls(calls) + + @patch('subprocess.check_call') + @patch.object(hookenv, 'log') + def test_add_metric_enoent(self, log, _check_call): + _check_call.side_effect = OSError(2, 'fail') + hookenv.add_metric(flips='1') + log.assert_called_with('add-metric failed: flips=1', level='INFO') + + @patch('charmhelpers.core.hookenv.os') + def test_meter_status(self, os_): + os_.environ = { + 'JUJU_METER_STATUS': 'GREEN', + 'JUJU_METER_INFO': 'all good', + } + self.assertEqual(hookenv.meter_status(), 'GREEN') + self.assertEqual(hookenv.meter_info(), 'all good') + + @patch.object(hookenv, 'related_units') + @patch.object(hookenv, 'relation_ids') + def test_iter_units_for_relation_name(self, relation_ids, related_units): + relation_ids.return_value = ['rel:1'] + related_units.return_value = ['unit/0', 'unit/1', 'unit/2'] + expected = [('rel:1', 'unit/0'), + ('rel:1', 'unit/1'), + ('rel:1', 'unit/2')] + related_units_data = [ + (u.rid, u.unit) + for u in hookenv.iter_units_for_relation_name('rel')] + self.assertEqual(expected, related_units_data) + + @patch.object(hookenv, 'relation_get') + def test_ingress_address(self, relation_get): + """Ensure ingress_address returns the ingress-address when available + and returns the private-address when not. 
+        """
+        _with_ingress = {'egress-subnets': '10.5.0.23/32',
+                         'ingress-address': '10.5.0.23',
+                         'private-address': '172.16.5.10'}
+
+        _without_ingress = {'private-address': '172.16.5.10'}
+
+        # Return the ingress-address
+        relation_get.return_value = _with_ingress
+        self.assertEqual(hookenv.ingress_address(rid='test:1', unit='unit/1'),
+                         '10.5.0.23')
+        relation_get.assert_called_with(rid='test:1', unit='unit/1')
+        # Return the private-address
+        relation_get.return_value = _without_ingress
+        self.assertEqual(hookenv.ingress_address(rid='test:1'),
+                         '172.16.5.10')
+
+    @patch.object(hookenv, 'relation_get')
+    def test_egress_subnets(self, relation_get):
+        """Ensure egress_subnets returns the decoded egress-subnets when available
+        and falls back correctly when not.
+        """
+        d = {'egress-subnets': '10.5.0.23/32,2001::F00F/64',
+             'ingress-address': '10.5.0.23',
+             'private-address': '2001::D0:F00D'}
+
+        # Return the egress-subnets
+        relation_get.return_value = d
+        self.assertEqual(hookenv.egress_subnets(rid='test:1', unit='unit/1'),
+                         ['10.5.0.23/32', '2001::F00F/64'])
+        relation_get.assert_called_with(rid='test:1', unit='unit/1')
+
+        # Return the ingress-address
+        del d['egress-subnets']
+        self.assertEqual(hookenv.egress_subnets(), ['10.5.0.23/32'])
+
+        # Return the private-address
+        del d['ingress-address']
+        self.assertEqual(hookenv.egress_subnets(), ['2001::D0:F00D/128'])
+
+    @patch('charmhelpers.core.hookenv.local_unit')
+    @patch('charmhelpers.core.hookenv.goal_state')
+    @patch('charmhelpers.core.hookenv.has_juju_version')
+    def test_unit_doomed(self, has_juju_version, goal_state, local_unit):
+        # We need to test for a minimum patch level, or we risk
+        # data loss by returning bogus results with Juju 2.4.0
+        has_juju_version.return_value = False
+        self.assertRaises(NotImplementedError, hookenv.unit_doomed)
+        has_juju_version.assert_called_once_with("2.4.1")
+        has_juju_version.return_value = True
+
+        goal_state.return_value = json.loads('''
+            {
+                "units": {
+                    "postgresql/0": {
+                        "status": "dying",
+                        "since": "2018-07-30 10:01:06Z"
+                    },
+                    "postgresql/1": {
+                        "status": "active",
+                        "since": "2018-07-30 10:22:39Z"
+                    }
+                },
+                "relations": {}
+            }
+        ''')
+        self.assertTrue(hookenv.unit_doomed('postgresql/0'))   # unit removed, status "dying"
+        self.assertFalse(hookenv.unit_doomed('postgresql/1'))  # unit exists, status "active", maybe other states
+        self.assertTrue(hookenv.unit_doomed('postgresql/2'))   # unit does not exist
+
+        local_unit.return_value = 'postgresql/0'
+        self.assertTrue(hookenv.unit_doomed())
+
+    def test_contains_addr_range(self):
+        # Contains cidr
+        self.assertTrue(hookenv._contains_range("192.168.1/20"))
+        self.assertTrue(hookenv._contains_range("192.168.0/24"))
+        self.assertTrue(
+            hookenv._contains_range("10.40.50.1,192.168.1/20,10.56.78.9"))
+        self.assertTrue(hookenv._contains_range("192.168.22/24"))
+        self.assertTrue(hookenv._contains_range("2001:db8::/32"))
+        self.assertTrue(hookenv._contains_range("*.foo.com"))
+        self.assertTrue(hookenv._contains_range(".foo.com"))
+        self.assertTrue(
+            hookenv._contains_range("192.168.1.20,.foo.com"))
+        self.assertTrue(
+            hookenv._contains_range("192.168.1.20, .foo.com"))
+        self.assertTrue(
+            hookenv._contains_range("192.168.1.20,*.foo.com"))
+
+        # Doesn't contain cidr
+        self.assertFalse(hookenv._contains_range("192.168.1"))
+        self.assertFalse(hookenv._contains_range("192.168.145"))
+        self.assertFalse(hookenv._contains_range("192.16.14"))
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_host.py
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_host.py new file mode 100644 index 0000000000000000000000000000000000000000..19617e8fdebdbccf99a2be722c1dc41beb686030 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_host.py @@ -0,0 +1,2008 @@ +import os.path +from collections import OrderedDict +import subprocess +from tempfile import mkdtemp +from shutil import rmtree +from textwrap import dedent + +import imp + +from charmhelpers import osplatform +from mock import patch, call, mock_open +from testtools import TestCase +from tests.helpers import patch_open +from tests.helpers import mock_open as mocked_open +import six + +from charmhelpers.core import host +from charmhelpers.fetch import ubuntu_apt_pkg + + +MOUNT_LINES = (""" +rootfs / rootfs rw 0 0 +sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 +proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 +udev /dev devtmpfs rw,relatime,size=8196788k,nr_inodes=2049197,mode=755 0 0 +devpts /dev/pts devpts """ + """rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 +""").strip().split('\n') + +LSB_RELEASE = '''DISTRIB_ID=Ubuntu +DISTRIB_RELEASE=13.10 +DISTRIB_CODENAME=saucy +DISTRIB_DESCRIPTION="Ubuntu Saucy Salamander (development branch)" +''' + +OS_RELEASE = '''NAME="CentOS Linux" +ANSI_COLOR="0;31" +ID_LIKE="rhel fedora" +VERSION_ID="7" +BUG_REPORT_URL="https://bugs.centos.org/" +CENTOS_MANTISBT_PROJECT="CentOS-7" +PRETTY_NAME="CentOS Linux 7 (Core)" +VERSION="7 (Core)" +REDHAT_SUPPORT_PRODUCT_VERSION="7" +CENTOS_MANTISBT_PROJECT_VERSION="7" +REDHAT_SUPPORT_PRODUCT="centos" +HOME_URL="https://www.centos.org/" +CPE_NAME="cpe:/o:centos:centos:7" +ID="centos" +''' + +IP_LINE_ETH0 = b""" +2: eth0: mtu 1500 qdisc mq master bond0 state UP qlen 1000 + link/ether e4:11:5b:ab:a7:3c brd ff:ff:ff:ff:ff:ff +""" + +IP_LINE_ETH100 = b""" +2: eth100: mtu 1500 qdisc mq master bond0 state UP qlen 1000 + link/ether e4:11:5b:ab:a7:3d brd ff:ff:ff:ff:ff:ff +""" + +IP_LINE_ETH0_VLAN = b""" +6: eth0.10@eth0: mtu 1500 qdisc noqueue state UP group default + link/ether 08:00:27:16:b9:5f brd ff:ff:ff:ff:ff:ff +""" + +IP_LINE_ETH1 = b""" +3: eth1: mtu 1546 qdisc noop state DOWN qlen 1000 + link/ether e4:11:5b:ab:a7:3c brd ff:ff:ff:ff:ff:ff +""" + +IP_LINE_HWADDR = b"""2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000\\ link/ether e4:11:5b:ab:a7:3c brd ff:ff:ff:ff:ff:ff""" + +IP_LINES = IP_LINE_ETH0 + IP_LINE_ETH1 + IP_LINE_ETH0_VLAN + IP_LINE_ETH100 + +IP_LINE_BONDS = b""" +6: bond0.10@bond0: mtu 1500 qdisc noqueue state UP group default +link/ether 08:00:27:16:b9:5f brd ff:ff:ff:ff:ff:ff +""" + + +class HelpersTest(TestCase): + + @patch('charmhelpers.core.host.lsb_release') + @patch('os.path') + def test_init_is_systemd_upstart(self, path, lsb_release): + """Upstart based init is correctly detected""" + lsb_release.return_value = {'DISTRIB_CODENAME': 'whatever'} + path.isdir.return_value = False + self.assertFalse(host.init_is_systemd()) + path.isdir.assert_called_with('/run/systemd/system') + + @patch('charmhelpers.core.host.lsb_release') + @patch('os.path') + def test_init_is_systemd_system(self, path, lsb_release): + """Systemd based init is correctly detected""" + lsb_release.return_value = {'DISTRIB_CODENAME': 'whatever'} + path.isdir.return_value = True + self.assertTrue(host.init_is_systemd()) + path.isdir.assert_called_with('/run/systemd/system') + + @patch('charmhelpers.core.host.lsb_release') + @patch('os.path') + def test_init_is_systemd_trusty(self, path, 
lsb_release):
+        # Never returns true under trusty, even if the systemd
+        # packages have been installed. lp:1670944
+        lsb_release.return_value = {'DISTRIB_CODENAME': 'trusty'}
+        path.isdir.return_value = True
+        self.assertFalse(host.init_is_systemd())
+        self.assertFalse(path.isdir.called)
+
+    @patch.object(host, 'init_is_systemd')
+    @patch('subprocess.call')
+    def test_runs_service_action(self, mock_call, systemd):
+        systemd.return_value = False
+        mock_call.return_value = 0
+        action = 'some-action'
+        service_name = 'foo-service'
+
+        result = host.service(action, service_name)
+
+        self.assertTrue(result)
+        mock_call.assert_called_with(['service', service_name, action])
+
+    @patch.object(host, 'init_is_systemd')
+    @patch('subprocess.call')
+    def test_runs_systemctl_action(self, mock_call, systemd):
+        """Ensure that service calls under systemd call 'systemctl'."""
+        systemd.return_value = True
+        mock_call.return_value = 0
+        action = 'some-action'
+        service_name = 'foo-service'
+
+        result = host.service(action, service_name)
+
+        self.assertTrue(result)
+        mock_call.assert_called_with(['systemctl', action, service_name])
+
+    @patch.object(host, 'init_is_systemd')
+    @patch('subprocess.call')
+    def test_returns_false_when_service_fails(self, mock_call, systemd):
+        systemd.return_value = False
+        mock_call.return_value = 1
+        action = 'some-action'
+        service_name = 'foo-service'
+
+        result = host.service(action, service_name)
+
+        self.assertFalse(result)
+        mock_call.assert_called_with(['service', service_name, action])
+
+    @patch.object(host, 'service')
+    def test_starts_a_service(self, service):
+        service_name = 'foo-service'
+        service.side_effect = [True]
+        self.assertTrue(host.service_start(service_name))
+
+        service.assert_called_with('start', service_name)
+
+    @patch.object(host, 'service')
+    def test_starts_a_service_with_parms(self, service):
+        service_name = 'foo-service'
+        service.side_effect = [True]
+        self.assertTrue(host.service_start(service_name, id=4))
+
+        service.assert_called_with('start', service_name, id=4)
+
+    @patch.object(host, 'service')
+    def test_stops_a_service(self, service):
+        service_name = 'foo-service'
+        service.side_effect = [True]
+        self.assertTrue(host.service_stop(service_name))
+
+        service.assert_called_with('stop', service_name)
+
+    @patch.object(host, 'service')
+    def test_restarts_a_service(self, service):
+        service_name = 'foo-service'
+        service.side_effect = [True]
+        self.assertTrue(host.service_restart(service_name))
+
+        service.assert_called_with('restart', service_name)
+
+    @patch.object(host, 'service_running')
+    @patch.object(host, 'init_is_systemd')
+    @patch.object(host, 'service')
+    def test_pauses_a_running_systemd_unit(self, service, systemd,
+                                           service_running):
+        """Pausing a running systemd unit stops, disables, and masks it."""
+        service_name = 'foo-service'
+        service_running.return_value = True
+        systemd.return_value = True
+        self.assertTrue(host.service_pause(service_name))
+        service.assert_has_calls([
+            call('stop', service_name),
+            call('disable', service_name),
+            call('mask', service_name)])
+
+    @patch.object(host, 'service_running')
+    @patch.object(host, 'init_is_systemd')
+    @patch.object(host, 'service')
+    def test_resumes_a_stopped_systemd_unit(self, service, systemd,
+                                            service_running):
+        """Resuming a stopped systemd unit unmasks, enables, and starts it."""
+        service_name = 'foo-service'
+        service_running.return_value = False
+        systemd.return_value = True
+        self.assertTrue(host.service_resume(service_name))
+        service.assert_has_calls([
call('unmask', service_name), + # Ensures a package starts up if disabled but not masked, + # per lp:1692178 + call('enable', service_name), + call('start', service_name)]) + + @patch.object(host, 'service_running') + @patch.object(host, 'init_is_systemd') + @patch.object(host, 'service') + def test_pauses_a_running_upstart_service(self, service, systemd, + service_running): + """Pause on a running service will call service stop.""" + service_name = 'foo-service' + service.side_effect = [True] + systemd.return_value = False + service_running.return_value = True + tempdir = mkdtemp(prefix="test_pauses_an_upstart_service") + conf_path = os.path.join(tempdir, "{}.conf".format(service_name)) + # Just needs to exist + with open(conf_path, "w") as fh: + fh.write("") + self.addCleanup(rmtree, tempdir) + self.assertTrue(host.service_pause(service_name, init_dir=tempdir)) + + service.assert_called_with('stop', service_name) + override_path = os.path.join( + tempdir, "{}.override".format(service_name)) + with open(override_path, "r") as fh: + override_contents = fh.read() + self.assertEqual("manual\n", override_contents) + + @patch.object(host, 'service_running') + @patch.object(host, 'init_is_systemd') + @patch.object(host, 'service') + def test_pauses_a_stopped_upstart_service(self, service, systemd, + service_running): + """Pause on a stopped service will not call service stop.""" + service_name = 'foo-service' + service.side_effect = [True] + systemd.return_value = False + service_running.return_value = False + tempdir = mkdtemp(prefix="test_pauses_an_upstart_service") + conf_path = os.path.join(tempdir, "{}.conf".format(service_name)) + # Just needs to exist + with open(conf_path, "w") as fh: + fh.write("") + self.addCleanup(rmtree, tempdir) + self.assertTrue(host.service_pause(service_name, init_dir=tempdir)) + + # Stop isn't called because service is already stopped + self.assertRaises( + AssertionError, service.assert_called_with, 'stop', service_name) + override_path = os.path.join( + tempdir, "{}.override".format(service_name)) + with open(override_path, "r") as fh: + override_contents = fh.read() + self.assertEqual("manual\n", override_contents) + + @patch.object(host, 'service_running') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.check_call') + @patch.object(host, 'service') + def test_pauses_a_running_sysv_service(self, service, check_call, + systemd, service_running): + """Pause calls service stop on a running sysv service.""" + service_name = 'foo-service' + service.side_effect = [True] + systemd.return_value = False + service_running.return_value = True + tempdir = mkdtemp(prefix="test_pauses_a_sysv_service") + sysv_path = os.path.join(tempdir, service_name) + # Just needs to exist + with open(sysv_path, "w") as fh: + fh.write("") + self.addCleanup(rmtree, tempdir) + self.assertTrue(host.service_pause( + service_name, init_dir=tempdir, initd_dir=tempdir)) + + service.assert_called_with('stop', service_name) + check_call.assert_called_with(["update-rc.d", service_name, "disable"]) + + @patch.object(host, 'service_running') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.check_call') + @patch.object(host, 'service') + def test_pauses_a_stopped_sysv_service(self, service, check_call, + systemd, service_running): + """Pause does not call service stop on a stopped sysv service.""" + service_name = 'foo-service' + service.side_effect = [True] + systemd.return_value = False + service_running.return_value = False + tempdir = 
mkdtemp(prefix="test_pauses_a_sysv_service") + sysv_path = os.path.join(tempdir, service_name) + # Just needs to exist + with open(sysv_path, "w") as fh: + fh.write("") + self.addCleanup(rmtree, tempdir) + self.assertTrue(host.service_pause( + service_name, init_dir=tempdir, initd_dir=tempdir)) + + # Stop isn't called because service is already stopped + self.assertRaises( + AssertionError, service.assert_called_with, 'stop', service_name) + check_call.assert_called_with(["update-rc.d", service_name, "disable"]) + + @patch.object(host, 'init_is_systemd') + @patch.object(host, 'service') + def test_pause_with_unknown_service(self, service, systemd): + service_name = 'foo-service' + service.side_effect = [True] + systemd.return_value = False + tempdir = mkdtemp(prefix="test_pauses_with_unknown_service") + self.addCleanup(rmtree, tempdir) + exception = self.assertRaises( + ValueError, host.service_pause, + service_name, init_dir=tempdir, initd_dir=tempdir) + self.assertIn( + "Unable to detect {0}".format(service_name), str(exception)) + self.assertIn(tempdir, str(exception)) + + @patch.object(host, 'service_running') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.check_output') + @patch.object(host, 'service') + def test_resumes_a_running_upstart_service(self, service, check_output, + systemd, service_running): + """When the service is already running, service start isn't called.""" + service_name = 'foo-service' + service.side_effect = [True] + systemd.return_value = False + service_running.return_value = True + tempdir = mkdtemp(prefix="test_resumes_an_upstart_service") + conf_path = os.path.join(tempdir, "{}.conf".format(service_name)) + with open(conf_path, "w") as fh: + fh.write("") + self.addCleanup(rmtree, tempdir) + self.assertTrue(host.service_resume(service_name, init_dir=tempdir)) + + # Start isn't called because service is already running + self.assertFalse(service.called) + override_path = os.path.join( + tempdir, "{}.override".format(service_name)) + self.assertFalse(os.path.exists(override_path)) + + @patch.object(host, 'service_running') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.check_output') + @patch.object(host, 'service') + def test_resumes_a_stopped_upstart_service(self, service, check_output, + systemd, service_running): + """When the service is stopped, service start is called.""" + check_output.return_value = b'foo-service stop/waiting' + service_name = 'foo-service' + service.side_effect = [True] + systemd.return_value = False + service_running.return_value = False + tempdir = mkdtemp(prefix="test_resumes_an_upstart_service") + conf_path = os.path.join(tempdir, "{}.conf".format(service_name)) + with open(conf_path, "w") as fh: + fh.write("") + self.addCleanup(rmtree, tempdir) + self.assertTrue(host.service_resume(service_name, init_dir=tempdir)) + + service.assert_called_with('start', service_name) + override_path = os.path.join( + tempdir, "{}.override".format(service_name)) + self.assertFalse(os.path.exists(override_path)) + + @patch.object(host, 'service_running') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.check_call') + @patch.object(host, 'service') + def test_resumes_a_sysv_service(self, service, check_call, systemd, + service_running): + """When process is in a stop/waiting state, service start is called.""" + service_name = 'foo-service' + service.side_effect = [True] + systemd.return_value = False + service_running.return_value = False + tempdir = mkdtemp(prefix="test_resumes_a_sysv_service") + sysv_path = 
os.path.join(tempdir, service_name) + # Just needs to exist + with open(sysv_path, "w") as fh: + fh.write("") + self.addCleanup(rmtree, tempdir) + self.assertTrue(host.service_resume( + service_name, init_dir=tempdir, initd_dir=tempdir)) + + service.assert_called_with('start', service_name) + check_call.assert_called_with(["update-rc.d", service_name, "enable"]) + + @patch.object(host, 'service_running') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.check_call') + @patch.object(host, 'service') + def test_resume_a_running_sysv_service(self, service, check_call, + systemd, service_running): + """When process is already running, service start isn't called.""" + service_name = 'foo-service' + systemd.return_value = False + service_running.return_value = True + tempdir = mkdtemp(prefix="test_resumes_a_sysv_service") + sysv_path = os.path.join(tempdir, service_name) + # Just needs to exist + with open(sysv_path, "w") as fh: + fh.write("") + self.addCleanup(rmtree, tempdir) + self.assertTrue(host.service_resume( + service_name, init_dir=tempdir, initd_dir=tempdir)) + + # Start isn't called because service is already running + self.assertFalse(service.called) + check_call.assert_called_with(["update-rc.d", service_name, "enable"]) + + @patch.object(host, 'service_running') + @patch.object(host, 'init_is_systemd') + @patch.object(host, 'service') + def test_resume_with_unknown_service(self, service, systemd, + service_running): + service_name = 'foo-service' + service.side_effect = [True] + systemd.return_value = False + service_running.return_value = False + tempdir = mkdtemp(prefix="test_resumes_with_unknown_service") + self.addCleanup(rmtree, tempdir) + exception = self.assertRaises( + ValueError, host.service_resume, + service_name, init_dir=tempdir, initd_dir=tempdir) + self.assertIn( + "Unable to detect {0}".format(service_name), str(exception)) + self.assertIn(tempdir, str(exception)) + + @patch.object(host, 'service') + def test_reloads_a_service(self, service): + service_name = 'foo-service' + service.side_effect = [True] + self.assertTrue(host.service_reload(service_name)) + + service.assert_called_with('reload', service_name) + + @patch.object(host, 'service') + def test_failed_reload_restarts_a_service(self, service): + service_name = 'foo-service' + service.side_effect = [False, True] + self.assertTrue( + host.service_reload(service_name, restart_on_failure=True)) + + service.assert_has_calls([ + call('reload', service_name), + call('restart', service_name) + ]) + + @patch.object(host, 'service') + def test_failed_reload_without_restart(self, service): + service_name = 'foo-service' + service.side_effect = [False] + self.assertFalse(host.service_reload(service_name)) + + service.assert_called_with('reload', service_name) + + @patch.object(host, 'service') + def test_start_a_service_fails(self, service): + service_name = 'foo-service' + service.side_effect = [False] + self.assertFalse(host.service_start(service_name)) + + service.assert_called_with('start', service_name) + + @patch.object(host, 'service') + def test_stop_a_service_fails(self, service): + service_name = 'foo-service' + service.side_effect = [False] + self.assertFalse(host.service_stop(service_name)) + + service.assert_called_with('stop', service_name) + + @patch.object(host, 'service') + def test_restart_a_service_fails(self, service): + service_name = 'foo-service' + service.side_effect = [False] + self.assertFalse(host.service_restart(service_name)) + + service.assert_called_with('restart', 
service_name) + + @patch.object(host, 'service') + def test_reload_a_service_fails(self, service): + service_name = 'foo-service' + service.side_effect = [False] + self.assertFalse(host.service_reload(service_name)) + + service.assert_called_with('reload', service_name) + + @patch.object(host, 'service') + def test_failed_reload_restarts_a_service_fails(self, service): + service_name = 'foo-service' + service.side_effect = [False, False] + self.assertFalse( + host.service_reload(service_name, restart_on_failure=True)) + + service.assert_has_calls([ + call('reload', service_name), + call('restart', service_name) + ]) + + @patch.object(host, 'os') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.check_output') + def test_service_running_on_stopped_service(self, check_output, systemd, + os): + systemd.return_value = False + os.path.exists.return_value = True + check_output.return_value = b'foo stop/waiting' + self.assertFalse(host.service_running('foo')) + + @patch.object(host, 'os') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.check_output') + def test_service_running_on_running_service(self, check_output, systemd, + os): + systemd.return_value = False + os.path.exists.return_value = True + check_output.return_value = b'foo start/running, process 23871' + self.assertTrue(host.service_running('foo')) + + @patch.object(host, 'os') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.check_output') + def test_service_running_on_unknown_service(self, check_output, systemd, + os): + systemd.return_value = False + os.path.exists.return_value = True + exc = subprocess.CalledProcessError(1, ['status']) + check_output.side_effect = exc + self.assertFalse(host.service_running('foo')) + + @patch.object(host, 'os') + @patch.object(host, 'service') + @patch.object(host, 'init_is_systemd') + def test_service_systemv_running(self, systemd, service, os): + systemd.return_value = False + service.return_value = True + os.path.exists.side_effect = [False, True] + self.assertTrue(host.service_running('rabbitmq-server')) + service.assert_called_with('status', 'rabbitmq-server') + + @patch.object(host, 'os') + @patch.object(host, 'service') + @patch.object(host, 'init_is_systemd') + def test_service_systemv_not_running(self, systemd, service, + os): + systemd.return_value = False + service.return_value = False + os.path.exists.side_effect = [False, True] + self.assertFalse(host.service_running('keystone')) + service.assert_called_with('status', 'keystone') + + @patch('subprocess.call') + @patch.object(host, 'init_is_systemd') + def test_service_start_with_params(self, systemd, call): + systemd.return_value = False + call.return_value = 0 + self.assertTrue(host.service_start('ceph-osd', id=4)) + call.assert_called_with(['service', 'ceph-osd', 'start', 'id=4']) + + @patch('subprocess.call') + @patch.object(host, 'init_is_systemd') + def test_service_stop_with_params(self, systemd, call): + systemd.return_value = False + call.return_value = 0 + self.assertTrue(host.service_stop('ceph-osd', id=4)) + call.assert_called_with(['service', 'ceph-osd', 'stop', 'id=4']) + + @patch('subprocess.call') + @patch.object(host, 'init_is_systemd') + def test_service_start_systemd_with_params(self, systemd, call): + systemd.return_value = True + call.return_value = 0 + self.assertTrue(host.service_start('ceph-osd', id=4)) + call.assert_called_with(['systemctl', 'start', 'ceph-osd']) + + @patch('grp.getgrnam') + @patch('pwd.getpwnam') + @patch('subprocess.check_call') + @patch.object(host, 
'log') + def test_adds_a_user_if_it_doesnt_exist(self, log, check_call, + getpwnam, getgrnam): + username = 'johndoe' + password = 'eodnhoj' + shell = '/bin/bash' + existing_user_pwnam = KeyError('user not found') + new_user_pwnam = 'some user pwnam' + + getpwnam.side_effect = [existing_user_pwnam, new_user_pwnam] + + result = host.adduser(username, password=password) + + self.assertEqual(result, new_user_pwnam) + check_call.assert_called_with([ + 'useradd', + '--create-home', + '--shell', shell, + '--password', password, + '-g', username, + username + ]) + getpwnam.assert_called_with(username) + + @patch('pwd.getpwnam') + @patch('subprocess.check_call') + @patch.object(host, 'log') + def test_doesnt_add_user_if_it_already_exists(self, log, check_call, + getpwnam): + username = 'johndoe' + password = 'eodnhoj' + existing_user_pwnam = 'some user pwnam' + + getpwnam.return_value = existing_user_pwnam + + result = host.adduser(username, password=password) + + self.assertEqual(result, existing_user_pwnam) + self.assertFalse(check_call.called) + getpwnam.assert_called_with(username) + + @patch('grp.getgrnam') + @patch('pwd.getpwnam') + @patch('subprocess.check_call') + @patch.object(host, 'log') + def test_adds_a_user_with_different_shell(self, log, check_call, getpwnam, + getgrnam): + username = 'johndoe' + password = 'eodnhoj' + shell = '/bin/zsh' + existing_user_pwnam = KeyError('user not found') + new_user_pwnam = 'some user pwnam' + + getpwnam.side_effect = [existing_user_pwnam, new_user_pwnam] + getgrnam.side_effect = KeyError('group not found') + + result = host.adduser(username, password=password, shell=shell) + + self.assertEqual(result, new_user_pwnam) + check_call.assert_called_with([ + 'useradd', + '--create-home', + '--shell', shell, + '--password', password, + username + ]) + getpwnam.assert_called_with(username) + + @patch('grp.getgrnam') + @patch('pwd.getpwnam') + @patch('subprocess.check_call') + @patch.object(host, 'log') + def test_adduser_with_groups(self, log, check_call, getpwnam, getgrnam): + username = 'johndoe' + password = 'eodnhoj' + shell = '/bin/bash' + existing_user_pwnam = KeyError('user not found') + new_user_pwnam = 'some user pwnam' + + getpwnam.side_effect = [existing_user_pwnam, new_user_pwnam] + + result = host.adduser(username, password=password, + primary_group='foo', secondary_groups=[ + 'bar', 'qux', + ]) + + self.assertEqual(result, new_user_pwnam) + check_call.assert_called_with([ + 'useradd', + '--create-home', + '--shell', shell, + '--password', password, + '-g', 'foo', + '-G', 'bar,qux', + username + ]) + getpwnam.assert_called_with(username) + assert not getgrnam.called + + @patch('pwd.getpwnam') + @patch('subprocess.check_call') + @patch.object(host, 'log') + def test_adds_a_systemuser(self, log, check_call, getpwnam): + username = 'johndoe' + existing_user_pwnam = KeyError('user not found') + new_user_pwnam = 'some user pwnam' + + getpwnam.side_effect = [existing_user_pwnam, new_user_pwnam] + + result = host.adduser(username, system_user=True) + + self.assertEqual(result, new_user_pwnam) + check_call.assert_called_with([ + 'useradd', + '--system', + username + ]) + getpwnam.assert_called_with(username) + + @patch('pwd.getpwnam') + @patch('subprocess.check_call') + @patch.object(host, 'log') + def test_adds_a_systemuser_with_home_dir(self, log, check_call, getpwnam): + username = 'johndoe' + existing_user_pwnam = KeyError('user not found') + new_user_pwnam = 'some user pwnam' + + getpwnam.side_effect = [existing_user_pwnam, new_user_pwnam] + + 
result = host.adduser(username, system_user=True, + home_dir='/var/lib/johndoe') + + self.assertEqual(result, new_user_pwnam) + check_call.assert_called_with([ + 'useradd', + '--home', + '/var/lib/johndoe', + '--system', + username + ]) + getpwnam.assert_called_with(username) + + @patch('pwd.getpwnam') + @patch('pwd.getpwuid') + @patch('grp.getgrnam') + @patch('subprocess.check_call') + @patch.object(host, 'log') + def test_add_user_uid(self, log, check_call, getgrnam, getpwuid, getpwnam): + user_name = 'james' + user_id = 1111 + uid_key_error = KeyError('user not found') + getpwuid.side_effect = uid_key_error + host.adduser(user_name, uid=user_id) + + check_call.assert_called_with([ + 'useradd', + '--uid', + str(user_id), + '--system', + '-g', + user_name, + user_name + ]) + getpwnam.assert_called_with(user_name) + getpwuid.assert_called_with(user_id) + + @patch('grp.getgrnam') + @patch('grp.getgrgid') + @patch('subprocess.check_call') + @patch.object(host, 'log') + def test_add_group_gid(self, log, check_call, getgrgid, getgrnam): + group_name = 'darkhorse' + group_id = 1005 + existing_group_gid = KeyError('group not found') + new_group_gid = 1006 + getgrgid.side_effect = [existing_group_gid, new_group_gid] + + host.add_group(group_name, gid=group_id) + check_call.assert_called_with([ + 'addgroup', + '--gid', + str(group_id), + '--group', + group_name + ]) + getgrgid.assert_called_with(group_id) + getgrnam.assert_called_with(group_name) + + @patch('pwd.getpwnam') + def test_user_exists_true(self, getpwnam): + getpwnam.side_effect = 'pw info' + self.assertTrue(host.user_exists('bob')) + + @patch('pwd.getpwnam') + def test_user_exists_false(self, getpwnam): + getpwnam.side_effect = KeyError('user not found') + self.assertFalse(host.user_exists('bob')) + + @patch('subprocess.check_call') + @patch.object(host, 'log') + def test_adds_a_user_to_a_group(self, log, check_call): + username = 'foo' + group = 'bar' + + host.add_user_to_group(username, group) + + check_call.assert_called_with([ + 'gpasswd', '-a', + username, + group + ]) + + @patch.object(osplatform, 'get_platform') + @patch('grp.getgrnam') + @patch('subprocess.check_call') + def test_add_a_group_if_it_doesnt_exist_ubuntu(self, check_call, + getgrnam, platform): + platform.return_value = 'ubuntu' + imp.reload(host) + + group_name = 'testgroup' + existing_group_grnam = KeyError('group not found') + new_group_grnam = 'some group grnam' + + getgrnam.side_effect = [existing_group_grnam, new_group_grnam] + with patch("charmhelpers.core.host.log"): + result = host.add_group(group_name) + + self.assertEqual(result, new_group_grnam) + check_call.assert_called_with(['addgroup', '--group', group_name]) + getgrnam.assert_called_with(group_name) + + @patch.object(osplatform, 'get_platform') + @patch('grp.getgrnam') + @patch('subprocess.check_call') + def test_add_a_group_if_it_doesnt_exist_centos(self, check_call, + getgrnam, platform): + platform.return_value = 'centos' + imp.reload(host) + + group_name = 'testgroup' + existing_group_grnam = KeyError('group not found') + new_group_grnam = 'some group grnam' + + getgrnam.side_effect = [existing_group_grnam, new_group_grnam] + + with patch("charmhelpers.core.host.log"): + result = host.add_group(group_name) + + self.assertEqual(result, new_group_grnam) + check_call.assert_called_with(['groupadd', group_name]) + getgrnam.assert_called_with(group_name) + + @patch.object(osplatform, 'get_platform') + @patch('grp.getgrnam') + @patch('subprocess.check_call') + def 
test_doesnt_add_group_if_it_already_exists_ubuntu(self, check_call, + getgrnam, platform): + platform.return_value = 'ubuntu' + imp.reload(host) + + group_name = 'testgroup' + existing_group_grnam = 'some group grnam' + + getgrnam.return_value = existing_group_grnam + + with patch("charmhelpers.core.host.log"): + result = host.add_group(group_name) + + self.assertEqual(result, existing_group_grnam) + self.assertFalse(check_call.called) + getgrnam.assert_called_with(group_name) + + @patch.object(osplatform, 'get_platform') + @patch('grp.getgrnam') + @patch('subprocess.check_call') + def test_doesnt_add_group_if_it_already_exists_centos(self, check_call, + getgrnam, platform): + platform.return_value = 'centos' + imp.reload(host) + + group_name = 'testgroup' + existing_group_grnam = 'some group grnam' + + getgrnam.return_value = existing_group_grnam + + with patch("charmhelpers.core.host.log"): + result = host.add_group(group_name) + + self.assertEqual(result, existing_group_grnam) + self.assertFalse(check_call.called) + getgrnam.assert_called_with(group_name) + + @patch.object(osplatform, 'get_platform') + @patch('grp.getgrnam') + @patch('subprocess.check_call') + def test_add_a_system_group_ubuntu(self, check_call, getgrnam, platform): + platform.return_value = 'ubuntu' + imp.reload(host) + + group_name = 'testgroup' + existing_group_grnam = KeyError('group not found') + new_group_grnam = 'some group grnam' + + getgrnam.side_effect = [existing_group_grnam, new_group_grnam] + + with patch("charmhelpers.core.host.log"): + result = host.add_group(group_name, system_group=True) + + self.assertEqual(result, new_group_grnam) + check_call.assert_called_with([ + 'addgroup', + '--system', + group_name + ]) + getgrnam.assert_called_with(group_name) + + @patch.object(osplatform, 'get_platform') + @patch('grp.getgrnam') + @patch('subprocess.check_call') + def test_add_a_system_group_centos(self, check_call, getgrnam, platform): + platform.return_value = 'centos' + imp.reload(host) + + group_name = 'testgroup' + existing_group_grnam = KeyError('group not found') + new_group_grnam = 'some group grnam' + + getgrnam.side_effect = [existing_group_grnam, new_group_grnam] + + with patch("charmhelpers.core.host.log"): + result = host.add_group(group_name, system_group=True) + + self.assertEqual(result, new_group_grnam) + check_call.assert_called_with([ + 'groupadd', + '-r', + group_name + ]) + getgrnam.assert_called_with(group_name) + + @patch('subprocess.check_call') + def test_chage_no_chroot(self, check_call): + host.chage('usera', expiredate='2019-09-28', maxdays='11') + check_call.assert_called_with([ + 'chage', + '--expiredate', '2019-09-28', + '--maxdays', '11', + 'usera' + ]) + + @patch('subprocess.check_call') + def test_chage_chroot(self, check_call): + host.chage('usera', expiredate='2019-09-28', maxdays='11', + root='mychroot') + check_call.assert_called_with([ + 'chage', + '--root', 'mychroot', + '--expiredate', '2019-09-28', + '--maxdays', '11', + 'usera' + ]) + + @patch('subprocess.check_call') + def test_remove_password_expiry(self, check_call): + host.remove_password_expiry('usera') + check_call.assert_called_with([ + 'chage', + '--expiredate', '-1', + '--inactive', '-1', + '--mindays', '0', + '--maxdays', '-1', + 'usera' + ]) + + @patch('subprocess.check_output') + @patch.object(host, 'log') + def test_rsyncs_a_path(self, log, check_output): + from_path = '/from/this/path/foo' + to_path = '/to/this/path/bar' + check_output.return_value = b' some output ' # Spaces will be stripped + + result 
= host.rsync(from_path, to_path) + + self.assertEqual(result, 'some output') + check_output.assert_called_with(['/usr/bin/rsync', '-r', '--delete', + '--executability', + '/from/this/path/foo', + '/to/this/path/bar'], stderr=subprocess.STDOUT) + + @patch('subprocess.check_call') + @patch.object(host, 'log') + def test_creates_a_symlink(self, log, check_call): + source = '/from/this/path/foo' + destination = '/to/this/path/bar' + + host.symlink(source, destination) + + check_call.assert_called_with(['ln', '-sf', + '/from/this/path/foo', + '/to/this/path/bar']) + + @patch('pwd.getpwnam') + @patch('grp.getgrnam') + @patch.object(host, 'log') + @patch.object(host, 'os') + def test_creates_a_directory_if_it_doesnt_exist(self, os_, log, + getgrnam, getpwnam): + uid = 123 + gid = 234 + owner = 'some-user' + group = 'some-group' + path = '/some/other/path/from/link' + realpath = '/some/path' + path_exists = False + perms = 0o644 + + getpwnam.return_value.pw_uid = uid + getgrnam.return_value.gr_gid = gid + os_.path.abspath.return_value = realpath + os_.path.exists.return_value = path_exists + + host.mkdir(path, owner=owner, group=group, perms=perms) + + getpwnam.assert_called_with('some-user') + getgrnam.assert_called_with('some-group') + os_.path.abspath.assert_called_with(path) + os_.path.exists.assert_called_with(realpath) + os_.makedirs.assert_called_with(realpath, perms) + os_.chown.assert_called_with(realpath, uid, gid) + + @patch.object(host, 'log') + @patch.object(host, 'os') + def test_creates_a_directory_with_defaults(self, os_, log): + uid = 0 + gid = 0 + path = '/some/other/path/from/link' + realpath = '/some/path' + path_exists = False + perms = 0o555 + + os_.path.abspath.return_value = realpath + os_.path.exists.return_value = path_exists + + host.mkdir(path) + + os_.path.abspath.assert_called_with(path) + os_.path.exists.assert_called_with(realpath) + os_.makedirs.assert_called_with(realpath, perms) + os_.chown.assert_called_with(realpath, uid, gid) + + @patch('pwd.getpwnam') + @patch('grp.getgrnam') + @patch.object(host, 'log') + @patch.object(host, 'os') + def test_removes_file_with_same_path_before_mkdir(self, os_, log, + getgrnam, getpwnam): + uid = 123 + gid = 234 + owner = 'some-user' + group = 'some-group' + path = '/some/other/path/from/link' + realpath = '/some/path' + path_exists = True + force = True + is_dir = False + perms = 0o644 + + getpwnam.return_value.pw_uid = uid + getgrnam.return_value.gr_gid = gid + os_.path.abspath.return_value = realpath + os_.path.exists.return_value = path_exists + os_.path.isdir.return_value = is_dir + + host.mkdir(path, owner=owner, group=group, perms=perms, force=force) + + getpwnam.assert_called_with('some-user') + getgrnam.assert_called_with('some-group') + os_.path.abspath.assert_called_with(path) + os_.path.exists.assert_called_with(realpath) + os_.unlink.assert_called_with(realpath) + os_.makedirs.assert_called_with(realpath, perms) + os_.chown.assert_called_with(realpath, uid, gid) + + @patch('pwd.getpwnam') + @patch('grp.getgrnam') + @patch.object(host, 'log') + @patch.object(host, 'os') + def test_writes_content_to_a_file(self, os_, log, getgrnam, getpwnam): + # Curly brackets here demonstrate that we are *not* rendering + # these strings with Python's string formatting. This is a + # change from the original behavior per Bug #1195634. 
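+        # The {foo}/{bar}/{baz} names below are therefore expected to reach
+        # every assertion verbatim, untouched by str.format().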
+ uid = 123 + gid = 234 + owner = 'some-user-{foo}' + group = 'some-group-{bar}' + path = '/some/path/{baz}' + contents = b'what is {juju}' + perms = 0o644 + fileno = 'some-fileno' + + getpwnam.return_value.pw_uid = uid + getgrnam.return_value.gr_gid = gid + + with patch_open() as (mock_open, mock_file): + mock_file.fileno.return_value = fileno + + host.write_file(path, contents, owner=owner, group=group, + perms=perms) + + getpwnam.assert_called_with('some-user-{foo}') + getgrnam.assert_called_with('some-group-{bar}') + mock_open.assert_called_with('/some/path/{baz}', 'wb') + os_.fchown.assert_called_with(fileno, uid, gid) + os_.fchmod.assert_called_with(fileno, perms) + mock_file.write.assert_called_with(b'what is {juju}') + + @patch.object(host, 'log') + @patch.object(host, 'os') + def test_writes_content_with_default(self, os_, log): + uid = 0 + gid = 0 + path = '/some/path/{baz}' + fmtstr = b'what is {juju}' + perms = 0o444 + fileno = 'some-fileno' + + with patch_open() as (mock_open, mock_file): + mock_file.fileno.return_value = fileno + + host.write_file(path, fmtstr) + + mock_open.assert_called_with('/some/path/{baz}', 'wb') + os_.fchown.assert_called_with(fileno, uid, gid) + os_.fchmod.assert_called_with(fileno, perms) + mock_file.write.assert_called_with(b'what is {juju}') + + @patch.object(host, 'log') + @patch.object(host, 'os') + def test_does_not_write_duplicate_content(self, os_, log): + uid = 0 + gid = 0 + path = '/some/path/{baz}' + fmtstr = b'what is {juju}' + perms = 0o444 + fileno = 'some-fileno' + + os_.stat.return_value.st_uid = 1 + os_.stat.return_value.st_gid = 1 + os_.stat.return_value.st_mode = 0o777 + + with patch_open() as (mock_open, mock_file): + mock_file.fileno.return_value = fileno + mock_file.read.return_value = fmtstr + + host.write_file(path, fmtstr) + + self.assertEqual(mock_open.call_count, 1) # Called to read + os_.chown.assert_has_calls([ + call(path, uid, -1), + call(path, -1, gid), + ]) + os_.chmod.assert_called_with(path, perms) + + @patch.object(host, 'log') + @patch.object(host, 'os') + def test_only_changes_incorrect_ownership(self, os_, log): + uid = 0 + gid = 0 + path = '/some/path/{baz}' + fmtstr = b'what is {juju}' + perms = 0o444 + fileno = 'some-fileno' + + os_.stat.return_value.st_uid = uid + os_.stat.return_value.st_gid = gid + os_.stat.return_value.st_mode = perms + + with patch_open() as (mock_open, mock_file): + mock_file.fileno.return_value = fileno + mock_file.read.return_value = fmtstr + + host.write_file(path, fmtstr) + + self.assertEqual(mock_open.call_count, 1) # Called to read + self.assertEqual(os_.chown.call_count, 0) + + @patch.object(host, 'log') + @patch.object(host, 'os') + def test_writes_binary_contents(self, os_, log): + path = '/some/path/{baz}' + fmtstr = six.u('what is {juju}\N{TRADE MARK SIGN}').encode('UTF-8') + fileno = 'some-fileno' + + with patch_open() as (mock_open, mock_file): + mock_file.fileno.return_value = fileno + + host.write_file(path, fmtstr) + + mock_open.assert_called_with('/some/path/{baz}', 'wb') + mock_file.write.assert_called_with(fmtstr) + + @patch('subprocess.check_output') + @patch.object(host, 'log') + def test_mounts_a_device(self, log, check_output): + device = '/dev/guido' + mountpoint = '/mnt/guido' + options = 'foo,bar' + + result = host.mount(device, mountpoint, options) + + self.assertTrue(result) + check_output.assert_called_with(['mount', '-o', 'foo,bar', + '/dev/guido', '/mnt/guido']) + + @patch('subprocess.check_output') + @patch.object(host, 'log') + def 
test_doesnt_mount_on_error(self, log, check_output):
+        device = '/dev/guido'
+        mountpoint = '/mnt/guido'
+        options = 'foo,bar'
+
+        error = subprocess.CalledProcessError(123, 'mount it', 'Oops...')
+        check_output.side_effect = error
+
+        result = host.mount(device, mountpoint, options)
+
+        self.assertFalse(result)
+        check_output.assert_called_with(['mount', '-o', 'foo,bar',
+                                         '/dev/guido', '/mnt/guido'])
+
+    @patch('subprocess.check_output')
+    @patch.object(host, 'log')
+    def test_mounts_a_device_without_options(self, log, check_output):
+        device = '/dev/guido'
+        mountpoint = '/mnt/guido'
+
+        result = host.mount(device, mountpoint)
+
+        self.assertTrue(result)
+        check_output.assert_called_with(['mount', '/dev/guido', '/mnt/guido'])
+
+    @patch.object(host, 'Fstab')
+    @patch('subprocess.check_output')
+    @patch.object(host, 'log')
+    def test_mounts_and_persist_a_device(self, log, check_output, fstab):
+        """Check if a mount works with the persist flag set to True
+        """
+        device = '/dev/guido'
+        mountpoint = '/mnt/guido'
+        options = 'foo,bar'
+
+        result = host.mount(device, mountpoint, options, persist=True)
+
+        self.assertTrue(result)
+        check_output.assert_called_with(['mount', '-o', 'foo,bar',
+                                         '/dev/guido', '/mnt/guido'])
+
+        fstab.add.assert_called_with('/dev/guido', '/mnt/guido', 'ext3',
+                                     options='foo,bar')
+
+        result = host.mount(device, mountpoint, options, persist=True,
+                            filesystem="xfs")
+
+        self.assertTrue(result)
+        fstab.add.assert_called_with('/dev/guido', '/mnt/guido', 'xfs',
+                                     options='foo,bar')
+
+    @patch.object(host, 'Fstab')
+    @patch('subprocess.check_output')
+    @patch.object(host, 'log')
+    def test_umounts_a_device(self, log, check_output, fstab):
+        mountpoint = '/mnt/guido'
+
+        result = host.umount(mountpoint, persist=True)
+
+        self.assertTrue(result)
+        check_output.assert_called_with(['umount', mountpoint])
+        fstab.remove_by_mountpoint.assert_called_with(mountpoint)
+
+    @patch('subprocess.check_output')
+    @patch.object(host, 'log')
+    def test_umounts_and_persist_device(self, log, check_output):
+        mountpoint = '/mnt/guido'
+
+        result = host.umount(mountpoint)
+
+        self.assertTrue(result)
+        check_output.assert_called_with(['umount', '/mnt/guido'])
+
+    @patch('subprocess.check_output')
+    @patch.object(host, 'log')
+    def test_doesnt_umount_on_error(self, log, check_output):
+        mountpoint = '/mnt/guido'
+
+        error = subprocess.CalledProcessError(123, 'mount it', 'Oops...')
+        check_output.side_effect = error
+
+        result = host.umount(mountpoint)
+
+        self.assertFalse(result)
+        check_output.assert_called_with(['umount', '/mnt/guido'])
+
+    def test_lists_the_mount_points(self):
+        with patch_open() as (mock_open, mock_file):
+            mock_file.readlines.return_value = MOUNT_LINES
+            result = host.mounts()
+
+        self.assertEqual(result, [
+            ['/', 'rootfs'],
+            ['/sys', 'sysfs'],
+            ['/proc', 'proc'],
+            ['/dev', 'udev'],
+            ['/dev/pts', 'devpts']
+        ])
+        mock_open.assert_called_with('/proc/mounts')
+
+    _hash_files = {
+        '/etc/exists.conf': 'lots of nice ceph configuration',
+        '/etc/missing.conf': None
+    }
+
+    @patch('subprocess.check_output')
+    @patch.object(host, 'log')
+    def test_fstab_mount(self, log, check_output):
+        self.assertTrue(host.fstab_mount('/mnt/mymntpnt'))
+        check_output.assert_called_with(['mount', '/mnt/mymntpnt'])
+
+    @patch('subprocess.check_output')
+    @patch.object(host, 'log')
+    def test_fstab_mount_fail(self, log, check_output):
+        error = subprocess.CalledProcessError(123, 'mount it', 'Oops...')
+        check_output.side_effect = error
+        self.assertFalse(host.fstab_mount('/mnt/mymntpnt'))
+
check_output.assert_called_with(['mount', '/mnt/mymntpnt']) + + @patch('hashlib.md5') + @patch('os.path.exists') + def test_file_hash_exists(self, exists, md5): + filename = '/etc/exists.conf' + exists.side_effect = [True] + m = md5() + m.hexdigest.return_value = self._hash_files[filename] + with patch_open() as (mock_open, mock_file): + mock_file.read.return_value = self._hash_files[filename] + result = host.file_hash(filename) + self.assertEqual(result, self._hash_files[filename]) + + @patch('os.path.exists') + def test_file_hash_missing(self, exists): + filename = '/etc/missing.conf' + exists.side_effect = [False] + with patch_open() as (mock_open, mock_file): + mock_file.read.return_value = self._hash_files[filename] + result = host.file_hash(filename) + self.assertEqual(result, None) + + @patch('hashlib.sha1') + @patch('os.path.exists') + def test_file_hash_sha1(self, exists, sha1): + filename = '/etc/exists.conf' + exists.side_effect = [True] + m = sha1() + m.hexdigest.return_value = self._hash_files[filename] + with patch_open() as (mock_open, mock_file): + mock_file.read.return_value = self._hash_files[filename] + result = host.file_hash(filename, hash_type='sha1') + self.assertEqual(result, self._hash_files[filename]) + + @patch.object(host, 'file_hash') + def test_check_hash(self, file_hash): + file_hash.return_value = 'good-hash' + self.assertRaises(host.ChecksumError, host.check_hash, + 'file', 'bad-hash') + host.check_hash('file', 'good-hash', 'sha256') + self.assertEqual(file_hash.call_args_list, [ + call('file', 'md5'), + call('file', 'sha256'), + ]) + + @patch.object(host, 'service') + @patch('os.path.exists') + @patch('glob.iglob') + def test_restart_no_changes(self, iglob, exists, service): + file_name = '/etc/missing.conf' + restart_map = { + file_name: ['test-service'] + } + iglob.return_value = [] + + @host.restart_on_change(restart_map) + def make_no_changes(): + pass + + make_no_changes() + + assert not service.called + assert not exists.called + + @patch.object(host, 'service') + @patch('os.path.exists') + @patch('glob.iglob') + def test_restart_on_change(self, iglob, exists, service): + file_name = '/etc/missing.conf' + restart_map = { + file_name: ['test-service'] + } + iglob.side_effect = [[], [file_name]] + exists.return_value = True + + @host.restart_on_change(restart_map) + def make_some_changes(mock_file): + mock_file.read.return_value = b"newstuff" + + with patch_open() as (mock_open, mock_file): + make_some_changes(mock_file) + + for service_name in restart_map[file_name]: + service.assert_called_with('restart', service_name) + + exists.assert_has_calls([ + call(file_name), + ]) + + @patch.object(host, 'service') + @patch('os.path.exists') + @patch('glob.iglob') + def test_multiservice_restart_on_change(self, iglob, exists, service): + file_name_one = '/etc/missing.conf' + file_name_two = '/etc/exists.conf' + restart_map = { + file_name_one: ['test-service'], + file_name_two: ['test-service', 'test-service2'] + } + iglob.side_effect = [[], [file_name_two], + [file_name_one], [file_name_two]] + exists.return_value = True + + @host.restart_on_change(restart_map) + def make_some_changes(): + pass + + with patch_open() as (mock_open, mock_file): + mock_file.read.side_effect = [b'exists', b'missing', b'exists2'] + make_some_changes() + + # Restart should only happen once per service + for svc in ['test-service2', 'test-service']: + c = call('restart', svc) + self.assertEquals(1, service.call_args_list.count(c)) + + exists.assert_has_calls([ + 
call(file_name_one), + call(file_name_two) + ]) + + @patch.object(host, 'service') + @patch('os.path.exists') + @patch('glob.iglob') + def test_multiservice_restart_on_change_in_order(self, iglob, exists, + service): + file_name_one = '/etc/cinder/cinder.conf' + file_name_two = '/etc/haproxy/haproxy.conf' + restart_map = OrderedDict([ + (file_name_one, ['some-api']), + (file_name_two, ['haproxy']) + ]) + iglob.side_effect = [[], [file_name_two], + [file_name_one], [file_name_two]] + exists.return_value = True + + @host.restart_on_change(restart_map) + def make_some_changes(): + pass + + with patch_open() as (mock_open, mock_file): + mock_file.read.side_effect = [b'exists', b'missing', b'exists2'] + make_some_changes() + + # Restarts should happen in the order they are described in the + # restart map. + expected = [ + call('restart', 'some-api'), + call('restart', 'haproxy') + ] + self.assertEquals(expected, service.call_args_list) + + @patch.object(host, 'service') + @patch('os.path.exists') + @patch('glob.iglob') + def test_glob_no_restart(self, iglob, exists, service): + glob_path = '/etc/service/*.conf' + file_name_one = '/etc/service/exists.conf' + file_name_two = '/etc/service/exists2.conf' + restart_map = { + glob_path: ['service'] + } + iglob.side_effect = [[file_name_one, file_name_two], + [file_name_one, file_name_two]] + exists.return_value = True + + @host.restart_on_change(restart_map) + def make_some_changes(): + pass + + with patch_open() as (mock_open, mock_file): + mock_file.read.side_effect = [b'content', b'content2', + b'content', b'content2'] + make_some_changes() + + self.assertEquals([], service.call_args_list) + + @patch.object(host, 'service') + @patch('os.path.exists') + @patch('glob.iglob') + def test_glob_restart_on_change(self, iglob, exists, service): + glob_path = '/etc/service/*.conf' + file_name_one = '/etc/service/exists.conf' + file_name_two = '/etc/service/exists2.conf' + restart_map = { + glob_path: ['service'] + } + iglob.side_effect = [[file_name_one, file_name_two], + [file_name_one, file_name_two]] + exists.return_value = True + + @host.restart_on_change(restart_map) + def make_some_changes(): + pass + + with patch_open() as (mock_open, mock_file): + mock_file.read.side_effect = [b'content', b'content2', + b'changed', b'content2'] + make_some_changes() + + self.assertEquals([call('restart', 'service')], service.call_args_list) + + @patch.object(host, 'service') + @patch('os.path.exists') + @patch('glob.iglob') + def test_glob_restart_on_create(self, iglob, exists, service): + glob_path = '/etc/service/*.conf' + file_name_one = '/etc/service/exists.conf' + file_name_two = '/etc/service/missing.conf' + restart_map = { + glob_path: ['service'] + } + iglob.side_effect = [[file_name_one], + [file_name_one, file_name_two]] + exists.return_value = True + + @host.restart_on_change(restart_map) + def make_some_changes(): + pass + + with patch_open() as (mock_open, mock_file): + mock_file.read.side_effect = [b'exists', + b'exists', b'created'] + make_some_changes() + + self.assertEquals([call('restart', 'service')], service.call_args_list) + + @patch.object(host, 'service') + @patch('os.path.exists') + @patch('glob.iglob') + def test_glob_restart_on_delete(self, iglob, exists, service): + glob_path = '/etc/service/*.conf' + file_name_one = '/etc/service/exists.conf' + file_name_two = '/etc/service/exists2.conf' + restart_map = { + glob_path: ['service'] + } + iglob.side_effect = [[file_name_one, file_name_two], + [file_name_two]] + exists.return_value = True + 
+ @host.restart_on_change(restart_map) + def make_some_changes(): + pass + + with patch_open() as (mock_open, mock_file): + mock_file.read.side_effect = [b'exists', b'exists2', + b'exists2'] + make_some_changes() + + self.assertEquals([call('restart', 'service')], service.call_args_list) + + @patch.object(host, 'service_reload') + @patch.object(host, 'service') + @patch('os.path.exists') + @patch('glob.iglob') + def test_restart_on_change_restart_functs(self, iglob, exists, service, + service_reload): + file_name_one = '/etc/cinder/cinder.conf' + file_name_two = '/etc/haproxy/haproxy.conf' + restart_map = OrderedDict([ + (file_name_one, ['some-api']), + (file_name_two, ['haproxy']) + ]) + iglob.side_effect = [[], [file_name_two], + [file_name_one], [file_name_two]] + exists.return_value = True + + restart_funcs = { + 'some-api': service_reload, + } + + @host.restart_on_change(restart_map, restart_functions=restart_funcs) + def make_some_changes(): + pass + + with patch_open() as (mock_open, mock_file): + mock_file.read.side_effect = [b'exists', b'missing', b'exists2'] + make_some_changes() + + self.assertEquals([call('restart', 'haproxy')], service.call_args_list) + self.assertEquals([call('some-api')], service_reload.call_args_list) + + @patch.object(osplatform, 'get_platform') + def test_lsb_release_ubuntu(self, platform): + platform.return_value = 'ubuntu' + imp.reload(host) + + result = { + "DISTRIB_ID": "Ubuntu", + "DISTRIB_RELEASE": "13.10", + "DISTRIB_CODENAME": "saucy", + "DISTRIB_DESCRIPTION": "\"Ubuntu Saucy Salamander " + "(development branch)\"" + } + with mocked_open('/etc/lsb-release', LSB_RELEASE): + lsb_release = host.lsb_release() + for key in result: + self.assertEqual(result[key], lsb_release[key]) + + @patch.object(osplatform, 'get_platform') + def test_lsb_release_centos(self, platform): + platform.return_value = 'centos' + imp.reload(host) + + result = { + 'NAME': '"CentOS Linux"', + 'ANSI_COLOR': '"0;31"', + 'ID_LIKE': '"rhel fedora"', + 'VERSION_ID': '"7"', + 'BUG_REPORT_URL': '"https://bugs.centos.org/"', + 'CENTOS_MANTISBT_PROJECT': '"CentOS-7"', + 'PRETTY_NAME': '"CentOS Linux 7 (Core)"', + 'VERSION': '"7 (Core)"', + 'REDHAT_SUPPORT_PRODUCT_VERSION': '"7"', + 'CENTOS_MANTISBT_PROJECT_VERSION': '"7"', + 'REDHAT_SUPPORT_PRODUCT': '"centos"', + 'HOME_URL': '"https://www.centos.org/"', + 'CPE_NAME': '"cpe:/o:centos:centos:7"', + 'ID': '"centos"' + } + with mocked_open('/etc/os-release', OS_RELEASE): + lsb_release = host.lsb_release() + for key in result: + self.assertEqual(result[key], lsb_release[key]) + + def test_pwgen(self): + pw = host.pwgen() + self.assert_(len(pw) >= 35, 'Password is too short') + + pw = host.pwgen(10) + self.assertEqual(len(pw), 10, 'Password incorrect length') + + pw2 = host.pwgen(10) + self.assertNotEqual(pw, pw2, 'Duplicated password') + + @patch.object(host, 'glob') + @patch('os.path.realpath') + @patch('os.path.isdir') + def test_is_phy_iface(self, mock_isdir, mock_realpath, mock_glob): + mock_isdir.return_value = True + mock_glob.glob.return_value = ['/sys/class/net/eth0', + '/sys/class/net/veth0'] + + def fake_realpath(soft): + if soft.endswith('/eth0'): + hard = ('/sys/devices/pci0000:00/0000:00:1c.4' + '/0000:02:00.1/net/eth0') + else: + hard = '/sys/devices/virtual/net/veth0' + + return hard + + mock_realpath.side_effect = fake_realpath + self.assertTrue(host.is_phy_iface('eth0')) + self.assertFalse(host.is_phy_iface('veth0')) + + @patch('os.path.exists') + @patch('os.path.realpath') + @patch('os.path.isdir') + def 
test_get_bond_master(self, mock_isdir, mock_realpath, mock_exists): + mock_isdir.return_value = True + + def fake_realpath(soft): + if soft.endswith('/eth0'): + return ('/sys/devices/pci0000:00/0000:00:1c.4' + '/0000:02:00.1/net/eth0') + elif soft.endswith('/br0'): + return '/sys/devices/virtual/net/br0' + elif soft.endswith('/master'): + return '/sys/devices/virtual/net/bond0' + + return None + + def fake_exists(path): + return True + + mock_exists.side_effect = fake_exists + mock_realpath.side_effect = fake_realpath + self.assertEqual(host.get_bond_master('eth0'), 'bond0') + self.assertIsNone(host.get_bond_master('br0')) + + @patch('subprocess.check_output') + def test_list_nics(self, check_output): + check_output.return_value = IP_LINES + nics = host.list_nics() + self.assertEqual(nics, ['eth0', 'eth1', 'eth0.10', 'eth100']) + nics = host.list_nics('eth') + self.assertEqual(nics, ['eth0', 'eth1', 'eth0.10', 'eth100']) + nics = host.list_nics(['eth']) + self.assertEqual(nics, ['eth0', 'eth1', 'eth0.10', 'eth100']) + + @patch('subprocess.check_output') + def test_list_nics_with_bonds(self, check_output): + check_output.return_value = IP_LINE_BONDS + nics = host.list_nics('bond') + self.assertEqual(nics, ['bond0.10', ]) + + @patch('subprocess.check_output') + def test_get_nic_mtu_with_bonds(self, check_output): + check_output.return_value = IP_LINE_BONDS + nic = "bond0.10" + mtu = host.get_nic_mtu(nic) + self.assertEqual(mtu, '1500') + + @patch('subprocess.check_call') + def test_set_nic_mtu(self, mock_call): + mock_call.return_value = 0 + nic = 'eth7' + mtu = '1546' + host.set_nic_mtu(nic, mtu) + mock_call.assert_called_with(['ip', 'link', 'set', nic, 'mtu', mtu]) + + @patch('subprocess.check_output') + def test_get_nic_mtu(self, check_output): + check_output.return_value = IP_LINE_ETH0 + nic = "eth0" + mtu = host.get_nic_mtu(nic) + self.assertEqual(mtu, '1500') + + @patch('subprocess.check_output') + def test_get_nic_mtu_vlan(self, check_output): + check_output.return_value = IP_LINE_ETH0_VLAN + nic = "eth0.10" + mtu = host.get_nic_mtu(nic) + self.assertEqual(mtu, '1500') + + @patch('subprocess.check_output') + def test_get_nic_hwaddr(self, check_output): + check_output.return_value = IP_LINE_HWADDR + nic = "eth0" + hwaddr = host.get_nic_hwaddr(nic) + self.assertEqual(hwaddr, 'e4:11:5b:ab:a7:3c') + + @patch('charmhelpers.core.host_factory.ubuntu.lsb_release') + def test_get_distrib_codename(self, lsb_release): + lsb_release.return_value = {'DISTRIB_CODENAME': 'bionic'} + self.assertEqual(host.get_distrib_codename(), 'bionic') + + @patch.object(osplatform, 'get_platform') + @patch.object(ubuntu_apt_pkg, 'Cache') + def test_cmp_pkgrevno_revnos_ubuntu(self, pkg_cache, platform): + platform.return_value = 'ubuntu' + imp.reload(host) + + class MockPackage: + class MockPackageRevno: + def __init__(self, ver_str): + self.ver_str = ver_str + + def __init__(self, current_ver): + self.current_ver = self.MockPackageRevno(current_ver) + + pkg_dict = { + 'python': MockPackage('2.4') + } + pkg_cache.return_value = pkg_dict + self.assertEqual(host.cmp_pkgrevno('python', '2.3'), 1) + self.assertEqual(host.cmp_pkgrevno('python', '2.4'), 0) + self.assertEqual(host.cmp_pkgrevno('python', '2.5'), -1) + + @patch.object(osplatform, 'get_platform') + def test_cmp_pkgrevno_revnos_centos(self, platform): + platform.return_value = 'centos' + imp.reload(host) + + class MockPackage: + def __init__(self, name, version): + self.Name = name + self.version = version + + yum_dict = { + 'installed': { + 
MockPackage('python', '2.4') + } + } + + import yum + yum.YumBase.return_value.doPackageLists.return_value = ( + yum_dict) + + self.assertEqual(host.cmp_pkgrevno('python', '2.3'), 1) + self.assertEqual(host.cmp_pkgrevno('python', '2.4'), 0) + self.assertEqual(host.cmp_pkgrevno('python', '2.5'), -1) + + @patch.object(host.os, 'stat') + @patch.object(host.pwd, 'getpwuid') + @patch.object(host.grp, 'getgrgid') + @patch('posix.stat_result') + def test_owner(self, stat_result_, getgrgid_, getpwuid_, stat_): + getgrgid_.return_value = ['testgrp'] + getpwuid_.return_value = ['testuser'] + stat_.return_value = stat_result_() + + user, group = host.owner('/some/path') + stat_.assert_called_once_with('/some/path') + self.assertEqual('testuser', user) + self.assertEqual('testgrp', group) + + def test_get_total_ram(self): + raw = dedent('''\ + MemFree: 183868 kB + MemTotal: 7096108 kB + MemAvailable: 5645240 kB + ''').strip() + with patch_open() as (mock_open, mock_file): + mock_file.readlines.return_value = raw.splitlines() + self.assertEqual(host.get_total_ram(), 7266414592) # 7GB + mock_open.assert_called_once_with('/proc/meminfo', 'r') + + @patch.object(host, 'os') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.call') + def test_is_container_with_systemd_container(self, + call, + init_is_systemd, + mock_os): + init_is_systemd.return_value = True + call.return_value = 0 + self.assertTrue(host.is_container()) + call.assert_called_with(['systemd-detect-virt', '--container']) + + @patch.object(host, 'os') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.call') + def test_is_container_with_systemd_non_container(self, + call, + init_is_systemd, + mock_os): + init_is_systemd.return_value = True + call.return_value = 1 + self.assertFalse(host.is_container()) + call.assert_called_with(['systemd-detect-virt', '--container']) + + @patch.object(host, 'os') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.call') + def test_is_container_with_upstart_container(self, + call, + init_is_systemd, + mock_os): + init_is_systemd.return_value = False + mock_os.path.exists.return_value = True + self.assertTrue(host.is_container()) + mock_os.path.exists.assert_called_with('/run/container_type') + + @patch.object(host, 'os') + @patch.object(host, 'init_is_systemd') + @patch('subprocess.call') + def test_is_container_with_upstart_not_container(self, + call, + init_is_systemd, + mock_os): + init_is_systemd.return_value = False + mock_os.path.exists.return_value = False + self.assertFalse(host.is_container()) + mock_os.path.exists.assert_called_with('/run/container_type') + + def test_updatedb(self): + updatedb_text = 'PRUNEPATHS="/tmp"' + self.assertEqual(host.updatedb(updatedb_text, '/srv/node'), + 'PRUNEPATHS="/tmp /srv/node"') + + def test_no_change_updatedb(self): + updatedb_text = 'PRUNEPATHS="/tmp /srv/node"' + self.assertEqual(host.updatedb(updatedb_text, '/srv/node'), + updatedb_text) + + def test_no_prunepaths(self): + updatedb_text = 'PRUNE_BIND_MOUNTS="yes"' + self.assertEqual(host.updatedb(updatedb_text, '/srv/node'), + updatedb_text) + + def test_write_updatedb(self): + _open = mock_open(read_data='PRUNEPATHS="/tmp /srv/node"') + with patch('charmhelpers.core.host.open', _open, create=True): + host.add_to_updatedb_prunepath("/tmp/test") + handle = _open() + + self.assertTrue(handle.read.call_count == 1) + self.assertTrue(handle.seek.call_count == 1) + handle.write.assert_called_once_with( + 'PRUNEPATHS="/tmp /srv/node /tmp/test"') + + @patch.object(host, 'os') + def 
test_prunepaths_no_updatedb_conf_file(self, mock_os): + mock_os.path.exists.return_value = False + _open = mock_open(read_data='PRUNEPATHS="/tmp /srv/node"') + with patch('charmhelpers.core.host.open', _open, create=True): + host.add_to_updatedb_prunepath("/tmp/test") + handle = _open() + + self.assertTrue(handle.call_count == 0) + + @patch.object(host, 'os') + def test_prunepaths_updatedb_conf_file_isdir(self, mock_os): + mock_os.path.exists.return_value = True + mock_os.path.isdir.return_value = True + _open = mock_open(read_data='PRUNEPATHS="/tmp /srv/node"') + with patch('charmhelpers.core.host.open', _open, create=True): + host.add_to_updatedb_prunepath("/tmp/test") + handle = _open() + + self.assertTrue(handle.call_count == 0) + + @patch.object(host, 'local_unit') + def test_modulo_distribution(self, local_unit): + local_unit.return_value = 'test/7' + + # unit % modulo * wait + self.assertEqual(host.modulo_distribution(modulo=6, wait=10), 10) + + # Zero wait when unit % modulo == 0 + self.assertEqual(host.modulo_distribution(modulo=7, wait=10), 0) + + # modulo * wait when unit % modulo == 0 and non_zero_wait=True + self.assertEqual(host.modulo_distribution(modulo=7, wait=10, + non_zero_wait=True), + 70) + + @patch.object(host, 'log') + @patch.object(host, 'charm_name') + @patch.object(host, 'write_file') + @patch.object(subprocess, 'check_call') + @patch.object(host, 'file_hash') + @patch('hashlib.md5') + def test_install_ca_cert_new_cert(self, md5, file_hash, check_call, + write_file, charm_name, log): + file_hash.return_value = 'old_hash' + charm_name.return_value = 'charm-name' + + md5().hexdigest.return_value = 'old_hash' + host.install_ca_cert('cert_data') + assert not check_call.called + + md5().hexdigest.return_value = 'new_hash' + host.install_ca_cert(None) + assert not check_call.called + host.install_ca_cert('') + assert not check_call.called + + host.install_ca_cert('cert_data', 'name') + write_file.assert_called_with( + '/usr/local/share/ca-certificates/name.crt', + b'cert_data') + check_call.assert_called_with(['update-ca-certificates', '--fresh']) + + host.install_ca_cert('cert_data') + write_file.assert_called_with( + '/usr/local/share/ca-certificates/juju-charm-name.crt', + b'cert_data') + check_call.assert_called_with(['update-ca-certificates', '--fresh']) + + @patch('subprocess.check_output') + def test_arch(self, check_output): + _ = host.arch() + check_output.assert_called_with( + ['dpkg', '--print-architecture'] + ) + + @patch('subprocess.check_output') + def test_get_system_env(self, check_output): + check_output.return_value = '' + self.assertEquals( + host.get_system_env('aKey', 'aDefault'), 'aDefault') + self.assertEquals(host.get_system_env('aKey'), None) + check_output.return_value = 'aKey=aValue\n' + self.assertEquals( + host.get_system_env('aKey', 'aDefault'), 'aValue') + check_output.return_value = 'otherKey=shell=wicked\n' + self.assertEquals( + host.get_system_env('otherKey', 'aDefault'), 'shell=wicked') + + +class TestHostCompator(TestCase): + + def test_compare_ubuntu_releases(self): + from charmhelpers.osplatform import get_platform + if get_platform() == 'ubuntu': + self.assertTrue(host.CompareHostReleases('yakkety') < 'zesty') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_hugepage.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_hugepage.py new file mode 100644 index 0000000000000000000000000000000000000000..69ab9b285f97d4df24b6d4322672925ac79e46cb --- /dev/null +++ 
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_hugepage.py @@ -0,0 +1,118 @@ +from testtools import TestCase +from mock import patch +from charmhelpers.core import hugepage +import yaml + +TO_PATCH = [ + 'fstab', + 'add_group', + 'add_user_to_group', + 'sysctl', + 'fstab_mount', + 'mkdir', + 'check_output', +] + + +class Group(object): + def __init__(self): + self.gr_gid = '1010' + + +class HugepageTests(TestCase): + + def setUp(self): + super(HugepageTests, self).setUp() + for m in TO_PATCH: + setattr(self, m, self._patch(m)) + + def _patch(self, method): + _m = patch('charmhelpers.core.hugepage.' + method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def test_hugepage_support(self): + self.add_group.return_value = Group() + self.fstab.Fstab().get_entry_by_attr.return_value = 'old fstab entry' + self.fstab.Fstab().Entry.return_value = 'new fstab entry' + hugepage.hugepage_support('nova') + sysctl_expect = (""" +vm.hugetlb_shm_group: '1010' +vm.max_map_count: 65536 +vm.nr_hugepages: 256 +""".lstrip()) + self.sysctl.create.assert_called_with(sysctl_expect, + '/etc/sysctl.d/10-hugepage.conf') + self.mkdir.assert_called_with('/run/hugepages/kvm', owner='root', + group='root', perms=0o755, force=False) + self.fstab.Fstab().remove_entry.assert_called_with('old fstab entry') + self.fstab.Fstab().Entry.assert_called_with( + 'nodev', '/run/hugepages/kvm', 'hugetlbfs', + 'mode=1770,gid=1010,pagesize=2MB', 0, 0) + self.fstab.Fstab().add_entry.assert_called_with('new fstab entry') + self.fstab_mount.assert_called_with('/run/hugepages/kvm') + + def test_hugepage_support_new_mnt(self): + self.add_group.return_value = Group() + self.fstab.Fstab().get_entry_by_attr.return_value = None + self.fstab.Fstab().Entry.return_value = 'new fstab entry' + hugepage.hugepage_support('nova') + self.assertEqual(self.fstab.Fstab().remove_entry.call_args_list, []) + + def test_hugepage_support_no_automount(self): + self.add_group.return_value = Group() + self.fstab.Fstab().get_entry_by_attr.return_value = None + self.fstab.Fstab().Entry.return_value = 'new fstab entry' + hugepage.hugepage_support('nova', mount=False) + self.assertEqual(self.fstab_mount.call_args_list, []) + + def test_hugepage_support_nodefaults(self): + self.add_group.return_value = Group() + self.fstab.Fstab().get_entry_by_attr.return_value = 'old fstab entry' + self.fstab.Fstab().Entry.return_value = 'new fstab entry' + hugepage.hugepage_support( + 'nova', group='neutron', nr_hugepages=512, max_map_count=70000, + mnt_point='/hugepages', pagesize='1G', mount=False) + sysctl_expect = { + 'vm.hugetlb_shm_group': '1010', + 'vm.max_map_count': 70000, + 'vm.nr_hugepages': 512, + } + sysctl_setting_arg = self.sysctl.create.call_args_list[0][0][0] + self.assertEqual(yaml.safe_load(sysctl_setting_arg), sysctl_expect) + self.mkdir.assert_called_with('/hugepages', owner='root', + group='root', perms=0o755, force=False) + self.fstab.Fstab().remove_entry.assert_called_with('old fstab entry') + self.fstab.Fstab().Entry.assert_called_with( + 'nodev', '/hugepages', 'hugetlbfs', + 'mode=1770,gid=1010,pagesize=1G', 0, 0) + self.fstab.Fstab().add_entry.assert_called_with('new fstab entry') + + def test_hugepage_support_set_shmmax(self): + self.add_group.return_value = Group() + self.fstab.Fstab().get_entry_by_attr.return_value = None + self.fstab.Fstab().Entry.return_value = 'new fstab entry' + self.check_output.return_value = 2000 + hugepage.hugepage_support('nova', mount=False, set_shmmax=True) + sysctl_expect = { + 
'kernel.shmmax': 536870912, + 'vm.hugetlb_shm_group': '1010', + 'vm.max_map_count': 65536, + 'vm.nr_hugepages': 256 + } + sysctl_setting_arg = self.sysctl.create.call_args_list[0][0][0] + self.assertEqual(yaml.safe_load(sysctl_setting_arg), sysctl_expect) + + def test_hugepage_support_auto_increase_max_map_count(self): + self.add_group.return_value = Group() + hugepage.hugepage_support( + 'nova', group='neutron', nr_hugepages=512, max_map_count=200, + mnt_point='/hugepages', pagesize='1G', mount=False) + sysctl_expect = { + 'vm.hugetlb_shm_group': '1010', + 'vm.max_map_count': 1024, + 'vm.nr_hugepages': 512, + } + sysctl_setting_arg = self.sysctl.create.call_args_list[0][0][0] + self.assertEqual(yaml.safe_load(sysctl_setting_arg), sysctl_expect) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_kernel.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..9da19988dc19fe8e86cd058070d4a38796a205f0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_kernel.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import unittest +import imp + +from charmhelpers import osplatform +from mock import patch +from tests.helpers import patch_open +from charmhelpers.core import kernel + + +class TestKernel(unittest.TestCase): + + @patch('subprocess.check_call') + @patch.object(osplatform, 'get_platform') + def test_modprobe_persistent_ubuntu(self, platform, check_call): + platform.return_value = 'ubuntu' + imp.reload(kernel) + + with patch_open() as (_open, _file): + _file.read.return_value = 'anothermod\n' + with patch("charmhelpers.core.kernel.log"): + kernel.modprobe('mymod') + _open.assert_called_with('/etc/modules', 'r+') + _file.read.assert_called_with() + _file.write.assert_called_with('mymod\n') + check_call.assert_called_with(['modprobe', 'mymod']) + + @patch('os.chmod') + @patch('subprocess.check_call') + @patch.object(osplatform, 'get_platform') + def test_modprobe_persistent_centos(self, platform, check_call, os): + platform.return_value = 'centos' + imp.reload(kernel) + + with patch_open() as (_open, _file): + _file.read.return_value = 'anothermod\n' + with patch("charmhelpers.core.kernel.log"): + kernel.modprobe('mymod') + _open.assert_called_with('/etc/rc.modules', 'r+') + os.assert_called_with('/etc/rc.modules', 111) + _file.read.assert_called_with() + _file.write.assert_called_with('modprobe mymod\n') + check_call.assert_called_with(['modprobe', 'mymod']) + + @patch('subprocess.check_call') + @patch.object(osplatform, 'get_platform') + def test_modprobe_not_persistent_ubuntu(self, platform, check_call): + platform.return_value = 'ubuntu' + imp.reload(kernel) + + with patch_open() as (_open, _file): + _file.read.return_value = 'anothermod\n' + with patch("charmhelpers.core.kernel.log"): + kernel.modprobe('mymod', persist=False) + assert not _open.called + check_call.assert_called_with(['modprobe', 'mymod']) + + @patch('subprocess.check_call') + @patch.object(osplatform, 'get_platform') + def test_modprobe_not_persistent_centos(self, platform, check_call): + platform.return_value = 'centos' + imp.reload(kernel) + + with patch_open() as (_open, _file): + _file.read.return_value = 'anothermod\n' + with patch("charmhelpers.core.kernel.log"): + kernel.modprobe('mymod', persist=False) + assert not _open.called + check_call.assert_called_with(['modprobe', 'mymod']) + + @patch.object(kernel, 
'log') + @patch('subprocess.check_call') + def test_rmmod_not_forced(self, check_call, log): + kernel.rmmod('mymod') + check_call.assert_called_with(['rmmod', 'mymod']) + + @patch.object(kernel, 'log') + @patch('subprocess.check_call') + def test_rmmod_forced(self, check_call, log): + kernel.rmmod('mymod', force=True) + check_call.assert_called_with(['rmmod', '-f', 'mymod']) + + @patch.object(kernel, 'log') + @patch('subprocess.check_output') + def test_lsmod(self, check_output, log): + kernel.lsmod() + check_output.assert_called_with(['lsmod'], + universal_newlines=True) + + @patch('charmhelpers.core.kernel.lsmod') + def test_is_module_loaded(self, lsmod): + lsmod.return_value = "ip6_tables 28672 1 ip6table_filter" + self.assertTrue(kernel.is_module_loaded("ip6_tables")) + + @patch.object(osplatform, 'get_platform') + @patch('subprocess.check_call') + def test_update_initramfs_ubuntu(self, check_call, platform): + platform.return_value = 'ubuntu' + imp.reload(kernel) + + kernel.update_initramfs() + check_call.assert_called_with(["update-initramfs", "-k", "all", "-u"]) + + @patch.object(osplatform, 'get_platform') + @patch('subprocess.check_call') + def test_update_initramfs_centos(self, check_call, platform): + platform.return_value = 'centos' + imp.reload(kernel) + + kernel.update_initramfs() + check_call.assert_called_with(['dracut', '-f', 'all']) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_services.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_services.py new file mode 100644 index 0000000000000000000000000000000000000000..8f6935301182e9dbe1cddd17e270dd9cbc51bb8e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_services.py @@ -0,0 +1,868 @@ +import os +import mock +import unittest +import uuid +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import services +from functools import partial + + +class TestServiceManager(unittest.TestCase): + def setUp(self): + self.pcharm_dir = mock.patch.object(hookenv, 'charm_dir') + self.mcharm_dir = self.pcharm_dir.start() + self.mcharm_dir.return_value = 'charm_dir' + + def tearDown(self): + self.pcharm_dir.stop() + + def test_register(self): + manager = services.ServiceManager([ + {'service': 'service1', + 'foo': 'bar'}, + {'service': 'service2', + 'qux': 'baz'}, + ]) + self.assertEqual(manager.services, { + 'service1': {'service': 'service1', + 'foo': 'bar'}, + 'service2': {'service': 'service2', + 'qux': 'baz'}, + }) + + def test_register_preserves_order(self): + service_list = [dict(service='a'), dict(service='b')] + + # Test that the services list order is preserved by checking + # both forwards and backwards - only one of these will be + # dictionary order, and if both work we know order is being + # preserved. 
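+ # ServiceManager stores definitions in an OrderedDict keyed by each
+ # entry's 'service' name, so registration order is iteration order.
+ # A minimal registration sketch (illustrative; the service name and
+ # ports are made up):
+ #
+ #     manager = services.ServiceManager([
+ #         {'service': 'haproxy', 'ports': [80, 443]},
+ #     ])
+ #     manager.manage()  # conventionally called from every hook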
+ manager = services.ServiceManager(service_list) + self.assertEqual(list(manager.services.keys()), ['a', 'b']) + manager = services.ServiceManager(reversed(service_list)) + self.assertEqual(list(manager.services.keys()), ['b', 'a']) + + @mock.patch.object(services.ServiceManager, 'reconfigure_services') + @mock.patch.object(services.ServiceManager, 'stop_services') + @mock.patch.object(hookenv, 'hook_name') + @mock.patch.object(hookenv, 'config') + def test_manage_stop(self, config, hook_name, stop_services, reconfigure_services): + manager = services.ServiceManager() + hook_name.return_value = 'stop' + manager.manage() + stop_services.assert_called_once_with() + assert not reconfigure_services.called + + @mock.patch.object(services.ServiceManager, 'provide_data') + @mock.patch.object(services.ServiceManager, 'reconfigure_services') + @mock.patch.object(services.ServiceManager, 'stop_services') + @mock.patch.object(hookenv, 'hook_name') + @mock.patch.object(hookenv, 'config') + def test_manage_other(self, config, hook_name, stop_services, reconfigure_services, provide_data): + manager = services.ServiceManager() + hook_name.return_value = 'config-changed' + manager.manage() + assert not stop_services.called + reconfigure_services.assert_called_once_with() + provide_data.assert_called_once_with() + + def test_manage_calls_atstart(self): + cb = mock.MagicMock() + hookenv.atstart(cb) + manager = services.ServiceManager() + manager.manage() + self.assertTrue(cb.called) + + def test_manage_calls_atexit(self): + cb = mock.MagicMock() + hookenv.atexit(cb) + manager = services.ServiceManager() + manager.manage() + self.assertTrue(cb.called) + + @mock.patch.object(hookenv, 'config') + def test_manage_config_not_saved(self, config): + config = config.return_value + config.implicit_save = False + manager = services.ServiceManager() + manager.manage() + self.assertFalse(config.save.called) + + @mock.patch.object(services.ServiceManager, 'save_ready') + @mock.patch.object(services.ServiceManager, 'fire_event') + @mock.patch.object(services.ServiceManager, 'is_ready') + def test_reconfigure_ready(self, is_ready, fire_event, save_ready): + manager = services.ServiceManager([ + {'service': 'service1'}, {'service': 'service2'}]) + is_ready.return_value = True + manager.reconfigure_services() + is_ready.assert_has_calls([ + mock.call('service1'), + mock.call('service2'), + ], any_order=True) + fire_event.assert_has_calls([ + mock.call('data_ready', 'service1'), + mock.call('start', 'service1', default=[ + services.service_restart, + services.manage_ports]), + ], any_order=False) + fire_event.assert_has_calls([ + mock.call('data_ready', 'service2'), + mock.call('start', 'service2', default=[ + services.service_restart, + services.manage_ports]), + ], any_order=False) + save_ready.assert_has_calls([ + mock.call('service1'), + mock.call('service2'), + ], any_order=True) + + @mock.patch.object(services.ServiceManager, 'save_ready') + @mock.patch.object(services.ServiceManager, 'fire_event') + @mock.patch.object(services.ServiceManager, 'is_ready') + def test_reconfigure_ready_list(self, is_ready, fire_event, save_ready): + manager = services.ServiceManager([ + {'service': 'service1'}, {'service': 'service2'}]) + is_ready.return_value = True + manager.reconfigure_services('service3', 'service4') + self.assertEqual(is_ready.call_args_list, [ + mock.call('service3'), + mock.call('service4'), + ]) + self.assertEqual(fire_event.call_args_list, [ + mock.call('data_ready', 'service3'), + mock.call('start', 
'service3', default=[ + services.service_restart, + services.open_ports]), + mock.call('data_ready', 'service4'), + mock.call('start', 'service4', default=[ + services.service_restart, + services.open_ports]), + ]) + self.assertEqual(save_ready.call_args_list, [ + mock.call('service3'), + mock.call('service4'), + ]) + + @mock.patch.object(services.ServiceManager, 'save_lost') + @mock.patch.object(services.ServiceManager, 'fire_event') + @mock.patch.object(services.ServiceManager, 'was_ready') + @mock.patch.object(services.ServiceManager, 'is_ready') + def test_reconfigure_not_ready(self, is_ready, was_ready, fire_event, save_lost): + manager = services.ServiceManager([ + {'service': 'service1'}, {'service': 'service2'}]) + is_ready.return_value = False + was_ready.return_value = False + manager.reconfigure_services() + is_ready.assert_has_calls([ + mock.call('service1'), + mock.call('service2'), + ], any_order=True) + fire_event.assert_has_calls([ + mock.call('stop', 'service1', default=[ + services.close_ports, + services.service_stop]), + mock.call('stop', 'service2', default=[ + services.close_ports, + services.service_stop]), + ], any_order=True) + save_lost.assert_has_calls([ + mock.call('service1'), + mock.call('service2'), + ], any_order=True) + + @mock.patch.object(services.ServiceManager, 'save_lost') + @mock.patch.object(services.ServiceManager, 'fire_event') + @mock.patch.object(services.ServiceManager, 'was_ready') + @mock.patch.object(services.ServiceManager, 'is_ready') + def test_reconfigure_no_longer_ready(self, is_ready, was_ready, fire_event, save_lost): + manager = services.ServiceManager([ + {'service': 'service1'}, {'service': 'service2'}]) + is_ready.return_value = False + was_ready.return_value = True + manager.reconfigure_services() + is_ready.assert_has_calls([ + mock.call('service1'), + mock.call('service2'), + ], any_order=True) + fire_event.assert_has_calls([ + mock.call('data_lost', 'service1'), + mock.call('stop', 'service1', default=[ + services.close_ports, + services.service_stop]), + ], any_order=False) + fire_event.assert_has_calls([ + mock.call('data_lost', 'service2'), + mock.call('stop', 'service2', default=[ + services.close_ports, + services.service_stop]), + ], any_order=False) + save_lost.assert_has_calls([ + mock.call('service1'), + mock.call('service2'), + ], any_order=True) + + @mock.patch.object(services.ServiceManager, 'fire_event') + def test_stop_services(self, fire_event): + manager = services.ServiceManager([ + {'service': 'service1'}, {'service': 'service2'}]) + manager.stop_services() + fire_event.assert_has_calls([ + mock.call('stop', 'service1', default=[ + services.close_ports, + services.service_stop]), + mock.call('stop', 'service2', default=[ + services.close_ports, + services.service_stop]), + ], any_order=True) + + @mock.patch.object(services.ServiceManager, 'fire_event') + def test_stop_services_list(self, fire_event): + manager = services.ServiceManager([ + {'service': 'service1'}, {'service': 'service2'}]) + manager.stop_services('service3', 'service4') + self.assertEqual(fire_event.call_args_list, [ + mock.call('stop', 'service3', default=[ + services.close_ports, + services.service_stop]), + mock.call('stop', 'service4', default=[ + services.close_ports, + services.service_stop]), + ]) + + def test_get_service(self): + service = {'service': 'test', 'test': 'test_service'} + manager = services.ServiceManager([service]) + self.assertEqual(manager.get_service('test'), service) + + def test_get_service_not_registered(self): + 
service = {'service': 'test', 'test': 'test_service'} + manager = services.ServiceManager([service]) + self.assertRaises(KeyError, manager.get_service, 'foo') + + @mock.patch.object(services.ServiceManager, 'get_service') + def test_fire_event_default(self, get_service): + get_service.return_value = {} + cb = mock.Mock() + manager = services.ServiceManager() + manager.fire_event('event', 'service', cb) + cb.assert_called_once_with('service') + + @mock.patch.object(services.ServiceManager, 'get_service') + def test_fire_event_default_list(self, get_service): + get_service.return_value = {} + cb = mock.Mock() + manager = services.ServiceManager() + manager.fire_event('event', 'service', [cb]) + cb.assert_called_once_with('service') + + @mock.patch.object(services.ServiceManager, 'get_service') + def test_fire_event_simple_callback(self, get_service): + cb = mock.Mock() + dcb = mock.Mock() + get_service.return_value = {'event': cb} + manager = services.ServiceManager() + manager.fire_event('event', 'service', dcb) + assert not dcb.called + cb.assert_called_once_with('service') + + @mock.patch.object(services.ServiceManager, 'get_service') + def test_fire_event_simple_callback_list(self, get_service): + cb = mock.Mock() + dcb = mock.Mock() + get_service.return_value = {'event': [cb]} + manager = services.ServiceManager() + manager.fire_event('event', 'service', dcb) + assert not dcb.called + cb.assert_called_once_with('service') + + @mock.patch.object(services.ManagerCallback, '__call__') + @mock.patch.object(services.ServiceManager, 'get_service') + def test_fire_event_manager_callback(self, get_service, mcall): + cb = services.ManagerCallback() + dcb = mock.Mock() + get_service.return_value = {'event': cb} + manager = services.ServiceManager() + manager.fire_event('event', 'service', dcb) + assert not dcb.called + mcall.assert_called_once_with(manager, 'service', 'event') + + @mock.patch.object(services.ManagerCallback, '__call__') + @mock.patch.object(services.ServiceManager, 'get_service') + def test_fire_event_manager_callback_list(self, get_service, mcall): + cb = services.ManagerCallback() + dcb = mock.Mock() + get_service.return_value = {'event': [cb]} + manager = services.ServiceManager() + manager.fire_event('event', 'service', dcb) + assert not dcb.called + mcall.assert_called_once_with(manager, 'service', 'event') + + @mock.patch.object(services.ServiceManager, 'get_service') + def test_is_ready(self, get_service): + get_service.side_effect = [ + {}, + {'required_data': [True]}, + {'required_data': [False]}, + {'required_data': [True, False]}, + ] + manager = services.ServiceManager() + assert manager.is_ready('foo') + assert manager.is_ready('bar') + assert not manager.is_ready('foo') + assert not manager.is_ready('foo') + get_service.assert_has_calls([mock.call('foo'), mock.call('bar')]) + + def test_load_ready_file_short_circuit(self): + manager = services.ServiceManager() + manager._ready = 'foo' + manager._load_ready_file() + self.assertEqual(manager._ready, 'foo') + + @mock.patch('os.path.exists') + @mock.patch.object(services.base, 'open', create=True) + def test_load_ready_file_new(self, mopen, exists): + manager = services.ServiceManager() + exists.return_value = False + manager._load_ready_file() + self.assertEqual(manager._ready, set()) + assert not mopen.called + + @mock.patch('json.load') + @mock.patch('os.path.exists') + @mock.patch.object(services.base, 'open', create=True) + def test_load_ready_file(self, mopen, exists, jload): + manager = services.ServiceManager() 
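+ # Ready flags persist as a JSON list in charm_dir/READY-SERVICES.json
+ # (e.g. ["bar"]), which is why save_ready()/was_ready() can work
+ # across separate hook invocations; _load_ready_file() below turns
+ # that list into the manager's in-memory set.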
+ exists.return_value = True + jload.return_value = ['bar'] + manager._load_ready_file() + self.assertEqual(manager._ready, set(['bar'])) + exists.assert_called_once_with('charm_dir/READY-SERVICES.json') + mopen.assert_called_once_with('charm_dir/READY-SERVICES.json') + + @mock.patch('json.dump') + @mock.patch.object(services.base, 'open', create=True) + def test_save_ready_file(self, mopen, jdump): + manager = services.ServiceManager() + manager._save_ready_file() + assert not mopen.called + manager._ready = set(['foo']) + manager._save_ready_file() + mopen.assert_called_once_with('charm_dir/READY-SERVICES.json', 'w') + jdump.assert_called_once_with(['foo'], mopen.return_value.__enter__()) + + @mock.patch.object(services.base.ServiceManager, '_save_ready_file') + @mock.patch.object(services.base.ServiceManager, '_load_ready_file') + def test_save_ready(self, _lrf, _srf): + manager = services.ServiceManager() + manager._ready = set(['foo']) + manager.save_ready('bar') + _lrf.assert_called_once_with() + self.assertEqual(manager._ready, set(['foo', 'bar'])) + _srf.assert_called_once_with() + + @mock.patch.object(services.base.ServiceManager, '_save_ready_file') + @mock.patch.object(services.base.ServiceManager, '_load_ready_file') + def test_save_lost(self, _lrf, _srf): + manager = services.ServiceManager() + manager._ready = set(['foo', 'bar']) + manager.save_lost('bar') + _lrf.assert_called_once_with() + self.assertEqual(manager._ready, set(['foo'])) + _srf.assert_called_once_with() + manager.save_lost('bar') + self.assertEqual(manager._ready, set(['foo'])) + + @mock.patch.object(services.base.ServiceManager, '_save_ready_file') + @mock.patch.object(services.base.ServiceManager, '_load_ready_file') + def test_was_ready(self, _lrf, _srf): + manager = services.ServiceManager() + manager._ready = set() + manager.save_ready('foo') + manager.save_ready('bar') + assert manager.was_ready('foo') + assert manager.was_ready('bar') + manager.save_lost('bar') + assert manager.was_ready('foo') + assert not manager.was_ready('bar') + + @mock.patch.object(services.base.hookenv, 'relation_set') + @mock.patch.object(services.base.hookenv, 'related_units') + @mock.patch.object(services.base.hookenv, 'relation_ids') + def test_provide_data_no_match(self, relation_ids, related_units, relation_set): + provider = mock.Mock() + provider.name = 'provided' + manager = services.ServiceManager([ + {'service': 'service', 'provided_data': [provider]} + ]) + relation_ids.return_value = [] + manager.provide_data() + assert not provider.provide_data.called + relation_ids.assert_called_once_with('provided') + + @mock.patch.object(services.base.hookenv, 'relation_set') + @mock.patch.object(services.base.hookenv, 'related_units') + @mock.patch.object(services.base.hookenv, 'relation_ids') + def test_provide_data_not_ready(self, relation_ids, related_units, relation_set): + provider = mock.Mock() + provider.name = 'provided' + pd = mock.Mock() + data = pd.return_value = {'data': True} + provider.provide_data = lambda remote_service, service_ready: pd(remote_service, service_ready) + manager = services.ServiceManager([ + {'service': 'service', 'provided_data': [provider]} + ]) + manager.is_ready = mock.Mock(return_value=False) + relation_ids.return_value = ['relid'] + related_units.return_value = ['service/0'] + manager.provide_data() + relation_set.assert_called_once_with('relid', data) + pd.assert_called_once_with('service', False) + + @mock.patch.object(services.base.hookenv, 'relation_set') + 
@mock.patch.object(services.base.hookenv, 'related_units') + @mock.patch.object(services.base.hookenv, 'relation_ids') + def test_provide_data_ready(self, relation_ids, related_units, relation_set): + provider = mock.Mock() + provider.name = 'provided' + pd = mock.Mock() + data = pd.return_value = {'data': True} + provider.provide_data = lambda remote_service, service_ready: pd(remote_service, service_ready) + manager = services.ServiceManager([ + {'service': 'service', 'provided_data': [provider]} + ]) + manager.is_ready = mock.Mock(return_value=True) + relation_ids.return_value = ['relid'] + related_units.return_value = ['service/0'] + manager.provide_data() + relation_set.assert_called_once_with('relid', data) + pd.assert_called_once_with('service', True) + + +class TestRelationContext(unittest.TestCase): + def setUp(self): + self.phookenv = mock.patch.object(services.helpers, 'hookenv') + self.mhookenv = self.phookenv.start() + self.mhookenv.relation_ids.return_value = [] + self.context = services.RelationContext() + self.context.name = 'http' + self.context.interface = 'http' + self.context.required_keys = ['foo', 'bar'] + self.mhookenv.reset_mock() + + def tearDown(self): + self.phookenv.stop() + + def test_no_relations(self): + self.context.get_data() + self.assertFalse(self.context.is_ready()) + self.assertEqual(self.context, {}) + self.mhookenv.relation_ids.assert_called_once_with('http') + + def test_no_units(self): + self.mhookenv.relation_ids.return_value = ['nginx'] + self.mhookenv.related_units.return_value = [] + self.context.get_data() + self.assertFalse(self.context.is_ready()) + self.assertEqual(self.context, {'http': []}) + + def test_incomplete(self): + self.mhookenv.relation_ids.return_value = ['nginx', 'apache'] + self.mhookenv.related_units.side_effect = lambda i: [i + '/0'] + self.mhookenv.relation_get.side_effect = [{}, {'foo': '1'}] + self.context.get_data() + self.assertFalse(bool(self.context)) + self.assertEqual(self.mhookenv.relation_get.call_args_list, [ + mock.call(rid='apache', unit='apache/0'), + mock.call(rid='nginx', unit='nginx/0'), + ]) + + def test_complete(self): + self.mhookenv.relation_ids.return_value = ['nginx', 'apache', 'tomcat'] + self.mhookenv.related_units.side_effect = lambda i: [i + '/0'] + self.mhookenv.relation_get.side_effect = [{'foo': '1'}, {'foo': '2', 'bar': '3'}, {}] + self.context.get_data() + self.assertTrue(self.context.is_ready()) + self.assertEqual(self.context, {'http': [ + { + 'foo': '2', + 'bar': '3', + }, + ]}) + self.mhookenv.relation_ids.assert_called_with('http') + self.assertEqual(self.mhookenv.relation_get.call_args_list, [ + mock.call(rid='apache', unit='apache/0'), + mock.call(rid='nginx', unit='nginx/0'), + mock.call(rid='tomcat', unit='tomcat/0'), + ]) + + def test_provide(self): + self.assertEqual(self.context.provide_data(), {}) + + +class TestHttpRelation(unittest.TestCase): + def setUp(self): + self.phookenv = mock.patch.object(services.helpers, 'hookenv') + self.mhookenv = self.phookenv.start() + + self.context = services.helpers.HttpRelation() + + def tearDown(self): + self.phookenv.stop() + + def test_provide_data(self): + self.mhookenv.unit_get.return_value = "127.0.0.1" + self.assertEqual(self.context.provide_data(), { + 'host': "127.0.0.1", + 'port': 80, + }) + + def test_complete(self): + self.mhookenv.relation_ids.return_value = ['website'] + self.mhookenv.related_units.side_effect = lambda i: [i + '/0'] + self.mhookenv.relation_get.side_effect = [{'host': '127.0.0.2', + 'port': 8080}] + 
self.context.get_data() + self.assertTrue(self.context.is_ready()) + self.assertEqual(self.context, {'website': [ + { + 'host': '127.0.0.2', + 'port': 8080, + }, + ]}) + + self.mhookenv.relation_ids.assert_called_with('website') + self.assertEqual(self.mhookenv.relation_get.call_args_list, [ + mock.call(rid='website', unit='website/0'), + ]) + + +class TestMysqlRelation(unittest.TestCase): + + def setUp(self): + self.phookenv = mock.patch.object(services.helpers, 'hookenv') + self.mhookenv = self.phookenv.start() + + self.context = services.helpers.MysqlRelation() + + def tearDown(self): + self.phookenv.stop() + + def test_complete(self): + self.mhookenv.relation_ids.return_value = ['db'] + self.mhookenv.related_units.side_effect = lambda i: [i + '/0'] + self.mhookenv.relation_get.side_effect = [{'host': '127.0.0.2', + 'user': 'mysql', + 'password': 'mysql', + 'database': 'mysql', + }] + self.context.get_data() + self.assertTrue(self.context.is_ready()) + self.assertEqual(self.context, {'db': [ + { + 'host': '127.0.0.2', + 'user': 'mysql', + 'password': 'mysql', + 'database': 'mysql', + }, + ]}) + + self.mhookenv.relation_ids.assert_called_with('db') + self.assertEqual(self.mhookenv.relation_get.call_args_list, [ + mock.call(rid='db', unit='db/0'), + ]) + + +class TestRequiredConfig(unittest.TestCase): + def setUp(self): + self.options = { + 'options': { + 'option1': { + 'type': 'string', + 'description': 'First option', + }, + 'option2': { + 'type': 'int', + 'default': 0, + 'description': 'Second option', + }, + }, + } + self.config = { + 'option1': None, + 'option2': 0, + } + self._pyaml = mock.patch.object(services.helpers, 'yaml') + self.myaml = self._pyaml.start() + self.myaml.load.side_effect = lambda fp: self.options + self._pconfig = mock.patch.object(hookenv, 'config') + self.mconfig = self._pconfig.start() + self.mconfig.side_effect = lambda: self.config + self._pcharm_dir = mock.patch.object(hookenv, 'charm_dir') + self.mcharm_dir = self._pcharm_dir.start() + self.mcharm_dir.return_value = 'charm_dir' + + def tearDown(self): + self._pyaml.stop() + self._pconfig.stop() + self._pcharm_dir.stop() + + def test_none_changed(self): + with mock.patch.object(services.helpers, 'open', mock.mock_open(), create=True): + context = services.helpers.RequiredConfig('option1', 'option2') + self.assertFalse(bool(context)) + self.assertEqual(context['config']['option1'], None) + self.assertEqual(context['config']['option2'], 0) + + def test_partial(self): + self.config['option1'] = 'value' + with mock.patch.object(services.helpers, 'open', mock.mock_open(), create=True): + context = services.helpers.RequiredConfig('option1', 'option2') + self.assertFalse(bool(context)) + self.assertEqual(context['config']['option1'], 'value') + self.assertEqual(context['config']['option2'], 0) + + def test_ready(self): + self.config['option1'] = 'value' + self.config['option2'] = 1 + with mock.patch.object(services.helpers, 'open', mock.mock_open(), create=True): + context = services.helpers.RequiredConfig('option1', 'option2') + self.assertTrue(bool(context)) + self.assertEqual(context['config']['option1'], 'value') + self.assertEqual(context['config']['option2'], 1) + + def test_none_empty(self): + self.config['option1'] = '' + self.config['option2'] = 1 + with mock.patch.object(services.helpers, 'open', mock.mock_open(), create=True): + context = services.helpers.RequiredConfig('option1', 'option2') + self.assertFalse(bool(context)) + self.assertEqual(context['config']['option1'], '') + 
self.assertEqual(context['config']['option2'], 1) + + +class TestStoredContext(unittest.TestCase): + @mock.patch.object(services.helpers.StoredContext, 'read_context') + @mock.patch.object(services.helpers.StoredContext, 'store_context') + @mock.patch('os.path.exists') + def test_new(self, exists, store_context, read_context): + exists.return_value = False + context = services.helpers.StoredContext('foo.yaml', {'key': 'val'}) + assert not read_context.called + store_context.assert_called_once_with('foo.yaml', {'key': 'val'}) + self.assertEqual(context, {'key': 'val'}) + + @mock.patch.object(services.helpers.StoredContext, 'read_context') + @mock.patch.object(services.helpers.StoredContext, 'store_context') + @mock.patch('os.path.exists') + def test_existing(self, exists, store_context, read_context): + exists.return_value = True + read_context.return_value = {'key': 'other'} + context = services.helpers.StoredContext('foo.yaml', {'key': 'val'}) + read_context.assert_called_once_with('foo.yaml') + assert not store_context.called + self.assertEqual(context, {'key': 'other'}) + + @mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') + @mock.patch.object(services.helpers.StoredContext, 'read_context') + @mock.patch.object(services.helpers, 'yaml') + @mock.patch('os.fchmod') + @mock.patch('os.path.exists') + def test_store_context(self, exists, fchmod, yaml, read_context): + exists.return_value = False + mopen = mock.mock_open() + with mock.patch.object(services.helpers, 'open', mopen, create=True): + services.helpers.StoredContext('foo.yaml', {'key': 'val'}) + mopen.assert_called_once_with('charm_dir/foo.yaml', 'w') + fchmod.assert_called_once_with(mopen.return_value.fileno(), 0o600) + yaml.dump.assert_called_once_with({'key': 'val'}, mopen.return_value) + + @mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') + @mock.patch.object(services.helpers.StoredContext, 'read_context') + @mock.patch.object(services.helpers, 'yaml') + @mock.patch('os.fchmod') + @mock.patch('os.path.exists') + def test_store_context_abs(self, exists, fchmod, yaml, read_context): + exists.return_value = False + mopen = mock.mock_open() + with mock.patch.object(services.helpers, 'open', mopen, create=True): + services.helpers.StoredContext('/foo.yaml', {'key': 'val'}) + mopen.assert_called_once_with('/foo.yaml', 'w') + + @mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') + @mock.patch.object(services.helpers, 'yaml') + @mock.patch('os.path.exists') + def test_read_context(self, exists, yaml): + exists.return_value = True + yaml.load.return_value = {'key': 'other'} + mopen = mock.mock_open() + with mock.patch.object(services.helpers, 'open', mopen, create=True): + context = services.helpers.StoredContext('foo.yaml', {'key': 'val'}) + mopen.assert_called_once_with('charm_dir/foo.yaml', 'r') + yaml.load.assert_called_once_with(mopen.return_value) + self.assertEqual(context, {'key': 'other'}) + + @mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') + @mock.patch.object(services.helpers, 'yaml') + @mock.patch('os.path.exists') + def test_read_context_abs(self, exists, yaml): + exists.return_value = True + yaml.load.return_value = {'key': 'other'} + mopen = mock.mock_open() + with mock.patch.object(services.helpers, 'open', mopen, create=True): + context = services.helpers.StoredContext('/foo.yaml', {'key': 'val'}) + mopen.assert_called_once_with('/foo.yaml', 'r') + yaml.load.assert_called_once_with(mopen.return_value) + self.assertEqual(context, {'key': 'other'}) + + 
@mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') + @mock.patch.object(services.helpers, 'yaml') + @mock.patch('os.path.exists') + def test_read_context_empty(self, exists, yaml): + exists.return_value = True + yaml.load.return_value = None + mopen = mock.mock_open() + with mock.patch.object(services.helpers, 'open', mopen, create=True): + self.assertRaises(OSError, services.helpers.StoredContext, '/foo.yaml', {}) + + +class TestTemplateCallback(unittest.TestCase): + @mock.patch.object(services.helpers, 'templating') + def test_template_defaults(self, mtemplating): + manager = mock.Mock(**{'get_service.return_value': { + 'required_data': [{'foo': 'bar'}]}}) + self.assertRaises(TypeError, services.template, source='foo.yml') + callback = services.template(source='foo.yml', target='bar.yml') + assert isinstance(callback, services.ManagerCallback) + assert not mtemplating.render.called + callback(manager, 'test', 'event') + mtemplating.render.assert_called_once_with( + 'foo.yml', 'bar.yml', {'foo': 'bar', 'ctx': {'foo': 'bar'}}, + 'root', 'root', 0o444, template_loader=None) + + @mock.patch.object(services.helpers, 'templating') + def test_template_explicit(self, mtemplating): + manager = mock.Mock(**{'get_service.return_value': { + 'required_data': [{'foo': 'bar'}]}}) + callback = services.template( + source='foo.yml', target='bar.yml', + owner='user', group='group', perms=0o555 + ) + assert isinstance(callback, services.ManagerCallback) + assert not mtemplating.render.called + callback(manager, 'test', 'event') + mtemplating.render.assert_called_once_with( + 'foo.yml', 'bar.yml', {'foo': 'bar', 'ctx': {'foo': 'bar'}}, + 'user', 'group', 0o555, template_loader=None) + + @mock.patch.object(services.helpers, 'templating') + def test_template_loader(self, mtemplating): + manager = mock.Mock(**{'get_service.return_value': { + 'required_data': [{'foo': 'bar'}]}}) + callback = services.template( + source='foo.yml', target='bar.yml', + owner='user', group='group', perms=0o555, + template_loader='myloader' + ) + assert isinstance(callback, services.ManagerCallback) + assert not mtemplating.render.called + callback(manager, 'test', 'event') + mtemplating.render.assert_called_once_with( + 'foo.yml', 'bar.yml', {'foo': 'bar', 'ctx': {'foo': 'bar'}}, + 'user', 'group', 0o555, template_loader='myloader') + + @mock.patch.object(os.path, 'isfile') + @mock.patch.object(host, 'file_hash') + @mock.patch.object(host, 'service_restart') + @mock.patch.object(services.helpers, 'templating') + def test_template_onchange_restart(self, mtemplating, mrestart, mfile_hash, misfile): + def random_string(arg): + return uuid.uuid4() + mfile_hash.side_effect = random_string + misfile.return_value = True + manager = mock.Mock(**{'get_service.return_value': { + 'required_data': [{'foo': 'bar'}]}}) + callback = services.template( + source='foo.yml', target='bar.yml', + owner='user', group='group', perms=0o555, + on_change_action=(partial(mrestart, "mysuperservice")), + ) + assert isinstance(callback, services.ManagerCallback) + assert not mtemplating.render.called + callback(manager, 'test', 'event') + mtemplating.render.assert_called_once_with( + 'foo.yml', 'bar.yml', {'foo': 'bar', 'ctx': {'foo': 'bar'}}, + 'user', 'group', 0o555, template_loader=None) + mrestart.assert_called_with('mysuperservice') + + @mock.patch.object(hookenv, 'log') + @mock.patch.object(os.path, 'isfile') + @mock.patch.object(host, 'file_hash') + @mock.patch.object(host, 'service_restart') + @mock.patch.object(services.helpers, 
'templating') + def test_template_onchange_restart_nochange(self, mtemplating, mrestart, + mfile_hash, misfile, mlog): + mfile_hash.return_value = "myhash" + misfile.return_value = True + manager = mock.Mock(**{'get_service.return_value': { + 'required_data': [{'foo': 'bar'}]}}) + callback = services.template( + source='foo.yml', target='bar.yml', + owner='user', group='group', perms=0o555, + on_change_action=(partial(mrestart, "mysuperservice")), + ) + assert isinstance(callback, services.ManagerCallback) + assert not mtemplating.render.called + callback(manager, 'test', 'event') + mtemplating.render.assert_called_once_with( + 'foo.yml', 'bar.yml', {'foo': 'bar', 'ctx': {'foo': 'bar'}}, + 'user', 'group', 0o555, template_loader=None) + self.assertEqual(mrestart.call_args_list, []) + + +class TestPortsCallback(unittest.TestCase): + def setUp(self): + self.phookenv = mock.patch.object(services.base, 'hookenv') + self.mhookenv = self.phookenv.start() + self.mhookenv.relation_ids.return_value = [] + self.mhookenv.charm_dir.return_value = 'charm_dir' + self.popen = mock.patch.object(services.base, 'open', create=True) + self.mopen = self.popen.start() + + def tearDown(self): + self.phookenv.stop() + self.popen.stop() + + def test_no_ports(self): + manager = mock.Mock(**{'get_service.return_value': {}}) + services.PortManagerCallback()(manager, 'service', 'event') + assert not self.mhookenv.open_port.called + assert not self.mhookenv.close_port.called + + def test_open_ports(self): + manager = mock.Mock(**{'get_service.return_value': {'ports': [1, 2]}}) + services.open_ports(manager, 'service', 'start') + self.mhookenv.open_port.has_calls([mock.call(1), mock.call(2)]) + assert not self.mhookenv.close_port.called + + def test_close_ports(self): + manager = mock.Mock(**{'get_service.return_value': {'ports': [1, 2]}}) + services.close_ports(manager, 'service', 'stop') + assert not self.mhookenv.open_port.called + self.mhookenv.close_port.has_calls([mock.call(1), mock.call(2)]) + + def test_close_old_ports(self): + self.mopen.return_value.read.return_value = '10,20' + manager = mock.Mock(**{'get_service.return_value': {'ports': [1, 2]}}) + services.close_ports(manager, 'service', 'stop') + assert not self.mhookenv.open_port.called + self.mhookenv.close_port.has_calls([ + mock.call(10), + mock.call(20), + mock.call(1), + mock.call(2)]) + + +if __name__ == '__main__': + unittest.main() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_strutils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_strutils.py new file mode 100644 index 0000000000000000000000000000000000000000..9110e563470fd35979edd10d7303f70d9495ac88 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_strutils.py @@ -0,0 +1,108 @@ +import unittest + +import charmhelpers.core.strutils as strutils + + +class TestStrUtils(unittest.TestCase): + def setUp(self): + super(TestStrUtils, self).setUp() + + def tearDown(self): + super(TestStrUtils, self).tearDown() + + def test_bool_from_string(self): + self.assertTrue(strutils.bool_from_string('true')) + self.assertTrue(strutils.bool_from_string('True')) + self.assertTrue(strutils.bool_from_string('yes')) + self.assertTrue(strutils.bool_from_string('Yes')) + self.assertTrue(strutils.bool_from_string('y')) + self.assertTrue(strutils.bool_from_string('Y')) + self.assertTrue(strutils.bool_from_string('on')) + + # unicode should also work + self.assertTrue(strutils.bool_from_string(u'true')) + 
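+ # Matching is case-insensitive over a fixed vocabulary (true/yes/y/on
+ # and their negatives), so 'On' or 'YES' should also map cleanly
+ # (assumed from the case folding exercised above); anything outside
+ # that vocabulary, including None, raises ValueError as asserted below.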
+ self.assertFalse(strutils.bool_from_string('False')) + self.assertFalse(strutils.bool_from_string('false')) + self.assertFalse(strutils.bool_from_string('no')) + self.assertFalse(strutils.bool_from_string('No')) + self.assertFalse(strutils.bool_from_string('n')) + self.assertFalse(strutils.bool_from_string('N')) + self.assertFalse(strutils.bool_from_string('off')) + + self.assertRaises(ValueError, strutils.bool_from_string, None) + self.assertRaises(ValueError, strutils.bool_from_string, 'foo') + + def test_bytes_from_string(self): + self.assertEqual(strutils.bytes_from_string('10'), 10) + self.assertEqual(strutils.bytes_from_string('3K'), 3072) + self.assertEqual(strutils.bytes_from_string('3KB'), 3072) + self.assertEqual(strutils.bytes_from_string('3M'), 3145728) + self.assertEqual(strutils.bytes_from_string('3MB'), 3145728) + self.assertEqual(strutils.bytes_from_string('3G'), 3221225472) + self.assertEqual(strutils.bytes_from_string('3GB'), 3221225472) + self.assertEqual(strutils.bytes_from_string('3T'), 3298534883328) + self.assertEqual(strutils.bytes_from_string('3TB'), 3298534883328) + self.assertEqual(strutils.bytes_from_string('3P'), 3377699720527872) + self.assertEqual(strutils.bytes_from_string('3PB'), 3377699720527872) + + self.assertRaises(ValueError, strutils.bytes_from_string, None) + self.assertRaises(ValueError, strutils.bytes_from_string, 'foo') + + def test_basic_string_comparator_class_fails_instantiation(self): + try: + strutils.BasicStringComparator('hello') + raise Exception("instantiating BasicStringComparator should fail") + except Exception as e: + assert (str(e) == "Must define the _list in the class definition!") + + def test_basic_string_comparator_class(self): + + class MyComparator(strutils.BasicStringComparator): + + _list = ('zomg', 'bartlet', 'over', 'and') + + x = MyComparator('zomg') + self.assertEquals(x.index, 0) + y = MyComparator('over') + self.assertEquals(y.index, 2) + self.assertTrue(x == 'zomg') + self.assertTrue(x != 'bartlet') + self.assertTrue(x == x) + self.assertTrue(x != y) + self.assertTrue(x < y) + self.assertTrue(y > x) + self.assertTrue(x < 'bartlet') + self.assertTrue(y > 'bartlet') + self.assertTrue(x >= 'zomg') + self.assertTrue(x <= 'zomg') + self.assertTrue(x >= x) + self.assertTrue(x <= x) + self.assertTrue(y >= 'zomg') + self.assertTrue(y <= 'over') + self.assertTrue(y >= x) + self.assertTrue(x <= y) + # ensure that something not in the list dies + try: + MyComparator('nope') + raise Exception("MyComparator('nope') should have failed") + except Exception as e: + self.assertTrue(isinstance(e, KeyError)) + + def test_basic_string_comparator_fails_different_comparators(self): + + class MyComparator1(strutils.BasicStringComparator): + + _list = ('the truth is out there'.split(' ')) + + class MyComparator2(strutils.BasicStringComparator): + + _list = ('no one in space can hear you scream'.split(' ')) + + x = MyComparator1('is') + y = MyComparator2('you') + try: + x > y + raise Exception("Comparing different comparators should fail") + except Exception as e: + self.assertTrue(isinstance(e, AssertionError)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_sysctl.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_sysctl.py new file mode 100644 index 0000000000000000000000000000000000000000..58aec64dd16f380ad34b287c5bfb901d35f02f09 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_sysctl.py @@ -0,0 +1,134 @@ +#!/usr/bin/env 
python +# -*- coding: utf-8 -*- + +from charmhelpers.core.sysctl import create +import io +from mock import patch, MagicMock +from subprocess import CalledProcessError +import unittest +import tempfile + +import six +if not six.PY3: + builtin_open = '__builtin__.open' +else: + builtin_open = 'builtins.open' + +__author__ = 'Jorge Niedbalski R. ' + + +TO_PATCH = [ + 'log', + 'check_call', + 'is_container', +] + + +class SysctlTests(unittest.TestCase): + def setUp(self): + self.tempfile = tempfile.NamedTemporaryFile(delete=False) + for m in TO_PATCH: + setattr(self, m, self._patch(m)) + + def _patch(self, method): + _m = patch('charmhelpers.core.sysctl.' + method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + @patch(builtin_open) + def test_create(self, mock_open): + """Test create sysctl method""" + _file = MagicMock(spec=io.FileIO) + mock_open.return_value = _file + + create('{"kernel.max_pid": 1337}', "/etc/sysctl.d/test-sysctl.conf") + + _file.__enter__().write.assert_called_with("kernel.max_pid=1337\n") + + self.log.assert_called_with( + "Updating sysctl_file: /etc/sysctl.d/test-sysctl.conf" + " values: {'kernel.max_pid': 1337}", + level='DEBUG') + + self.check_call.assert_called_with([ + "sysctl", "-p", + "/etc/sysctl.d/test-sysctl.conf"]) + + @patch(builtin_open) + def test_create_with_ignore(self, mock_open): + """Test create sysctl method""" + _file = MagicMock(spec=io.FileIO) + mock_open.return_value = _file + + create('{"kernel.max_pid": 1337}', + "/etc/sysctl.d/test-sysctl.conf", + ignore=True) + + _file.__enter__().write.assert_called_with("kernel.max_pid=1337\n") + + self.log.assert_called_with( + "Updating sysctl_file: /etc/sysctl.d/test-sysctl.conf" + " values: {'kernel.max_pid': 1337}", + level='DEBUG') + + self.check_call.assert_called_with([ + "sysctl", "-p", + "/etc/sysctl.d/test-sysctl.conf", "-e"]) + + @patch(builtin_open) + def test_create_with_dict(self, mock_open): + """Test create sysctl method""" + _file = MagicMock(spec=io.FileIO) + mock_open.return_value = _file + + create({"kernel.max_pid": 1337}, "/etc/sysctl.d/test-sysctl.conf") + + _file.__enter__().write.assert_called_with("kernel.max_pid=1337\n") + + self.log.assert_called_with( + "Updating sysctl_file: /etc/sysctl.d/test-sysctl.conf" + " values: {'kernel.max_pid': 1337}", + level='DEBUG') + + self.check_call.assert_called_with([ + "sysctl", "-p", + "/etc/sysctl.d/test-sysctl.conf"]) + + @patch(builtin_open) + def test_create_invalid_argument(self, mock_open): + """Test create sysctl with an invalid argument""" + _file = MagicMock(spec=io.FileIO) + mock_open.return_value = _file + + create('{"kernel.max_pid": 1337 xxxx', "/etc/sysctl.d/test-sysctl.conf") + + self.log.assert_called_with( + 'Error parsing YAML sysctl_dict: {"kernel.max_pid": 1337 xxxx', + level='ERROR') + + @patch(builtin_open) + def test_create_raises(self, mock_open): + """CalledProcessErrors are propagated for non-container machines.""" + _file = MagicMock(spec=io.FileIO) + mock_open.return_value = _file + + self.is_container.return_value = False + self.check_call.side_effect = CalledProcessError(1, 'sysctl') + + with self.assertRaises(CalledProcessError): + create('{"kernel.max_pid": 1337}', "/etc/sysctl.d/test-sysctl.conf") + + @patch(builtin_open) + def test_create_raises_container(self, mock_open): + """CalledProcessErrors are logged for containers.""" + _file = MagicMock(spec=io.FileIO) + mock_open.return_value = _file + + self.is_container.return_value = True + self.check_call.side_effect = CalledProcessError(1, 
'sysctl', 'foo') + + create('{"kernel.max_pid": 1337}', "/etc/sysctl.d/test-sysctl.conf") + self.log.assert_called_with( + 'Error setting some sysctl keys in this container: foo', + level='WARNING') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_templating.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_templating.py new file mode 100644 index 0000000000000000000000000000000000000000..df4767734099b4d3a031b4b9c84ef1c49fef7c77 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_templating.py @@ -0,0 +1,166 @@ +import pkg_resources +import shutil +import tempfile +import unittest +import jinja2 +import os.path +import pwd +import grp + +import mock +from charmhelpers.core import templating + + +TEMPLATES_DIR = pkg_resources.resource_filename(__name__, 'templates') + + +class TestTemplating(unittest.TestCase): + def setUp(self): + self.charm_dir = pkg_resources.resource_filename(__name__, '') + self._charm_dir_patch = mock.patch.object(templating.hookenv, + 'charm_dir') + self._charm_dir_mock = self._charm_dir_patch.start() + self._charm_dir_mock.side_effect = lambda: self.charm_dir + + def tearDown(self): + self._charm_dir_patch.stop() + + @mock.patch.object(templating.host.os, 'fchown') + @mock.patch.object(templating.host, 'mkdir') + @mock.patch.object(templating.host, 'log') + def test_render(self, log, mkdir, fchown): + with tempfile.NamedTemporaryFile() as fn1, \ + tempfile.NamedTemporaryFile() as fn2: + context = { + 'nats': { + 'port': '1234', + 'host': 'example.com', + }, + 'router': { + 'domain': 'api.foo.com' + }, + 'nginx_port': 80, + } + templating.render('fake_cc.yml', fn1.name, + context, templates_dir=TEMPLATES_DIR) + contents = open(fn1.name).read() + self.assertRegexpMatches(contents, 'port: 1234') + self.assertRegexpMatches(contents, 'host: example.com') + self.assertRegexpMatches(contents, 'domain: api.foo.com') + + templating.render('test.conf', fn2.name, context, + templates_dir=TEMPLATES_DIR) + contents = open(fn2.name).read() + self.assertRegexpMatches(contents, 'listen 80') + self.assertEqual(fchown.call_count, 2) + # Not called, because the target directory exists. Calling + # it would make the target directory world readable and + # expose your secrets (!). + self.assertEqual(mkdir.call_count, 0) + + @mock.patch.object(templating.host.os, 'fchown') + @mock.patch.object(templating.host, 'mkdir') + @mock.patch.object(templating.host, 'log') + def test_render_from_string(self, log, mkdir, fchown): + with tempfile.NamedTemporaryFile() as fn: + context = { + 'foo': 'bar' + } + + config_template = '{{ foo }}' + templating.render('somefile.txt', fn.name, + context, templates_dir=TEMPLATES_DIR, + config_template=config_template) + contents = open(fn.name).read() + self.assertRegexpMatches(contents, 'bar') + + self.assertEqual(fchown.call_count, 1) + # Not called, because the target directory exists. Calling + # it would make the target directory world readable and + # expose your secrets (!). 
+ self.assertEqual(mkdir.call_count, 0) + + @mock.patch.object(templating.host.os, 'fchown') + @mock.patch.object(templating.host, 'mkdir') + @mock.patch.object(templating.host, 'log') + def test_render_loader(self, log, mkdir, fchown): + with tempfile.NamedTemporaryFile() as fn1: + context = { + 'nats': { + 'port': '1234', + 'host': 'example.com', + }, + 'router': { + 'domain': 'api.foo.com' + }, + 'nginx_port': 80, + } + template_loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(TEMPLATES_DIR)]) + templating.render('fake_cc.yml', fn1.name, + context, template_loader=template_loader) + contents = open(fn1.name).read() + self.assertRegexpMatches(contents, 'port: 1234') + self.assertRegexpMatches(contents, 'host: example.com') + self.assertRegexpMatches(contents, 'domain: api.foo.com') + + @mock.patch.object(templating.os.path, 'exists') + @mock.patch.object(templating.host.os, 'fchown') + @mock.patch.object(templating.host, 'mkdir') + @mock.patch.object(templating.host, 'log') + def test_render_no_dir(self, log, mkdir, fchown, exists): + exists.return_value = False + with tempfile.NamedTemporaryFile() as fn1, \ + tempfile.NamedTemporaryFile() as fn2: + context = { + 'nats': { + 'port': '1234', + 'host': 'example.com', + }, + 'router': { + 'domain': 'api.foo.com' + }, + 'nginx_port': 80, + } + templating.render('fake_cc.yml', fn1.name, + context, templates_dir=TEMPLATES_DIR) + contents = open(fn1.name).read() + self.assertRegexpMatches(contents, 'port: 1234') + self.assertRegexpMatches(contents, 'host: example.com') + self.assertRegexpMatches(contents, 'domain: api.foo.com') + + templating.render('test.conf', fn2.name, context, + templates_dir=TEMPLATES_DIR) + contents = open(fn2.name).read() + self.assertRegexpMatches(contents, 'listen 80') + self.assertEqual(fchown.call_count, 2) + # Target directory was created, world readable (!). + self.assertEqual(mkdir.call_count, 2) + + @mock.patch.object(templating.host.os, 'fchown') + @mock.patch.object(templating.host, 'log') + def test_render_2(self, log, fchown): + tmpdir = tempfile.mkdtemp() + fn1 = os.path.join(tmpdir, 'test.conf') + try: + context = {'nginx_port': 80} + templating.render('test.conf', fn1, context, + owner=pwd.getpwuid(os.getuid()).pw_name, + group=grp.getgrgid(os.getgid()).gr_name, + templates_dir=TEMPLATES_DIR) + with open(fn1) as f: + contents = f.read() + + self.assertRegexpMatches(contents, 'something') + finally: + shutil.rmtree(tmpdir, ignore_errors=True) + + @mock.patch.object(templating, 'hookenv') + @mock.patch('jinja2.Environment') + def test_load_error(self, Env, hookenv): + Env().get_template.side_effect = jinja2.exceptions.TemplateNotFound( + 'fake_cc.yml') + self.assertRaises( + jinja2.exceptions.TemplateNotFound, templating.render, + 'fake.src', 'fake.tgt', {}, templates_dir='tmpl') + hookenv.log.assert_called_once_with( + 'Could not load template fake.src from tmpl.', level=hookenv.ERROR) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_unitdata.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_unitdata.py new file mode 100644 index 0000000000000000000000000000000000000000..c6a85ecf5b9d736e1e493d9e38a46af607d60131 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/core/test_unitdata.py @@ -0,0 +1,228 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 Canonical Ltd. 
+# +# Authors: +# Kapil Thangavelu +# +try: + from StringIO import StringIO +except Exception: + from io import StringIO + +import os +import shutil +import tempfile +import unittest + +from mock import patch + +from charmhelpers.core.unitdata import Storage, HookData, kv + + +class HookDataTest(unittest.TestCase): + + def setUp(self): + self.charm_dir = tempfile.mkdtemp() + self.addCleanup(lambda: shutil.rmtree(self.charm_dir)) + self.change_environment(CHARM_DIR=self.charm_dir) + + def change_environment(self, **kw): + original_env = dict(os.environ) + + @self.addCleanup + def cleanup_env(): + os.environ.clear() + os.environ.update(original_env) + + os.environ.update(kw) + + @patch('charmhelpers.core.hookenv.hook_name') + @patch('charmhelpers.core.hookenv.execution_environment') + @patch('charmhelpers.core.hookenv.charm_dir') + def test_hook_data_records(self, cdir, ctx, name): + name.return_value = 'config-changed' + ctx.return_value = { + 'rels': {}, 'conf': {'a': 1}, 'env': {}, 'unit': 'someunit'} + cdir.return_value = self.charm_dir + with open(os.path.join(self.charm_dir, 'revision'), 'w') as fh: + fh.write('1') + hook_data = HookData() + + with hook_data(): + self.assertEqual(kv(), hook_data.kv) + self.assertEqual(kv().get('charm_revisions'), ['1']) + self.assertEqual(kv().get('unit'), 'someunit') + self.assertEqual(list(hook_data.conf), ['a']) + self.assertEqual(tuple(hook_data.conf.a), (None, 1)) + + +class StorageTest(unittest.TestCase): + + def test_init_kv_multiple(self): + with tempfile.NamedTemporaryFile() as fh: + kv = Storage(fh.name) + with kv.hook_scope('xyz'): + kv.set('x', 1) + kv.close() + self.assertEqual(os.stat(fh.name).st_mode & 0o777, 0o600) + + kv = Storage(fh.name) + with kv.hook_scope('abc'): + self.assertEqual(kv.get('x'), 1) + kv.close() + + def test_hook_scope(self): + kv = Storage(':memory:') + try: + with kv.hook_scope('install') as rev: + self.assertEqual(rev, 1) + kv.set('a', 1) + raise RuntimeError('x') + except RuntimeError: + self.assertEqual(kv.get('a'), None) + + with kv.hook_scope('config-changed') as rev: + self.assertEqual(rev, 1) + kv.set('a', 1) + self.assertEqual(kv.get('a'), 1) + + kv.revision = None + + with kv.hook_scope('start') as rev: + self.assertEqual(rev, 2) + kv.set('a', False) + kv.set('a', True) + self.assertEqual(kv.get('a'), True) + + # History doesn't decode values by default + history = [h[:-1] for h in kv.gethistory('a')] + self.assertEqual( + history, + [(1, 'a', '1', 'config-changed'), + (2, 'a', 'true', 'start')]) + + history = [h[:-1] for h in kv.gethistory('a', deserialize=True)] + self.assertEqual( + history, + [(1, 'a', 1, 'config-changed'), + (2, 'a', True, 'start')]) + + def test_delta_no_previous_and_history(self): + kv = Storage(':memory:') + with kv.hook_scope('install'): + data = {'a': 0, 'c': False} + delta = kv.delta(data, 'settings.') + self.assertEqual(delta, { + 'a': (None, False), 'c': (None, False)}) + kv.update(data, 'settings.') + + with kv.hook_scope('config'): + data = {'a': 1, 'c': True} + delta = kv.delta(data, 'settings.') + self.assertEqual(delta, { + 'a': (0, 1), 'c': (False, True)}) + kv.update(data, 'settings.') + # strip the time + history = [h[:-1] for h in kv.gethistory('settings.a')] + self.assertEqual( + history, + [(1, 'settings.a', '0', 'install'), + (2, 'settings.a', '1', 'config')]) + + def test_unset(self): + kv = Storage(':memory:') + with kv.hook_scope('install'): + kv.set('a', True) + with kv.hook_scope('start'): + kv.set('a', False) + with kv.hook_scope('config-changed'): + 
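# unset() is recorded in history as the JSON-encoded sentinel '"DELETED"', + # which the history assertion below matches verbatim. +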
kv.unset('a') + history = [h[:-1] for h in kv.gethistory('a')] + + self.assertEqual(history, [ + (1, 'a', 'true', 'install'), + (2, 'a', 'false', 'start'), + (3, 'a', '"DELETED"', "config-changed")]) + + def test_flush_and_close_on_closed(self): + kv = Storage(':memory:') + kv.close() + kv.flush(False) + kv.close() + + def test_multi_value_set_skips(self): + # pure coverage test + kv = Storage(':memory:') + kv.set('x', 1) + self.assertEqual(kv.set('x', 1), 1) + + def test_debug(self): + # pure coverage test... + io = StringIO() + kv = Storage(':memory:') + kv.debug(io) + + def test_record(self): + kv = Storage(':memory:') + kv.set('config', {'x': 1, 'b': False}) + config = kv.get('config', record=True) + self.assertEqual(config.b, False) + self.assertEqual(config.x, 1) + self.assertEqual(kv.set('config.x', 1), 1) + try: + config.z + except AttributeError: + pass + else: + self.fail('attribute error should fire on nonexistent') + + def test_delta(self): + kv = Storage(':memory:') + kv.update({'a': 1, 'b': 2.2}, prefix="x") + delta = kv.delta({'a': 0, 'c': False}, prefix='x') + self.assertEqual( + delta, + {'a': (1, 0), 'b': (2.2, None), 'c': (None, False)}) + self.assertEqual(delta.a.previous, 1) + self.assertEqual(delta.a.current, 0) + self.assertEqual(delta.c.previous, None) + self.assertEqual(delta.c.current, False) + + def test_update(self): + kv = Storage(':memory:') + kv.update({'v_a': 1, 'v_b': 2.2}) + self.assertEqual(kv.getrange('v_'), {'v_a': 1, 'v_b': 2.2}) + + kv.update({'a': False, 'b': True}, prefix='x_') + self.assertEqual( + kv.getrange('x_', True), {'a': False, 'b': True}) + + def test_keyrange(self): + kv = Storage(':memory:') + kv.set('docker.net_mtu', 1) + kv.set('docker.net_nack', True) + kv.set('docker.net_type', 'vxlan') + self.assertEqual( + kv.getrange('docker'), + {'docker.net_mtu': 1, 'docker.net_type': 'vxlan', + 'docker.net_nack': True}) + self.assertEqual( + kv.getrange('docker.', True), + {'net_mtu': 1, 'net_type': 'vxlan', 'net_nack': True}) + + def test_get_set_unset(self): + kv = Storage(':memory:') + kv.hook_scope('test') + kv.set('hello', 'saucy') + kv.set('hello', 'world') + self.assertEqual(kv.get('hello'), 'world') + kv.flush() + kv.unset('hello') + self.assertEqual(kv.get('hello'), None) + kv.flush(False) + self.assertEqual(kv.get('hello'), 'world') + + +if __name__ == '__main__': + unittest.main() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/test_debug.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/test_debug.py new file mode 100755 index 0000000000000000000000000000000000000000..76ee450628ef8b2deee7bcb4842289c8971c52ee --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/test_debug.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# coding: utf-8 + +from unittest import TestCase + +from charmhelpers.fetch.python import debug + +import mock +
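# Context for these tests: debug.set_trace() is expected to open a TCP + # port via open_port() (4444 by default, per the assertion below) and + # start an Rpdb session one can attach to remotely, e.g. with telnet. +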
+__author__ = "Jorge Niedbalski " + +TO_PATCH = [ + "log", + "open_port", + "close_port", + "Rpdb", + "_error", +] + + +class DebugTestCase(TestCase): + """Test cases for charmhelpers.contrib.python.debug""" + + def setUp(self): + TestCase.setUp(self) + self.patch_all() + self.log.return_value = True + self.close_port.return_value = True + + self.wrapped_function = mock.Mock(return_value=True) + self.set_trace = debug.set_trace + + def patch(self, method): + _m = mock.patch.object(debug, method) + _mock = _m.start() + self.addCleanup(_m.stop) + return _mock + + def patch_all(self): + for method in TO_PATCH: + setattr(self, method, self.patch(method)) + + def test_debug_set_trace(self): + """Check if set_trace works + """ + self.set_trace() + self.open_port.assert_called_with(4444) + + def test_debug_set_trace_ex(self): + """Check if set_trace raises exception + """ + self.set_trace() + self.Rpdb.set_trace.side_effect = Exception() + self.assertTrue(self._error.called) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/test_packages.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/test_packages.py new file mode 100644 index 0000000000000000000000000000000000000000..c4bd05c2cad7dda7d8cf1a7101358d6af4d2c825 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/test_packages.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +# coding: utf-8 + +import mock +import six + +from unittest import TestCase +from charmhelpers.fetch.python import packages + +__author__ = "Jorge Niedbalski " + +TO_PATCH = [ + "apt_install", + "charm_dir", + "log", + "pip_execute", +] + + +class PipTestCase(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.patch_all() + + self.log.return_value = True + self.apt_install.return_value = True + + def patch(self, method): + _m = mock.patch.object(packages, method) + _mock = _m.start() + self.addCleanup(_m.stop) + return _mock + + def patch_all(self): + for method in TO_PATCH: + setattr(self, method, self.patch(method)) + + def test_pip_install_requirements(self): + """ + Check if pip_install_requirements works correctly + """ + packages.pip_install_requirements("test_requirements.txt") + self.pip_execute.assert_called_with(["install", + "-r test_requirements.txt"]) + + packages.pip_install_requirements("test_requirements.txt", + "test_constraints.txt") + self.pip_execute.assert_called_with(["install", + "-r test_requirements.txt", + "-c test_constraints.txt"]) + + packages.pip_install_requirements("test_requirements.txt", + proxy="proxy_addr:8080") + + self.pip_execute.assert_called_with(["install", + "--proxy=proxy_addr:8080", + "-r test_requirements.txt"]) + + packages.pip_install_requirements("test_requirements.txt", + log="output.log", + proxy="proxy_addr:8080") + + self.pip_execute.assert_called_with(["install", + "--log=output.log", + "--proxy=proxy_addr:8080", + "-r test_requirements.txt"]) + + def test_pip_install(self): + """ + Check if pip_install works correctly with a single package + """ + packages.pip_install("mock") + self.pip_execute.assert_called_with(["install", + "mock"]) + packages.pip_install("mock", + proxy="proxy_addr:8080") + + self.pip_execute.assert_called_with(["install", + "--proxy=proxy_addr:8080", + "mock"]) + packages.pip_install("mock", + log="output.log", + proxy="proxy_addr:8080") + + self.pip_execute.assert_called_with(["install", + "--log=output.log", + "--proxy=proxy_addr:8080", + "mock"]) + + def 
test_pip_install_upgrade(self): + """ + Check if pip_install works correctly with a single package + """ + packages.pip_install("mock", upgrade=True) + self.pip_execute.assert_called_with(["install", + "--upgrade", + "mock"]) + + def test_pip_install_multiple(self): + """ + Check if pip_install works correctly with multiple packages + """ + packages.pip_install(["mock", "nose"]) + self.pip_execute.assert_called_with(["install", + "mock", "nose"]) + + @mock.patch('subprocess.check_call') + @mock.patch('os.path.join') + def test_pip_install_venv(self, join, check_call): + """ + Check if pip_install works correctly with multiple packages + """ + join.return_value = 'joined-path' + packages.pip_install(["mock", "nose"], venv=True) + check_call.assert_called_with(["joined-path", "install", + "mock", "nose"]) + + def test_pip_uninstall(self): + """ + Check if pip_uninstall works correctly with a single package + """ + packages.pip_uninstall("mock") + self.pip_execute.assert_called_with(["uninstall", + "-q", + "-y", + "mock"]) + packages.pip_uninstall("mock", + proxy="proxy_addr:8080") + + self.pip_execute.assert_called_with(["uninstall", + "-q", + "-y", + "--proxy=proxy_addr:8080", + "mock"]) + packages.pip_uninstall("mock", + log="output.log", + proxy="proxy_addr:8080") + + self.pip_execute.assert_called_with(["uninstall", + "-q", + "-y", + "--log=output.log", + "--proxy=proxy_addr:8080", + "mock"]) + + def test_pip_uninstall_multiple(self): + """ + Check if pip_uninstall works correctly with multiple packages + """ + packages.pip_uninstall(["mock", "nose"]) + self.pip_execute.assert_called_with(["uninstall", + "-q", + "-y", + "mock", "nose"]) + + def test_pip_list(self): + """ + Checks if pip_list works correctly + """ + packages.pip_list() + self.pip_execute.assert_called_with(["list"]) + + @mock.patch('os.path.join') + @mock.patch('subprocess.check_call') + @mock.patch.object(packages, 'pip_install') + def test_pip_create_virtualenv(self, pip_install, check_call, join): + """ + Checks if pip_create_virtualenv works correctly + """ + join.return_value = 'joined-path' + packages.pip_create_virtualenv() + if six.PY2: + self.apt_install.assert_called_with('python-virtualenv') + else: + self.apt_install.assert_called_with('python3-virtualenv') + check_call.assert_called_with(['virtualenv', 'joined-path']) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/test_version.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/test_version.py new file mode 100644 index 0000000000000000000000000000000000000000..9e32af3d5455f94573885e77330d7532f000e973 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/python/test_version.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# coding: utf-8 + +from unittest import TestCase +from charmhelpers.fetch.python import version + +import sys + +__author__ = "Jorge Niedbalski " + + +class VersionTestCase(TestCase): + + def setUp(self): + TestCase.setUp(self) + + def test_current_version(self): + """ + Check if version.current_version and version.current_version_string + works correctly + """ + self.assertEquals(version.current_version(), + sys.version_info) + self.assertEquals(version.current_version_string(), + "{0}.{1}.{2}".format(sys.version_info.major, + sys.version_info.minor, + sys.version_info.micro)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_archiveurl.py 
b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_archiveurl.py new file mode 100644 index 0000000000000000000000000000000000000000..b3733b5ba58ad6f4542052ccb07da559cf93a207 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_archiveurl.py @@ -0,0 +1,131 @@ +import os + +from unittest import TestCase +from mock import ( + MagicMock, + patch, + mock_open, + Mock, + ANY +) +from charmhelpers.fetch import ( + archiveurl, + UnhandledSource, +) + +import six +if six.PY3: + from urllib.parse import urlparse + from urllib.error import URLError +else: + from urllib2 import URLError + from urlparse import urlparse + + +class ArchiveUrlFetchHandlerTest(TestCase): + + def setUp(self): + super(ArchiveUrlFetchHandlerTest, self).setUp() + self.valid_urls = ( + "http://example.com/foo.tar.gz", + "http://example.com/foo.tgz", + "http://example.com/foo.tar.bz2", + "http://example.com/foo.tbz2", + "http://example.com/foo.zip", + "http://example.com/foo.zip?bar=baz&x=y#whee", + "ftp://example.com/foo.tar.gz", + "https://example.com/foo.tgz", + "file://example.com/foo.tar.bz2", + ) + self.invalid_urls = ( + "git://example.com/foo.tar.gz", + "http://example.com/foo", + "http://example.com/foobar=baz&x=y#tar.gz", + "http://example.com/foobar?h=baz.zip", + "bzr+ssh://example.com/foo.tar.gz", + "lp:example/foo.tgz", + "file//example.com/foo.tar.bz2", + "garbage", + ) + self.fh = archiveurl.ArchiveUrlFetchHandler() + + def test_handles_archive_urls(self): + for url in self.valid_urls: + result = self.fh.can_handle(url) + self.assertEqual(result, True, url) + for url in self.invalid_urls: + result = self.fh.can_handle(url) + self.assertNotEqual(result, True, url) + + @patch('charmhelpers.fetch.archiveurl.urlopen') + def test_downloads(self, _urlopen): + for url in self.valid_urls: + response = MagicMock() + response.read.return_value = "bar" + _urlopen.return_value = response + + _open = mock_open() + with patch('charmhelpers.fetch.archiveurl.open', + _open, create=True): + self.fh.download(url, "foo") + + response.read.assert_called_with() + _open.assert_called_once_with("foo", 'wb') + _open().write.assert_called_with("bar") + + @patch('charmhelpers.fetch.archiveurl.check_hash') + @patch('charmhelpers.fetch.archiveurl.mkdir') + @patch('charmhelpers.fetch.archiveurl.extract') + def test_installs(self, _extract, _mkdir, _check_hash): + self.fh.download = MagicMock() + + for url in self.valid_urls: + filename = urlparse(url).path + dest = os.path.join('foo', 'fetched', os.path.basename(filename)) + _extract.return_value = dest + with patch.dict('os.environ', {'CHARM_DIR': 'foo'}): + where = self.fh.install(url, checksum='deadbeef') + self.fh.download.assert_called_with(url, dest) + _extract.assert_called_with(dest, None) + _check_hash.assert_called_with(dest, 'deadbeef', 'sha1') + self.assertEqual(where, dest) + _check_hash.reset_mock() + + url = "http://www.example.com/archive.tar.gz" + + self.fh.download.side_effect = URLError('fail') + with patch.dict('os.environ', {'CHARM_DIR': 'foo'}): + self.assertRaises(UnhandledSource, self.fh.install, url) + + self.fh.download.side_effect = OSError('fail') + with patch.dict('os.environ', {'CHARM_DIR': 'foo'}): + self.assertRaises(UnhandledSource, self.fh.install, url) + + @patch('charmhelpers.fetch.archiveurl.check_hash') + @patch('charmhelpers.fetch.archiveurl.mkdir') + @patch('charmhelpers.fetch.archiveurl.extract') + def test_install_with_hash_in_url(self, _extract, _mkdir, _check_hash): + 
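# The "#sha512=beefdead" fragment in the URL below should be parsed off + # and handed to check_hash() as the expected digest and algorithm. +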
self.fh.download = MagicMock() + url = "file://example.com/foo.tar.bz2#sha512=beefdead" + with patch.dict('os.environ', {'CHARM_DIR': 'foo'}): + self.fh.install(url) + _check_hash.assert_called_with(ANY, 'beefdead', 'sha512') + + @patch('charmhelpers.fetch.archiveurl.mkdir') + @patch('charmhelpers.fetch.archiveurl.extract') + def test_install_with_duplicate_hash_in_url(self, _extract, _mkdir): + self.fh.download = MagicMock() + url = "file://example.com/foo.tar.bz2#sha512=a&sha512=b" + with patch.dict('os.environ', {'CHARM_DIR': 'foo'}): + with self.assertRaisesRegexp( + TypeError, "Expected 1 hash value, not 2"): + self.fh.install(url) + + @patch('charmhelpers.fetch.archiveurl.urlretrieve') + @patch('charmhelpers.fetch.archiveurl.check_hash') + def test_download_and_validate(self, vfmock, urlmock): + urlmock.return_value = ('/tmp/tmpebM9Hv', Mock()) + dlurl = 'http://example.com/foo.tgz' + dlhash = '988881adc9fc3655077dc2d4d757d480b5ea0e11' + self.fh.download_and_validate(dlurl, dlhash) + vfmock.assert_called_with('/tmp/tmpebM9Hv', dlhash, 'sha1') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_bzrurl.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_bzrurl.py new file mode 100644 index 0000000000000000000000000000000000000000..4b71209b9998cb319daf3b9a2645c394e1883d8e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_bzrurl.py @@ -0,0 +1,136 @@ +import os +import shutil +import subprocess +import tempfile +from testtools import TestCase +from mock import ( + MagicMock, + patch, +) + +import six +if six.PY3: + from urllib.parse import urlparse +else: + from urlparse import urlparse + +try: + from charmhelpers.fetch import ( + bzrurl, + UnhandledSource, + ) +except ImportError: + bzrurl = None + UnhandledSource = None + + +class BzrUrlFetchHandlerTest(TestCase): + + def setUp(self): + super(BzrUrlFetchHandlerTest, self).setUp() + self.valid_urls = ( + "bzr+ssh://example.com/branch-name", + "bzr+ssh://example.com/branch-name/", + "lp:lp-branch-name", + "lp:example/lp-branch-name", + ) + self.invalid_urls = ( + "http://example.com/foo.tar.gz", + "http://example.com/foo.tgz", + "http://example.com/foo.tar.bz2", + "http://example.com/foo.tbz2", + "http://example.com/foo.zip", + "http://example.com/foo.zip?bar=baz&x=y#whee", + "ftp://example.com/foo.tar.gz", + "https://example.com/foo.tgz", + "file://example.com/foo.tar.bz2", + "git://example.com/foo.tar.gz", + "http://example.com/foo", + "http://example.com/foobar=baz&x=y#tar.gz", + "http://example.com/foobar?h=baz.zip", + "abc:example", + "file//example.com/foo.tar.bz2", + "garbage", + ) + self.fh = bzrurl.BzrUrlFetchHandler() + + def test_handles_bzr_urls(self): + for url in self.valid_urls: + result = self.fh.can_handle(url) + self.assertEqual(result, True, url) + for url in self.invalid_urls: + result = self.fh.can_handle(url) + self.assertNotEqual(result, True, url) + + @patch('charmhelpers.fetch.bzrurl.check_output') + def test_branch(self, check_output): + dest_path = "/destination/path" + for url in self.valid_urls: + self.fh.remote_branch = MagicMock() + self.fh.load_plugins = MagicMock() + self.fh.branch(url, dest_path) + + check_output.assert_called_with(['bzr', 'branch', url, dest_path], stderr=-2) + + for url in self.invalid_urls: + with patch.dict('os.environ', {'CHARM_DIR': 'foo'}): + self.assertRaises(UnhandledSource, self.fh.branch, + url, dest_path) + + @patch('charmhelpers.fetch.bzrurl.check_output') + def 
test_branch_revno(self, check_output): + dest_path = "/destination/path" + for url in self.valid_urls: + self.fh.remote_branch = MagicMock() + self.fh.load_plugins = MagicMock() + self.fh.branch(url, dest_path, revno=42) + + check_output.assert_called_with(['bzr', 'branch', '-r', '42', + url, dest_path], stderr=-2) + + for url in self.invalid_urls: + with patch.dict('os.environ', {'CHARM_DIR': 'foo'}): + self.assertRaises(UnhandledSource, self.fh.branch, url, + dest_path) + + def test_branch_functional(self): + src = None + dst = None + try: + src = tempfile.mkdtemp() + subprocess.check_output(['bzr', 'init', src], stderr=subprocess.STDOUT) + dst = tempfile.mkdtemp() + os.rmdir(dst) + self.fh.branch(src, dst) + assert os.path.exists(os.path.join(dst, '.bzr')) + self.fh.branch(src, dst) # idempotent + assert os.path.exists(os.path.join(dst, '.bzr')) + finally: + if src: + shutil.rmtree(src, ignore_errors=True) + if dst: + shutil.rmtree(dst, ignore_errors=True) + + def test_installs(self): + self.fh.branch = MagicMock() + + for url in self.valid_urls: + branch_name = urlparse(url).path.strip("/").split("/")[-1] + dest = os.path.join('foo', 'fetched') + dest_dir = os.path.join(dest, os.path.basename(branch_name)) + with patch.dict('os.environ', {'CHARM_DIR': 'foo'}): + where = self.fh.install(url) + self.assertEqual(where, dest_dir) + + @patch('charmhelpers.fetch.bzrurl.mkdir') + def test_installs_dir(self, _mkdir): + self.fh.branch = MagicMock() + + for url in self.valid_urls: + branch_name = urlparse(url).path.strip("/").split("/")[-1] + dest = os.path.join('opt', 'f') + dest_dir = os.path.join(dest, os.path.basename(branch_name)) + with patch.dict('os.environ', {'CHARM_DIR': 'foo'}): + where = self.fh.install(url, dest) + self.assertEqual(where, dest_dir) + _mkdir.assert_called_with(dest, perms=0o755) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch.py new file mode 100644 index 0000000000000000000000000000000000000000..202467f99900a8c8598cc7e9526034ab4c364906 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch.py @@ -0,0 +1,269 @@ +import six +import os +import yaml + +from testtools import TestCase +from mock import ( + patch, + MagicMock, + call, +) + +from charmhelpers import fetch + +if six.PY3: + from urllib.parse import urlparse + builtin_open = 'builtins.open' +else: + from urlparse import urlparse + builtin_open = '__builtin__.open' + + +FAKE_APT_CACHE = { + # an installed package + 'vim': { + 'current_ver': '2:7.3.547-6ubuntu5' + }, + # a uninstalled installation candidate + 'emacs': { + } +} + + +def fake_apt_cache(in_memory=True, progress=None): + def _get(package): + pkg = MagicMock() + if package not in FAKE_APT_CACHE: + raise KeyError + pkg.name = package + if 'current_ver' in FAKE_APT_CACHE[package]: + pkg.current_ver.ver_str = FAKE_APT_CACHE[package]['current_ver'] + else: + pkg.current_ver = None + return pkg + cache = MagicMock() + cache.__getitem__.side_effect = _get + return cache + + +def getenv(update=None): + # return a copy of os.environ with update applied. 
+ # this was necessary because some modules modify os.environment directly + copy = os.environ.copy() + if update is not None: + copy.update(update) + return copy + + +class FetchTest(TestCase): + + @patch('charmhelpers.fetch.log') + @patch.object(fetch, 'config') + @patch.object(fetch, 'add_source') + def test_configure_sources_single_source(self, add_source, config, log): + config.side_effect = ['source', 'key'] + fetch.configure_sources() + add_source.assert_called_with('source', 'key') + + @patch.object(fetch, 'config') + @patch.object(fetch, 'add_source') + def test_configure_sources_null_source(self, add_source, config): + config.side_effect = [None, None] + fetch.configure_sources() + self.assertEqual(add_source.call_count, 0) + + @patch.object(fetch, 'config') + @patch.object(fetch, 'add_source') + def test_configure_sources_empty_source(self, add_source, config): + config.side_effect = ['', ''] + fetch.configure_sources() + self.assertEqual(add_source.call_count, 0) + + @patch.object(fetch, 'config') + @patch.object(fetch, 'add_source') + def test_configure_sources_single_source_no_key(self, add_source, config): + config.side_effect = ['source', None] + fetch.configure_sources() + add_source.assert_called_with('source', None) + + @patch.object(fetch, 'config') + @patch.object(fetch, 'add_source') + def test_configure_sources_multiple_sources(self, add_source, config): + sources = ["sourcea", "sourceb"] + keys = ["keya", None] + config.side_effect = [ + yaml.dump(sources), + yaml.dump(keys) + ] + fetch.configure_sources() + add_source.assert_has_calls([ + call('sourcea', 'keya'), + call('sourceb', None) + ]) + + @patch.object(fetch, 'config') + @patch.object(fetch, 'add_source') + def test_configure_sources_missing_keys(self, add_source, config): + sources = ["sourcea", "sourceb"] + keys = ["keya"] # Second key is missing + config.side_effect = [ + yaml.dump(sources), + yaml.dump(keys) + ] + self.assertRaises(fetch.SourceConfigError, fetch.configure_sources) + + @patch.object(fetch, '_fetch_update') + @patch.object(fetch, 'config') + @patch.object(fetch, 'add_source') + def test_configure_sources_update_called_ubuntu(self, add_source, config, + update): + config.side_effect = ['source', 'key'] + fetch.configure_sources(update=True) + add_source.assert_called_with('source', 'key') + self.assertTrue(update.called) + + +class InstallTest(TestCase): + + def setUp(self): + super(InstallTest, self).setUp() + self.valid_urls = ( + "http://example.com/foo.tar.gz", + "http://example.com/foo.tgz", + "http://example.com/foo.tar.bz2", + "http://example.com/foo.tbz2", + "http://example.com/foo.zip", + "http://example.com/foo.zip?bar=baz&x=y#whee", + "ftp://example.com/foo.tar.gz", + "https://example.com/foo.tgz", + "file://example.com/foo.tar.bz2", + "bzr+ssh://example.com/branch-name", + "bzr+ssh://example.com/branch-name/", + "lp:branch-name", + "lp:example/branch-name", + ) + self.invalid_urls = ( + "git://example.com/foo.tar.gz", + "http://example.com/foo", + "http://example.com/foobar=baz&x=y#tar.gz", + "http://example.com/foobar?h=baz.zip", + "abc:example", + "file//example.com/foo.tar.bz2", + "garbage", + ) + + @patch('charmhelpers.fetch.log') + @patch('charmhelpers.fetch.plugins') + def test_installs_remote(self, _plugins, _log): + h1 = MagicMock(name="h1") + h1.can_handle.return_value = "Nope" + + h2 = MagicMock(name="h2") + h2.can_handle.return_value = True + h2.install.side_effect = fetch.UnhandledSource() + + h3 = MagicMock(name="h3") + h3.can_handle.return_value = True + 
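# Handler chain being exercised: h1 declines (can_handle() returns a + # string rather than True), h2 accepts but raises UnhandledSource, so + # install_remote() should fall through to h3 and return its result. +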
h3.install.return_value = "foo" + + _plugins.return_value = [h1, h2, h3] + for url in self.valid_urls: + result = fetch.install_remote(url) + + h1.can_handle.assert_called_with(url) + h2.can_handle.assert_called_with(url) + h3.can_handle.assert_called_with(url) + + h1.install.assert_not_called() + h2.install.assert_called_with(url) + h3.install.assert_called_with(url) + + self.assertEqual(result, "foo") + + fetch.install_remote('url', extra_arg=True) + h2.install.assert_called_with('url', extra_arg=True) + + @patch('charmhelpers.fetch.install_remote') + @patch('charmhelpers.fetch.config') + def test_installs_from_config(self, _config, _instrem): + for url in self.valid_urls: + _config.return_value = {"foo": url} + fetch.install_from_config("foo") + _instrem.assert_called_with(url) + + +class PluginTest(TestCase): + + @patch('charmhelpers.fetch.importlib.import_module') + def test_imports_plugins(self, import_): + fetch_handlers = ['a.foo', 'b.foo', 'c.foo'] + module = MagicMock() + import_.return_value = module + plugins = fetch.plugins(fetch_handlers) + + self.assertEqual(len(fetch_handlers), len(plugins)) + module.foo.assert_has_calls(([call()] * len(fetch_handlers))) + + @patch('charmhelpers.fetch.importlib.import_module') + def test_imports_plugins_default(self, import_): + module = MagicMock() + import_.return_value = module + plugins = fetch.plugins() + + self.assertEqual(len(fetch.FETCH_HANDLERS), len(plugins)) + for handler in fetch.FETCH_HANDLERS: + classname = handler.rsplit('.', 1)[-1] + getattr(module, classname).assert_called_with() + + @patch('charmhelpers.fetch.log') + @patch('charmhelpers.fetch.importlib.import_module') + def test_skips_and_logs_missing_plugins(self, import_, log_): + fetch_handlers = ['a.foo', 'b.foo', 'c.foo'] + import_.side_effect = (NotImplementedError, NotImplementedError, + MagicMock()) + plugins = fetch.plugins(fetch_handlers) + + self.assertEqual(1, len(plugins)) + self.assertEqual(2, log_.call_count) + + @patch('charmhelpers.fetch.log') + @patch.object(fetch.importlib, 'import_module') + def test_plugins_are_valid(self, import_module, log_): + plugins = fetch.plugins() + self.assertEqual(len(fetch.FETCH_HANDLERS), len(plugins)) + + +class BaseFetchHandlerTest(TestCase): + + def setUp(self): + super(BaseFetchHandlerTest, self).setUp() + self.test_urls = ( + "http://example.com/foo?bar=baz&x=y#blarg", + "https://example.com/foo", + "ftp://example.com/foo", + "file://example.com/foo", + "git://github.com/foo/bar", + "bzr+ssh://bazaar.launchpad.net/foo/bar", + "bzr+http://bazaar.launchpad.net/foo/bar", + "garbage", + ) + self.fh = fetch.BaseFetchHandler() + + def test_handles_nothing(self): + for url in self.test_urls: + self.assertNotEqual(self.fh.can_handle(url), True) + + def test_install_throws_unhandled(self): + for url in self.test_urls: + self.assertRaises(fetch.UnhandledSource, self.fh.install, url) + + def test_parses_urls(self): + sample_url = "http://example.com/foo?bar=baz&x=y#blarg" + p = self.fh.parse_url(sample_url) + self.assertEqual(p, urlparse(sample_url)) + + def test_returns_baseurl(self): + sample_url = "http://example.com/foo?bar=baz&x=y#blarg" + expected_url = "http://example.com/foo" + u = self.fh.base_url(sample_url) + self.assertEqual(u, expected_url) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch_centos.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch_centos.py new file mode 100644 index 
0000000000000000000000000000000000000000..43ca852d3abc658d52c40cbb96ca28a2c0908a47 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch_centos.py @@ -0,0 +1,315 @@ +import subprocess +import os + +from tests.helpers import patch_open +from testtools import TestCase +from mock import ( + patch, + MagicMock, + call, +) +from charmhelpers.fetch import centos as fetch + + +def getenv(update=None): + # return a copy of os.environ with update applied. + # this was necessary because some modules modify os.environment directly + copy = os.environ.copy() + if update is not None: + copy.update(update) + return copy + + +class FetchTest(TestCase): + + @patch("charmhelpers.fetch.log") + @patch('yum.YumBase.doPackageLists') + def test_filter_packages_missing_centos(self, yumBase, log): + + class MockPackage: + def __init__(self, name): + self.base_package_name = name + + yum_dict = { + 'installed': { + MockPackage('vim') + }, + 'available': { + MockPackage('vim') + } + } + import yum + yum.YumBase.return_value.doPackageLists.return_value = yum_dict + result = fetch.filter_installed_packages(['vim', 'emacs']) + self.assertEquals(result, ['emacs']) + + @patch("charmhelpers.fetch.log") + def test_filter_packages_none_missing_centos(self, log): + + class MockPackage: + def __init__(self, name): + self.base_package_name = name + + yum_dict = { + 'installed': { + MockPackage('vim') + }, + 'available': { + MockPackage('vim') + } + } + import yum + yum.yumBase.return_value.doPackageLists.return_value = yum_dict + result = fetch.filter_installed_packages(['vim']) + self.assertEquals(result, []) + + @patch('charmhelpers.fetch.centos.log') + @patch('yum.YumBase.doPackageLists') + def test_filter_packages_not_available_centos(self, yumBase, log): + + class MockPackage: + def __init__(self, name): + self.base_package_name = name + + yum_dict = { + 'installed': { + MockPackage('vim') + } + } + import yum + yum.YumBase.return_value.doPackageLists.return_value = yum_dict + + result = fetch.filter_installed_packages(['vim', 'joe']) + self.assertEquals(result, ['joe']) + + @patch('charmhelpers.fetch.centos.log') + def test_add_source_none_centos(self, log): + fetch.add_source(source=None) + self.assertTrue(log.called) + + @patch('charmhelpers.fetch.centos.log') + @patch('os.listdir') + def test_add_source_http_centos(self, listdir, log): + source = "http://archive.ubuntu.com/ubuntu raring-backports main" + with patch_open() as (mock_open, mock_file): + fetch.add_source(source=source) + listdir.assert_called_with('/etc/yum.repos.d/') + mock_file.write.assert_has_calls([ + call("[archive.ubuntu.com_ubuntu raring-backports main]\n"), + call("name=archive.ubuntu.com/ubuntu raring-backports main\n"), + call("baseurl=http://archive.ubuntu.com/ubuntu raring" + "-backports main\n\n")]) + + @patch('charmhelpers.fetch.centos.log') + @patch('os.listdir') + @patch('subprocess.check_call') + def test_add_source_http_and_key_id_centos(self, check_call, + listdir, log): + source = "http://archive.ubuntu.com/ubuntu raring-backports main" + key_id = "akey" + with patch_open() as (mock_open, mock_file): + fetch.add_source(source=source, key=key_id) + listdir.assert_called_with('/etc/yum.repos.d/') + mock_file.write.assert_has_calls([ + call("[archive.ubuntu.com_ubuntu raring-backports main]\n"), + call("name=archive.ubuntu.com/ubuntu raring-backports main\n"), + call("baseurl=http://archive.ubuntu.com/ubuntu raring" + "-backports main\n\n")]) + check_call.assert_called_with(['rpm', 
'--import', key_id]) + + @patch('charmhelpers.fetch.centos.log') + @patch('os.listdir') + @patch('subprocess.check_call') + def test_add_source_https_and_key_id_centos(self, check_call, + listdir, log): + source = "https://USER:PASS@private-ppa.launchpad.net/project/awesome" + key_id = "GPGPGP" + with patch_open() as (mock_open, mock_file): + fetch.add_source(source=source, key=key_id) + listdir.assert_called_with('/etc/yum.repos.d/') + mock_file.write.assert_has_calls([ + call("[_USER:PASS@private-ppa.launchpad" + ".net_project_awesome]\n"), + call("name=/USER:PASS@private-ppa.launchpad.net" + "/project/awesome\n"), + call("baseurl=https://USER:PASS@private-ppa.launchpad.net" + "/project/awesome\n\n")]) + check_call.assert_called_with(['rpm', '--import', key_id]) + + @patch('charmhelpers.fetch.centos.log') + @patch.object(fetch, 'NamedTemporaryFile') + @patch('os.listdir') + @patch('subprocess.check_call') + def test_add_source_http_and_key_centos(self, check_call, + listdir, temp_file, log): + source = "http://archive.ubuntu.com/ubuntu raring-backports main" + key = ''' + -----BEGIN PGP PUBLIC KEY BLOCK----- + [...] + -----END PGP PUBLIC KEY BLOCK----- + ''' + file_mock = MagicMock() + file_mock.name = 'temporary_file' + temp_file.return_value.__enter__.return_value = file_mock + listdir.return_value = [] + + with patch_open() as (mock_open, mock_file): + fetch.add_source(source=source, key=key) + listdir.assert_called_with('/etc/yum.repos.d/') + self.assertTrue(log.called) + check_call.assert_called_with(['rpm', '--import', file_mock.name]) + file_mock.write.assert_called_once_with(key) + file_mock.flush.assert_called_once_with() + file_mock.seek.assert_called_once_with(0) + + +class YumTests(TestCase): + + @patch('subprocess.call') + @patch('charmhelpers.fetch.centos.log') + def test_yum_upgrade_non_fatal(self, log, mock_call): + options = ['--foo', '--bar'] + fetch.upgrade(options) + + mock_call.assert_called_with(['yum', '--assumeyes', + '--foo', '--bar', 'upgrade'], + env=getenv()) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.centos.log') + def test_yum_upgrade_fatal(self, log, mock_call): + options = ['--foo', '--bar'] + fetch.upgrade(options, fatal=True) + + mock_call.assert_called_with(['yum', '--assumeyes', + '--foo', '--bar', 'upgrade'], + env=getenv()) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.centos.log') + def test_installs_yum_packages(self, log, mock_call): + packages = ['foo', 'bar'] + options = ['--foo', '--bar'] + + fetch.install(packages, options) + + mock_call.assert_called_with(['yum', '--assumeyes', + '--foo', '--bar', 'install', + 'foo', 'bar'], + env=getenv()) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.centos.log') + def test_installs_yum_packages_without_options(self, log, mock_call): + packages = ['foo', 'bar'] + fetch.install(packages) + + mock_call.assert_called_with(['yum', '--assumeyes', + 'install', 'foo', 'bar'], + env=getenv()) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.centos.log') + def test_installs_yum_packages_as_string(self, log, mock_call): + packages = 'foo bar' + fetch.install(packages) + + mock_call.assert_called_with(['yum', '--assumeyes', + 'install', 'foo bar'], + env=getenv()) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.centos.log') + def test_installs_yum_packages_with_possible_errors(self, log, mock_call): + packages = ['foo', 'bar'] + options = ['--foo', '--bar'] + + fetch.install(packages, options, fatal=True) + + 
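# With fatal=True the yum command runs via subprocess.check_call + # (patched above), so a non-zero exit status would surface as a + # CalledProcessError rather than being ignored. +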
mock_call.assert_called_with(['yum', '--assumeyes', + '--foo', '--bar', + 'install', 'foo', 'bar'], + env=getenv()) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.centos.log') + def test_purges_yum_packages_as_string_fatal(self, log, mock_call): + packages = 'irrelevant names' + mock_call.side_effect = OSError('fail') + + self.assertRaises(OSError, fetch.purge, packages, fatal=True) + self.assertTrue(log.called) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.centos.log') + def test_purges_yum_packages_fatal(self, log, mock_call): + packages = ['irrelevant', 'names'] + mock_call.side_effect = OSError('fail') + + self.assertRaises(OSError, fetch.purge, packages, fatal=True) + self.assertTrue(log.called) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.centos.log') + def test_purges_yum_packages_as_string_nofatal(self, log, mock_call): + packages = 'foo bar' + fetch.purge(packages) + + self.assertTrue(log.called) + mock_call.assert_called_with(['yum', '--assumeyes', + 'remove', 'foo bar'], + env=getenv()) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.centos.log') + def test_purges_yum_packages_nofatal(self, log, mock_call): + packages = ['foo', 'bar'] + fetch.purge(packages) + + self.assertTrue(log.called) + mock_call.assert_called_with(['yum', '--assumeyes', + 'remove', 'foo', 'bar'], + env=getenv()) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.centos.log') + def test_yum_update_fatal(self, log, check_call): + fetch.update(fatal=True) + check_call.assert_called_with(['yum', '--assumeyes', 'update'], + env=getenv()) + self.assertTrue(log.called) + + @patch('subprocess.check_output') + @patch('charmhelpers.fetch.centos.log') + def test_yum_search(self, log, check_output): + package = ['irrelevant'] + + from charmhelpers.fetch.centos import yum_search + yum_search(package) + check_output.assert_called_with(['yum', 'search', 'irrelevant']) + self.assertTrue(log.called) + + @patch('subprocess.check_call') + @patch('time.sleep') + def test_run_yum_command_retries_if_fatal(self, check_call, sleep): + """The _run_yum_command function retries the command if it can't get + the YUM lock.""" + self.called = False + + def side_effect(*args, **kwargs): + """ + First, raise an exception (can't acquire lock), then return 0 + (the lock is grabbed). 
+ """ + if not self.called: + self.called = True + raise subprocess.CalledProcessError( + returncode=1, cmd="some command") + else: + return 0 + + check_call.side_effect = side_effect + check_call.return_value = 0 + from charmhelpers.fetch.centos import _run_yum_command + _run_yum_command(["some", "command"], fatal=True) + self.assertTrue(sleep.called) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch_ubuntu.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch_ubuntu.py new file mode 100644 index 0000000000000000000000000000000000000000..506746181990a861b254c774e03393e4a0dced16 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch_ubuntu.py @@ -0,0 +1,1019 @@ +import six +import subprocess +import io +import os + +from tests.helpers import patch_open +from testtools import TestCase +from mock import ( + patch, + MagicMock, + call, + sentinel, +) +from charmhelpers.fetch import ubuntu as fetch + +if six.PY3: + builtin_open = 'builtins.open' +else: + builtin_open = '__builtin__.open' + +# mocked return of openstack.get_distrib_codename() +FAKE_CODENAME = 'precise' + +url = 'deb ' + fetch.CLOUD_ARCHIVE_URL +UCA_SOURCES = [ + ('cloud:precise-folsom/proposed', url + ' precise-proposed/folsom main'), + ('cloud:precise-folsom', url + ' precise-updates/folsom main'), + ('cloud:precise-folsom/updates', url + ' precise-updates/folsom main'), + ('cloud:precise-grizzly/proposed', url + ' precise-proposed/grizzly main'), + ('cloud:precise-grizzly', url + ' precise-updates/grizzly main'), + ('cloud:precise-grizzly/updates', url + ' precise-updates/grizzly main'), + ('cloud:precise-havana/proposed', url + ' precise-proposed/havana main'), + ('cloud:precise-havana', url + ' precise-updates/havana main'), + ('cloud:precise-havana/updates', url + ' precise-updates/havana main'), + ('cloud:precise-icehouse/proposed', + url + ' precise-proposed/icehouse main'), + ('cloud:precise-icehouse', url + ' precise-updates/icehouse main'), + ('cloud:precise-icehouse/updates', url + ' precise-updates/icehouse main'), +] + +PGP_KEY_ASCII_ARMOR = """-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: SKS 1.1.5 +Comment: Hostname: keyserver.ubuntu.com + +mI0EUCEyTAEEAMuUxyfiegCCwn4J/c0nw5PUTSJdn5FqiUTq6iMfij65xf1vl0g/Mxqw0gfg +AJIsCDvO9N9dloLAwF6FUBMg5My7WyhRPTAKF505TKJboyX3Pp4J1fU1LV8QFVOp87vUh1Rz +B6GU7cSglhnbL85gmbJTllkzkb3h4Yw7W+edjcQ/ABEBAAG0K0xhdW5jaHBhZCBQUEEgZm9y +IFVidW50dSBDbG91ZCBBcmNoaXZlIFRlYW2IuAQTAQIAIgUCUCEyTAIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQimhEop9oEE7kJAP/eTBgq3Mhbvo0d8elMOuqZx3nmU7gSyPh +ep0zYIRZ5TJWl/7PRtvp0CJA6N6ZywYTQ/4ANHhpibcHZkh8K0AzUvsGXnJRSFoJeqyDbD91 +EhoO+4ZfHs2HvRBQEDZILMa2OyuB497E5Mmyua3HDEOrG2cVLllsUZzpTFCx8NgeMHk= +=jLBm +-----END PGP PUBLIC KEY BLOCK-----""" + +PGP_KEY_BIN_PGP = b'\x98\x8d\x04P!2L\x01\x04\x00\xcb\x94\xc7\'\xe2z\x00\x82\xc2~\t\xfd\xcd\'\xc3\x93\xd4M"]\x9f\x91j\x89D\xea\xea#\x1f\x8a>\xb9\xc5\xfdo\x97H?3\x1a\xb0\xd2\x07\xe0\x00\x92,\x08;\xce\xf4\xdf]\x96\x82\xc0\xc0^\x85P\x13 \xe4\xcc\xbb[(Q=0\n\x17\x9d9L\xa2[\xa3%\xf7>\x9e\t\xd5\xf55-_\x10\x15S\xa9\xf3\xbb\xd4\x87Ts\x07\xa1\x94\xed\xc4\xa0\x96\x19\xdb/\xce`\x99\xb2S\x96Y3\x91\xbd\xe1\xe1\x8c;[\xe7\x9d\x8d\xc4?\x00\x11\x01\x00\x01\xb4+Launchpad PPA for Ubuntu Cloud Archive 
Team\x88\xb8\x04\x13\x01\x02\x00"\x05\x02P!2L\x02\x1b\x03\x06\x0b\t\x08\x07\x03\x02\x06\x15\x08\x02\t\n\x0b\x04\x16\x02\x03\x01\x02\x1e\x01\x02\x17\x80\x00\n\t\x10\x8ahD\xa2\x9fh\x10N\xe4$\x03\xffy0`\xabs!n\xfa4w\xc7\xa50\xeb\xaag\x1d\xe7\x99N\xe0K#\xe1z\x9d3`\x84Y\xe52V\x97\xfe\xcfF\xdb\xe9\xd0"@\xe8\xde\x99\xcb\x06\x13C\xfe\x004xi\x89\xb7\x07fH|+@3R\xfb\x06^rQHZ\tz\xac\x83l?u\x12\x1a\x0e\xfb\x86_\x1e\xcd\x87\xbd\x10P\x106H,\xc6\xb6;+\x81\xe3\xde\xc4\xe4\xc9\xb2\xb9\xad\xc7\x0cC\xab\x1bg\x15.YlQ\x9c\xe9LP\xb1\xf0\xd8\x1e0y' # noqa + +# a keyid can be retrieved by the ASCII armor-encoded key using this: +# cat testkey.asc | gpg --with-colons --import-options import-show --dry-run +# --import +PGP_KEY_ID = '8a6844a29f68104e' + +FAKE_APT_CACHE = { + # an installed package + 'vim': { + 'current_ver': '2:7.3.547-6ubuntu5' + }, + # a uninstalled installation candidate + 'emacs': { + } +} + + +def fake_apt_cache(in_memory=True, progress=None): + def _get(package): + pkg = MagicMock() + if package not in FAKE_APT_CACHE: + raise KeyError + pkg.name = package + if 'current_ver' in FAKE_APT_CACHE[package]: + pkg.current_ver.ver_str = FAKE_APT_CACHE[package]['current_ver'] + else: + pkg.current_ver = None + return pkg + cache = MagicMock() + cache.__getitem__.side_effect = _get + return cache + + +class FetchTest(TestCase): + + def setUp(self): + super(FetchTest, self).setUp() + self.patch(fetch, 'get_apt_dpkg_env', lambda: {}) + + @patch("charmhelpers.fetch.ubuntu.log") + @patch.object(fetch, 'apt_cache') + def test_filter_packages_missing_ubuntu(self, cache, log): + cache.side_effect = fake_apt_cache + result = fetch.filter_installed_packages(['vim', 'emacs']) + self.assertEquals(result, ['emacs']) + + @patch("charmhelpers.fetch.ubuntu.log") + @patch.object(fetch, 'apt_cache') + def test_filter_packages_none_missing_ubuntu(self, cache, log): + cache.side_effect = fake_apt_cache + result = fetch.filter_installed_packages(['vim']) + self.assertEquals(result, []) + + @patch('charmhelpers.fetch.ubuntu.log') + @patch.object(fetch, 'apt_cache') + def test_filter_packages_not_available_ubuntu(self, cache, log): + cache.side_effect = fake_apt_cache + result = fetch.filter_installed_packages(['vim', 'joe']) + self.assertEquals(result, ['joe']) + log.assert_called_with('Package joe has no installation candidate.', + level='WARNING') + + @patch('charmhelpers.fetch.ubuntu.filter_installed_packages') + def test_filter_missing_packages(self, filter_installed_packages): + filter_installed_packages.return_value = ['pkga'] + self.assertEqual(['pkgb'], + fetch.filter_missing_packages(['pkga', 'pkgb'])) + + @patch.object(fetch, 'log', lambda *args, **kwargs: None) + @patch.object(fetch, '_write_apt_gpg_keyfile') + @patch.object(fetch, '_dearmor_gpg_key') + def test_import_apt_key_radix(self, dearmor_gpg_key, + w_keyfile): + def dearmor_side_effect(key_asc): + return { + PGP_KEY_ASCII_ARMOR: PGP_KEY_BIN_PGP, + }[key_asc] + dearmor_gpg_key.side_effect = dearmor_side_effect + + with patch('subprocess.check_output') as _subp_check_output: + curl_cmd = ['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)] + + def check_output_side_effect(command, env): + return { + ' '.join(curl_cmd): PGP_KEY_ASCII_ARMOR, + }[' '.join(command)] + _subp_check_output.side_effect = check_output_side_effect + + fetch.import_key(PGP_KEY_ID) + _subp_check_output.assert_called_with(curl_cmd, env=None) + w_keyfile.assert_called_once_with(key_name=PGP_KEY_ID, + 
key_material=PGP_KEY_BIN_PGP) + + @patch.object(fetch, 'log', lambda *args, **kwargs: None) + @patch.object(os, 'getenv') + @patch.object(fetch, '_write_apt_gpg_keyfile') + @patch.object(fetch, '_dearmor_gpg_key') + def test_import_apt_key_radix_https_proxy(self, dearmor_gpg_key, + w_keyfile, getenv): + def dearmor_side_effect(key_asc): + return { + PGP_KEY_ASCII_ARMOR: PGP_KEY_BIN_PGP, + }[key_asc] + dearmor_gpg_key.side_effect = dearmor_side_effect + + def get_env_side_effect(var): + return { + 'HTTPS_PROXY': 'http://squid.internal:3128', + 'JUJU_CHARM_HTTPS_PROXY': None, + }[var] + getenv.side_effect = get_env_side_effect + + with patch('subprocess.check_output') as _subp_check_output: + proxy_settings = { + 'HTTPS_PROXY': 'http://squid.internal:3128', + 'https_proxy': 'http://squid.internal:3128', + } + curl_cmd = ['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)] + + def check_output_side_effect(command, env): + return { + ' '.join(curl_cmd): PGP_KEY_ASCII_ARMOR, + }[' '.join(command)] + _subp_check_output.side_effect = check_output_side_effect + + fetch.import_key(PGP_KEY_ID) + _subp_check_output.assert_called_with(curl_cmd, env=proxy_settings) + w_keyfile.assert_called_once_with(key_name=PGP_KEY_ID, + key_material=PGP_KEY_BIN_PGP) + + @patch.object(fetch, 'log', lambda *args, **kwargs: None) + @patch.object(os, 'getenv') + @patch.object(fetch, '_write_apt_gpg_keyfile') + @patch.object(fetch, '_dearmor_gpg_key') + def test_import_apt_key_radix_charm_https_proxy(self, dearmor_gpg_key, + w_keyfile, getenv): + def dearmor_side_effect(key_asc): + return { + PGP_KEY_ASCII_ARMOR: PGP_KEY_BIN_PGP, + }[key_asc] + dearmor_gpg_key.side_effect = dearmor_side_effect + + def get_env_side_effect(var): + return { + 'HTTPS_PROXY': None, + 'JUJU_CHARM_HTTPS_PROXY': 'http://squid.internal:3128', + }[var] + getenv.side_effect = get_env_side_effect + + with patch('subprocess.check_output') as _subp_check_output: + proxy_settings = { + 'HTTPS_PROXY': 'http://squid.internal:3128', + 'https_proxy': 'http://squid.internal:3128', + } + curl_cmd = ['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)] + + def check_output_side_effect(command, env): + return { + ' '.join(curl_cmd): PGP_KEY_ASCII_ARMOR, + }[' '.join(command)] + _subp_check_output.side_effect = check_output_side_effect + + fetch.import_key(PGP_KEY_ID) + _subp_check_output.assert_called_with(curl_cmd, env=proxy_settings) + w_keyfile.assert_called_once_with(key_name=PGP_KEY_ID, + key_material=PGP_KEY_BIN_PGP) + + @patch.object(fetch, 'log', lambda *args, **kwargs: None) + @patch.object(fetch, '_dearmor_gpg_key') + @patch('subprocess.check_output') + def test_import_bad_apt_key(self, check_output, dearmor_gpg_key): + """Ensure error when importing apt key fails""" + errmsg = ('Invalid GPG key material. 
Check your network setup'
+                  ' (MTU, routing, DNS) and/or proxy server settings'
+                  ' as well as destination keyserver status.')
+        bad_keyid = 'foo'
+
+        curl_cmd = ['curl', ('https://keyserver.ubuntu.com'
+                             '/pks/lookup?op=get&options=mr'
+                             '&exact=on&search=0x{}').format(bad_keyid)]
+
+        def check_output_side_effect(command, env):
+            return {
+                ' '.join(curl_cmd): 'foobar',
+            }[' '.join(command)]
+        check_output.side_effect = check_output_side_effect
+
+        def dearmor_side_effect(key_asc):
+            raise fetch.GPGKeyError(errmsg)
+        dearmor_gpg_key.side_effect = dearmor_side_effect
+        try:
+            fetch.import_key(bad_keyid)
+            assert False
+        except fetch.GPGKeyError as e:
+            self.assertEqual(str(e), errmsg)
+
+    @patch('charmhelpers.fetch.ubuntu.log')
+    def test_add_source_none_ubuntu(self, log):
+        fetch.add_source(source=None)
+        self.assertTrue(log.called)
+
+    @patch('subprocess.check_call')
+    def test_add_source_ppa(self, check_call):
+        source = "ppa:test-ppa"
+        fetch.add_source(source=source)
+        check_call.assert_called_with(
+            ['add-apt-repository', '--yes', source], env={})
+
+    @patch("charmhelpers.fetch.ubuntu.log")
+    @patch('subprocess.check_call')
+    @patch('time.sleep')
+    def test_add_source_ppa_retries_30_times(self, sleep, check_call, log):
+        self.call_count = 0
+
+        def side_effect(*args, **kwargs):
+            """Raise an exception 3 times, then return 0."""
+            self.call_count += 1
+            if self.call_count <= fetch.CMD_RETRY_COUNT:
+                raise subprocess.CalledProcessError(
+                    returncode=1, cmd="some add-apt-repository command")
+            else:
+                return 0
+        check_call.side_effect = side_effect
+
+        source = "ppa:test-ppa"
+        fetch.add_source(source=source)
+        check_call.assert_called_with(
+            ['add-apt-repository', '--yes', source], env={})
+        sleep.assert_called_with(10)
+        self.assertEqual(fetch.CMD_RETRY_COUNT, sleep.call_count)
+
+    @patch('charmhelpers.fetch.ubuntu.log')
+    @patch('subprocess.check_call')
+    def test_add_source_http_ubuntu(self, check_call, log):
+        source = "http://archive.ubuntu.com/ubuntu raring-backports main"
+        fetch.add_source(source=source)
+        check_call.assert_called_with(
+            ['add-apt-repository', '--yes', source], env={})
+
+    @patch('charmhelpers.fetch.ubuntu.log')
+    @patch('subprocess.check_call')
+    def test_add_source_https(self, check_call, log):
+        source = "https://example.com"
+        fetch.add_source(source=source)
+        check_call.assert_called_with(
+            ['add-apt-repository', '--yes', source], env={})
+
+    @patch('charmhelpers.fetch.ubuntu.log')
+    @patch('subprocess.check_call')
+    def test_add_source_deb(self, check_call, log):
+        """add-apt-repository behaves differently when using the deb prefix.
+ + $ add-apt-repository --yes \ + "http://special.example.com/ubuntu precise-special main" + $ grep special /etc/apt/sources.list + deb http://special.example.com/ubuntu precise precise-special main + deb-src http://special.example.com/ubuntu precise precise-special main + + $ add-apt-repository --yes \ + "deb http://special.example.com/ubuntu precise-special main" + $ grep special /etc/apt/sources.list + deb http://special.example.com/ubuntu precise precise-special main + deb-src http://special.example.com/ubuntu precise precise-special main + deb http://special.example.com/ubuntu precise-special main + deb-src http://special.example.com/ubuntu precise-special main + """ + source = "deb http://archive.ubuntu.com/ubuntu raring-backports main" + fetch.add_source(source=source) + check_call.assert_called_with( + ['add-apt-repository', '--yes', source], env={}) + + @patch.object(fetch, '_write_apt_gpg_keyfile') + @patch.object(fetch, '_dearmor_gpg_key') + @patch('charmhelpers.fetch.ubuntu.log') + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_add_source_http_and_key_id(self, check_call, check_output, log, + dearmor_gpg_key, + w_keyfile): + def dearmor_side_effect(key_asc): + return { + PGP_KEY_ASCII_ARMOR: PGP_KEY_BIN_PGP, + }[key_asc] + dearmor_gpg_key.side_effect = dearmor_side_effect + + curl_cmd = ['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)] + + def check_output_side_effect(command, env): + return { + ' '.join(curl_cmd): PGP_KEY_ASCII_ARMOR, + }[' '.join(command)] + check_output.side_effect = check_output_side_effect + source = "http://archive.ubuntu.com/ubuntu raring-backports main" + check_call.return_value = 0 # Successful exit code + fetch.add_source(source=source, key=PGP_KEY_ID) + check_call.assert_any_call( + ['add-apt-repository', '--yes', source], env={}), + check_output.assert_has_calls([ + call(['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)], + env=None), + ]) + + @patch.object(fetch, '_write_apt_gpg_keyfile') + @patch.object(fetch, '_dearmor_gpg_key') + @patch('charmhelpers.fetch.ubuntu.log') + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_add_source_https_and_key_id(self, check_call, check_output, log, + dearmor_gpg_key, + w_keyfile): + def dearmor_side_effect(key_asc): + return { + PGP_KEY_ASCII_ARMOR: PGP_KEY_BIN_PGP, + }[key_asc] + dearmor_gpg_key.side_effect = dearmor_side_effect + + curl_cmd = ['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)] + + def check_output_side_effect(command, env): + return { + ' '.join(curl_cmd): PGP_KEY_ASCII_ARMOR, + }[' '.join(command)] + check_output.side_effect = check_output_side_effect + + check_call.return_value = 0 + + source = "https://USER:PASS@private-ppa.launchpad.net/project/awesome" + fetch.add_source(source=source, key=PGP_KEY_ID) + check_call.assert_any_call( + ['add-apt-repository', '--yes', source], env={}), + check_output.assert_has_calls([ + call(['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)], + env=None), + ]) + + @patch.object(fetch, '_write_apt_gpg_keyfile') + @patch.object(fetch, '_dearmor_gpg_key') + @patch('charmhelpers.fetch.ubuntu.log') + @patch.object(fetch, 'get_distrib_codename') + @patch('subprocess.check_call') + @patch('subprocess.Popen') + def 
test_add_source_http_and_key_gpg1(self, popen, check_call, + get_distrib_codename, log, + dearmor_gpg_key, + w_keyfile): + + def check_call_side_effect(*args, **kwargs): + # Make sure the gpg key has already been added before the + # add-apt-repository call, as the update could fail otherwise. + popen.assert_called_with( + ['gpg', '--with-colons', '--with-fingerprint'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + return 0 + + source = "http://archive.ubuntu.com/ubuntu raring-backports main" + key = PGP_KEY_ASCII_ARMOR + key_bytes = PGP_KEY_ASCII_ARMOR.encode('utf-8') + get_distrib_codename.return_value = 'trusty' + check_call.side_effect = check_call_side_effect + + expected_key = '35F77D63B5CEC106C577ED856E85A86E4652B4E6' + if six.PY3: + popen.return_value.communicate.return_value = [b""" +pub:-:1024:1:6E85A86E4652B4E6:2009-01-18:::-:Launchpad PPA for Landscape: +fpr:::::::::35F77D63B5CEC106C577ED856E85A86E4652B4E6: + """, b''] + else: + popen.return_value.communicate.return_value = [""" +pub:-:1024:1:6E85A86E4652B4E6:2009-01-18:::-:Launchpad PPA for Landscape: +fpr:::::::::35F77D63B5CEC106C577ED856E85A86E4652B4E6: + """, ''] + + dearmor_gpg_key.return_value = PGP_KEY_BIN_PGP + + fetch.add_source(source=source, key=key) + popen.assert_called_with( + ['gpg', '--with-colons', '--with-fingerprint'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + dearmor_gpg_key.assert_called_with(key_bytes) + w_keyfile.assert_called_with(key_name=expected_key, + key_material=PGP_KEY_BIN_PGP) + check_call.assert_any_call( + ['add-apt-repository', '--yes', source], env={}), + + @patch.object(fetch, '_write_apt_gpg_keyfile') + @patch.object(fetch, '_dearmor_gpg_key') + @patch('charmhelpers.fetch.ubuntu.log') + @patch.object(fetch, 'get_distrib_codename') + @patch('subprocess.check_call') + @patch('subprocess.Popen') + def test_add_source_http_and_key_gpg2(self, popen, check_call, + get_distrib_codename, log, + dearmor_gpg_key, + w_keyfile): + + def check_call_side_effect(*args, **kwargs): + # Make sure the gpg key has already been added before the + # add-apt-repository call, as the update could fail otherwise. 
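+            # Editor's note: the expected_key asserted below is the 10th
+            # colon-separated field of the 'fpr:' record printed by
+            # `gpg --with-colons --with-fingerprint`. A minimal, illustrative
+            # parser for that record (kept in a comment so the test's
+            # behaviour is unchanged; not charm-helpers' implementation):
+            #
+            #     def parse_fingerprint(colons_output):
+            #         for line in colons_output.splitlines():
+            #             fields = line.split(':')
+            #             if fields and fields[0] == 'fpr':
+            #                 return fields[9]
+            #         return None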
+ popen.assert_called_with( + ['gpg', '--with-colons', '--with-fingerprint'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + return 0 + + source = "http://archive.ubuntu.com/ubuntu raring-backports main" + key = PGP_KEY_ASCII_ARMOR + key_bytes = PGP_KEY_ASCII_ARMOR.encode('utf-8') + get_distrib_codename.return_value = 'bionic' + check_call.side_effect = check_call_side_effect + + expected_key = '35F77D63B5CEC106C577ED856E85A86E4652B4E6' + + if six.PY3: + popen.return_value.communicate.return_value = [b""" +fpr:::::::::35F77D63B5CEC106C577ED856E85A86E4652B4E6: +uid:-::::1232306042::52FE92E6867B4C099AA1A1877A804A965F41A98C::ppa::::::::::0: + """, b''] + else: + # python2 on a distro with gpg2 (unlikely, but possible) + popen.return_value.communicate.return_value = [""" +fpr:::::::::35F77D63B5CEC106C577ED856E85A86E4652B4E6: +uid:-::::1232306042::52FE92E6867B4C099AA1A1877A804A965F41A98C::ppa::::::::::0: + """, ''] + + dearmor_gpg_key.return_value = PGP_KEY_BIN_PGP + + fetch.add_source(source=source, key=key) + popen.assert_called_with( + ['gpg', '--with-colons', '--with-fingerprint'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + dearmor_gpg_key.assert_called_with(key_bytes) + w_keyfile.assert_called_with(key_name=expected_key, + key_material=PGP_KEY_BIN_PGP) + check_call.assert_any_call( + ['add-apt-repository', '--yes', source], env={}), + + def test_add_source_cloud_invalid_pocket(self): + source = "cloud:havana-updates" + self.assertRaises(fetch.SourceConfigError, + fetch.add_source, source) + + @patch('charmhelpers.fetch.ubuntu.log') + @patch.object(fetch, 'filter_installed_packages') + @patch.object(fetch, 'apt_install') + @patch.object(fetch, 'get_distrib_codename') + def test_add_source_cloud_pocket_style(self, get_distrib_codename, + apt_install, + filter_pkg, log): + source = "cloud:precise-updates/havana" + get_distrib_codename.return_value = 'precise' + result = ('# Ubuntu Cloud Archive\n' + 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu' + ' precise-updates/havana main\n') + + with patch_open() as (mock_open, mock_file): + fetch.add_source(source=source) + mock_file.write.assert_called_with(result) + filter_pkg.assert_called_with(['ubuntu-cloud-keyring']) + + @patch('charmhelpers.fetch.ubuntu.log') + @patch.object(fetch, 'filter_installed_packages') + @patch.object(fetch, 'apt_install') + @patch.object(fetch, 'get_distrib_codename') + def test_add_source_cloud_os_style(self, get_distrib_codename, apt_install, + filter_pkg, log): + source = "cloud:precise-havana" + get_distrib_codename.return_value = 'precise' + result = ('# Ubuntu Cloud Archive\n' + 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu' + ' precise-updates/havana main\n') + with patch_open() as (mock_open, mock_file): + fetch.add_source(source=source) + mock_file.write.assert_called_with(result) + filter_pkg.assert_called_with(['ubuntu-cloud-keyring']) + + @patch('charmhelpers.fetch.ubuntu.log') + @patch.object(fetch, 'filter_installed_packages') + @patch.object(fetch, 'apt_install') + def test_add_source_cloud_distroless_style(self, apt_install, + filter_pkg, log): + source = "cloud:havana" + result = ('# Ubuntu Cloud Archive\n' + 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu' + ' precise-updates/havana main\n') + with patch_open() as (mock_open, mock_file): + fetch.add_source(source=source) + mock_file.write.assert_called_with(result) + filter_pkg.assert_called_with(['ubuntu-cloud-keyring']) + + 
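+    # Editor's note: the three cloud-archive tests above all expect the same
+    # sources.list stanza; only the way the "cloud:" source names the release
+    # differs. A minimal sketch of that mapping (illustrative only, with a
+    # hypothetical helper name; not charm-helpers' actual implementation):
+    #
+    #     def render_uca_source(release, codename):
+    #         # e.g. render_uca_source('havana', 'precise') for 'cloud:havana'
+    #         return ('# Ubuntu Cloud Archive\n'
+    #                 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu'
+    #                 ' {}-updates/{} main\n'.format(codename, release))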
@patch('charmhelpers.fetch.ubuntu.log') + @patch.object(fetch, 'get_distrib_codename') + @patch('platform.machine') + def test_add_source_proposed_x86_64(self, _machine, + get_distrib_codename, log): + source = "proposed" + result = ('# Proposed\n' + 'deb http://archive.ubuntu.com/ubuntu precise-proposed' + ' main universe multiverse restricted\n') + get_distrib_codename.return_value = 'precise' + _machine.return_value = 'x86_64' + with patch_open() as (mock_open, mock_file): + fetch.add_source(source=source) + mock_file.write.assert_called_with(result) + + @patch('charmhelpers.fetch.ubuntu.log') + @patch.object(fetch, 'get_distrib_codename') + @patch('platform.machine') + def test_add_source_proposed_ppc64le(self, _machine, + get_distrib_codename, log): + source = "proposed" + result = ( + "# Proposed\n" + "deb http://ports.ubuntu.com/ubuntu-ports precise-proposed main " + "universe multiverse restricted\n") + get_distrib_codename.return_value = 'precise' + _machine.return_value = 'ppc64le' + with patch_open() as (mock_open, mock_file): + fetch.add_source(source=source) + mock_file.write.assert_called_with(result) + + @patch.object(fetch, '_write_apt_gpg_keyfile') + @patch.object(fetch, '_dearmor_gpg_key') + @patch('charmhelpers.fetch.ubuntu.log') + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_add_source_http_and_key_id_ubuntu(self, check_call, check_output, + log, dearmor_gpg_key, + w_keyfile): + def dearmor_side_effect(key_asc): + return { + PGP_KEY_ASCII_ARMOR: PGP_KEY_BIN_PGP, + }[key_asc] + dearmor_gpg_key.side_effect = dearmor_side_effect + + curl_cmd = ['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)] + + def check_output_side_effect(command, env): + return { + ' '.join(curl_cmd): PGP_KEY_ASCII_ARMOR, + }[' '.join(command)] + check_output.side_effect = check_output_side_effect + check_call.return_value = 0 + source = "http://archive.ubuntu.com/ubuntu raring-backports main" + key_id = PGP_KEY_ID + fetch.add_source(source=source, key=key_id) + check_call.assert_any_call( + ['add-apt-repository', '--yes', source], env={}), + check_output.assert_has_calls([ + call(['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)], + env=None), + ]) + + @patch.object(fetch, '_write_apt_gpg_keyfile') + @patch.object(fetch, '_dearmor_gpg_key') + @patch('charmhelpers.fetch.ubuntu.log') + @patch('subprocess.check_output') + @patch('subprocess.check_call') + def test_add_source_https_and_key_id_ubuntu(self, check_call, check_output, + log, dearmor_gpg_key, + w_keyfile): + def dearmor_side_effect(key_asc): + return { + PGP_KEY_ASCII_ARMOR: PGP_KEY_BIN_PGP, + }[key_asc] + dearmor_gpg_key.side_effect = dearmor_side_effect + + curl_cmd = ['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)] + + def check_output_side_effect(command, env): + return { + ' '.join(curl_cmd): PGP_KEY_ASCII_ARMOR, + }[' '.join(command)] + check_output.side_effect = check_output_side_effect + check_call.return_value = 0 + + source = "https://USER:PASS@private-ppa.launchpad.net/project/awesome" + fetch.add_source(source=source, key=PGP_KEY_ID) + check_call.assert_any_call( + ['add-apt-repository', '--yes', source], env={}), + check_output.assert_has_calls([ + call(['curl', ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr' + '&exact=on&search=0x{}').format(PGP_KEY_ID)], + 
env=None), + ]) + + @patch('charmhelpers.fetch.ubuntu.log') + def test_configure_bad_install_source(self, log): + try: + fetch.add_source('foo', fail_invalid=True) + self.fail("Calling add_source('foo') should fail") + except fetch.SourceConfigError as e: + self.assertEqual(str(e), "Unknown source: 'foo'") + + @patch('charmhelpers.fetch.ubuntu.get_distrib_codename') + def test_configure_install_source_uca_staging(self, _lsb): + """Test configuring installation source from UCA staging sources""" + _lsb.return_value = FAKE_CODENAME + # staging pockets are configured as PPAs + with patch('subprocess.check_call') as _subp: + src = 'cloud:precise-folsom/staging' + fetch.add_source(src) + cmd = ['add-apt-repository', '-y', + 'ppa:ubuntu-cloud-archive/folsom-staging'] + _subp.assert_called_with(cmd, env={}) + + @patch(builtin_open) + @patch('charmhelpers.fetch.ubuntu.apt_install') + @patch('charmhelpers.fetch.ubuntu.get_distrib_codename') + @patch('charmhelpers.fetch.ubuntu.filter_installed_packages') + def test_configure_install_source_uca_repos( + self, _fip, _lsb, _install, _open): + """Test configuring installation source from UCA sources""" + _lsb.return_value = FAKE_CODENAME + _file = MagicMock(spec=io.FileIO) + _open.return_value = _file + _fip.side_effect = lambda x: x + for src, url in UCA_SOURCES: + actual_url = "# Ubuntu Cloud Archive\n{}\n".format(url) + fetch.add_source(src) + _install.assert_called_with(['ubuntu-cloud-keyring'], + fatal=True) + _open.assert_called_with( + '/etc/apt/sources.list.d/cloud-archive.list', + 'w' + ) + _file.__enter__().write.assert_called_with(actual_url) + + def test_configure_install_source_bad_uca(self): + """Test configuring installation source from bad UCA source""" + try: + fetch.add_source('cloud:foo-bar', fail_invalid=True) + self.fail("add_source('cloud:foo-bar') should fail") + except fetch.SourceConfigError as e: + _e = ('Invalid Cloud Archive release specified: foo-bar' + ' on this Ubuntuversion') + self.assertTrue(str(e).startswith(_e)) + + @patch('charmhelpers.fetch.ubuntu.log') + def test_add_unparsable_source(self, log_): + source = "propsed" # Minor typo + fetch.add_source(source=source) + self.assertEqual(1, log_.call_count) + + @patch('charmhelpers.fetch.ubuntu.log') + def test_add_distro_source(self, log): + source = "distro" + # distro is a noop but test validate no exception is thrown + fetch.add_source(source=source) + + +class AptTests(TestCase): + + def setUp(self): + super(AptTests, self).setUp() + self.patch(fetch, 'get_apt_dpkg_env', lambda: {}) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_apt_upgrade_non_fatal(self, log, mock_call): + options = ['--foo', '--bar'] + fetch.apt_upgrade(options) + + mock_call.assert_called_with( + ['apt-get', '--assume-yes', + '--foo', '--bar', 'upgrade'], + env={}) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_apt_upgrade_fatal(self, log, mock_call): + options = ['--foo', '--bar'] + fetch.apt_upgrade(options, fatal=True) + + mock_call.assert_called_with( + ['apt-get', '--assume-yes', + '--foo', '--bar', 'upgrade'], + env={}) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_apt_dist_upgrade_fatal(self, log, mock_call): + options = ['--foo', '--bar'] + fetch.apt_upgrade(options, fatal=True, dist=True) + + mock_call.assert_called_with( + ['apt-get', '--assume-yes', + '--foo', '--bar', 'dist-upgrade'], + env={}) + + @patch('subprocess.call') + 
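+    # Editor's note: throughout AptTests, fatal commands are expected to go
+    # through subprocess.check_call (raises on a non-zero exit) while
+    # non-fatal ones go through subprocess.call (returns the exit code). A
+    # minimal sketch of that dispatch (illustrative only):
+    #
+    #     def run_apt(cmd, fatal=False, env=None):
+    #         if fatal:
+    #             return subprocess.check_call(cmd, env=env)
+    #         return subprocess.call(cmd, env=env)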
@patch('charmhelpers.fetch.ubuntu.log') + def test_installs_apt_packages(self, log, mock_call): + packages = ['foo', 'bar'] + options = ['--foo', '--bar'] + + fetch.apt_install(packages, options) + + mock_call.assert_called_with( + ['apt-get', '--assume-yes', + '--foo', '--bar', 'install', 'foo', 'bar'], + env={}) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_installs_apt_packages_without_options(self, log, mock_call): + packages = ['foo', 'bar'] + + fetch.apt_install(packages) + + mock_call.assert_called_with( + ['apt-get', '--assume-yes', + '--option=Dpkg::Options::=--force-confold', + 'install', 'foo', 'bar'], + env={}) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_installs_apt_packages_as_string(self, log, mock_call): + packages = 'foo bar' + options = ['--foo', '--bar'] + + fetch.apt_install(packages, options) + + mock_call.assert_called_with( + ['apt-get', '--assume-yes', + '--foo', '--bar', 'install', 'foo bar'], + env={}) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_installs_apt_packages_with_possible_errors(self, log, + check_call): + packages = ['foo', 'bar'] + options = ['--foo', '--bar'] + + fetch.apt_install(packages, options, fatal=True) + + check_call.assert_called_with( + ['apt-get', '--assume-yes', + '--foo', '--bar', 'install', 'foo', 'bar'], + env={}) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_purges_apt_packages_as_string_fatal(self, log, mock_call): + packages = 'irrelevant names' + mock_call.side_effect = OSError('fail') + + self.assertRaises(OSError, fetch.apt_purge, packages, fatal=True) + self.assertTrue(log.called) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_purges_apt_packages_fatal(self, log, mock_call): + packages = ['irrelevant', 'names'] + mock_call.side_effect = OSError('fail') + + self.assertRaises(OSError, fetch.apt_purge, packages, fatal=True) + self.assertTrue(log.called) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_purges_apt_packages_as_string_nofatal(self, log, mock_call): + packages = 'foo bar' + + fetch.apt_purge(packages) + + self.assertTrue(log.called) + mock_call.assert_called_with( + ['apt-get', '--assume-yes', 'purge', 'foo bar'], + env={}) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_purges_apt_packages_nofatal(self, log, mock_call): + packages = ['foo', 'bar'] + + fetch.apt_purge(packages) + + self.assertTrue(log.called) + mock_call.assert_called_with( + ['apt-get', '--assume-yes', 'purge', 'foo', 'bar'], + env={}) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_mark_apt_packages_as_string_fatal(self, log, mock_call): + packages = 'irrelevant names' + mock_call.side_effect = OSError('fail') + + self.assertRaises(OSError, fetch.apt_mark, packages, sentinel.mark, + fatal=True) + self.assertTrue(log.called) + + @patch('subprocess.check_call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_mark_apt_packages_fatal(self, log, mock_call): + packages = ['irrelevant', 'names'] + mock_call.side_effect = OSError('fail') + + self.assertRaises(OSError, fetch.apt_mark, packages, sentinel.mark, + fatal=True) + self.assertTrue(log.called) + + @patch('subprocess.call') + @patch('charmhelpers.fetch.ubuntu.log') + def test_mark_apt_packages_as_string_nofatal(self, log, mock_call): + packages = 'foo bar' + + fetch.apt_mark(packages, 
sentinel.mark)
+
+        self.assertTrue(log.called)
+        mock_call.assert_called_with(
+            ['apt-mark', sentinel.mark, 'foo bar'],
+            universal_newlines=True)
+
+    @patch('subprocess.call')
+    @patch('charmhelpers.fetch.ubuntu.log')
+    def test_mark_apt_packages_nofatal(self, log, mock_call):
+        packages = ['foo', 'bar']
+
+        fetch.apt_mark(packages, sentinel.mark)
+
+        self.assertTrue(log.called)
+        mock_call.assert_called_with(
+            ['apt-mark', sentinel.mark, 'foo', 'bar'],
+            universal_newlines=True)
+
+    @patch('subprocess.check_call')
+    @patch('charmhelpers.fetch.ubuntu.log')
+    def test_mark_apt_packages_nofatal_abortonfatal(self, log, mock_call):
+        packages = ['foo', 'bar']
+
+        fetch.apt_mark(packages, sentinel.mark, fatal=True)
+
+        self.assertTrue(log.called)
+        mock_call.assert_called_with(
+            ['apt-mark', sentinel.mark, 'foo', 'bar'],
+            universal_newlines=True)
+
+    @patch('charmhelpers.fetch.ubuntu.apt_mark')
+    def test_apt_hold(self, apt_mark):
+        fetch.apt_hold(sentinel.packages)
+        apt_mark.assert_called_once_with(sentinel.packages, 'hold',
+                                         fatal=False)
+
+    @patch('charmhelpers.fetch.ubuntu.apt_mark')
+    def test_apt_hold_fatal(self, apt_mark):
+        fetch.apt_hold(sentinel.packages, fatal=sentinel.fatal)
+        apt_mark.assert_called_once_with(sentinel.packages, 'hold',
+                                         fatal=sentinel.fatal)
+
+    @patch('charmhelpers.fetch.ubuntu.apt_mark')
+    def test_apt_unhold(self, apt_mark):
+        fetch.apt_unhold(sentinel.packages)
+        apt_mark.assert_called_once_with(sentinel.packages, 'unhold',
+                                         fatal=False)
+
+    @patch('charmhelpers.fetch.ubuntu.apt_mark')
+    def test_apt_unhold_fatal(self, apt_mark):
+        fetch.apt_unhold(sentinel.packages, fatal=sentinel.fatal)
+        apt_mark.assert_called_once_with(sentinel.packages, 'unhold',
+                                         fatal=sentinel.fatal)
+
+    @patch('subprocess.check_call')
+    def test_apt_update_fatal(self, check_call):
+        fetch.apt_update(fatal=True)
+        check_call.assert_called_with(
+            ['apt-get', 'update'],
+            env={})
+
+    @patch('subprocess.call')
+    def test_apt_update_nonfatal(self, call):
+        fetch.apt_update()
+        call.assert_called_with(
+            ['apt-get', 'update'],
+            env={})
+
+    @patch('time.sleep')
+    @patch('subprocess.check_call')
+    def test_run_apt_command_retries_if_fatal(self, check_call, sleep):
+        """The _run_apt_command function retries the command if it can't get
+        the APT lock."""
+        self.called = False
+
+        def side_effect(*args, **kwargs):
+            """
+            First, raise an exception (can't acquire lock), then return 0
+            (the lock is grabbed).
+ """ + if not self.called: + self.called = True + raise subprocess.CalledProcessError( + returncode=100, cmd="some command") + else: + return 0 + + check_call.side_effect = side_effect + check_call.return_value = 0 + + from charmhelpers.fetch.ubuntu import _run_apt_command + _run_apt_command(["some", "command"], fatal=True) + self.assertTrue(sleep.called) + + @patch.object(fetch, 'apt_cache') + def test_get_upstream_version(self, cache): + cache.side_effect = fake_apt_cache + self.assertEqual(fetch.get_upstream_version('vim'), '7.3.547') + self.assertEqual(fetch.get_upstream_version('emacs'), None) + self.assertEqual(fetch.get_upstream_version('unknown'), None) + + @patch('charmhelpers.fetch.ubuntu._run_apt_command') + def test_apt_autoremove_fatal(self, run_apt_command): + fetch.apt_autoremove(purge=True, fatal=True) + run_apt_command.assert_called_with( + ['apt-get', '--assume-yes', 'autoremove', '--purge'], + True + ) + + @patch('charmhelpers.fetch.ubuntu._run_apt_command') + def test_apt_autoremove_nonfatal(self, run_apt_command): + fetch.apt_autoremove(purge=False, fatal=False) + run_apt_command.assert_called_with( + ['apt-get', '--assume-yes', 'autoremove'], + False + ) + + +class TestAptDpkgEnv(TestCase): + + @patch.object(fetch, 'get_system_env') + def test_get_apt_dpkg_env(self, mock_get_system_env): + mock_get_system_env.return_value = '/a/path' + self.assertEquals( + fetch.get_apt_dpkg_env(), + {'DEBIAN_FRONTEND': 'noninteractive', 'PATH': '/a/path'}) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch_ubuntu_apt_pkg.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch_ubuntu_apt_pkg.py new file mode 100644 index 0000000000000000000000000000000000000000..d0affc324568fc1290147b8b298af7a2b152a1df --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_fetch_ubuntu_apt_pkg.py @@ -0,0 +1,216 @@ +import mock +import subprocess +import unittest + +from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg + + +class Test_apt_pkg_Cache(unittest.TestCase): + """Borrow PatchHelper methods from ``charms.openstack``.""" + def setUp(self): + self._patches = {} + self._patches_start = {} + + def tearDown(self): + for k, v in self._patches.items(): + v.stop() + setattr(self, k, None) + self._patches = None + self._patches_start = None + + def patch(self, patchee, name=None, **kwargs): + """Patch a patchable thing. Uses mock.patch() to do the work. + Automatically unpatches at the end of the test. + + The mock gets added to the test object (self) using 'name' or the last + part of the patchee string, after the final dot. + + :param patchee: representing module.object that is to be + patched. + :param name: optional name to call the mock. + :param **kwargs: any other args to pass to mock.patch() + """ + mocked = mock.patch(patchee, **kwargs) + if name is None: + name = patchee.split('.')[-1] + started = mocked.start() + self._patches[name] = mocked + self._patches_start[name] = started + setattr(self, name, started) + + def patch_object(self, obj, attr, name=None, **kwargs): + """Patch a patchable thing. Uses mock.patch.object() to do the work. + Automatically unpatches at the end of the test. + + The mock gets added to the test object (self) using 'name' or the attr + passed in the arguments. + + :param obj: an object that needs to have an attribute patched. + :param attr: that represents the attribute being patched. + :param name: optional name to call the mock. 
+ :param **kwargs: any other args to pass to mock.patch() + """ + mocked = mock.patch.object(obj, attr, **kwargs) + if name is None: + name = attr + started = mocked.start() + self._patches[name] = mocked + self._patches_start[name] = started + setattr(self, name, started) + + def test_apt_cache_show(self): + self.patch_object(apt_pkg.subprocess, 'check_output') + apt_cache = apt_pkg.Cache() + self.check_output.return_value = ( + 'Package: dpkg\n' + 'Version: 1.19.0.5ubuntu2.1\n' + 'Bugs: https://bugs.launchpad.net/ubuntu/+filebug\n' + 'Description-en: Debian package management system\n' + ' Multiline description\n' + '\n' + 'Package: lsof\n' + 'Architecture: amd64\n' + 'Version: 4.91+dfsg-1ubuntu1\n' + '\n' + 'N: There is 1 additional record.\n') + self.assertEquals( + apt_cache._apt_cache_show(['package']), + {'dpkg': { + 'package': 'dpkg', 'version': '1.19.0.5ubuntu2.1', + 'bugs': 'https://bugs.launchpad.net/ubuntu/+filebug', + 'description-en': 'Debian package management system\n' + 'Multiline description'}, + 'lsof': { + 'package': 'lsof', 'architecture': 'amd64', + 'version': '4.91+dfsg-1ubuntu1'}, + }) + self.check_output.assert_called_once_with( + ['apt-cache', 'show', '--no-all-versions', 'package'], + stderr=subprocess.STDOUT, + universal_newlines=True) + + def test_dpkg_list(self): + self.patch_object(apt_pkg.subprocess, 'check_output') + apt_cache = apt_pkg.Cache() + self.check_output.return_value = ( + 'Desired=Unknown/Install/Remove/Purge/Hold\n' + '| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/' + 'trig-aWait/Trig-pend\n' + '|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)\n' + '||/ Name Version Architecture Description\n' + '+++-=============================-==================-===========-' + '=================================\n' + 'ii dpkg 1.19.0.5ubuntu2.1 amd64 ' + 'Debian package management system\n' + 'rc linux-image-4.15.0-42-generic 4.15.0-42.45 amd64 ' + 'Signed kernel image generic\n' + 'ii lsof 4.91+dfsg-1ubuntu1 amd64 ' + 'utility to list open files\n') + expect = { + 'dpkg': { + 'name': 'dpkg', + 'version': '1.19.0.5ubuntu2.1', + 'architecture': 'amd64', + 'description': 'Debian package management system' + }, + 'lsof': { + 'name': 'lsof', + 'version': '4.91+dfsg-1ubuntu1', + 'architecture': 'amd64', + 'description': 'utility to list open files' + }, + } + self.assertEquals( + apt_cache._dpkg_list(['package']), expect) + self.check_output.side_effect = subprocess.CalledProcessError( + 1, '', output=self.check_output.return_value) + self.assertEquals(apt_cache._dpkg_list(['package']), expect) + self.check_output.side_effect = subprocess.CalledProcessError(2, '') + with self.assertRaises(subprocess.CalledProcessError): + _ = apt_cache._dpkg_list(['package']) + + def test_version_compare(self): + self.patch_object(apt_pkg.subprocess, 'check_call') + self.assertEquals(apt_pkg.version_compare('2', '1'), 1) + self.check_call.assert_called_once_with( + ['dpkg', '--compare-versions', '2', 'gt', '1'], + stderr=subprocess.STDOUT, + universal_newlines=True) + self.check_call.side_effect = [ + subprocess.CalledProcessError(1, '', ''), + None, + None, + ] + self.assertEquals(apt_pkg.version_compare('2', '2'), 0) + self.check_call.side_effect = [ + subprocess.CalledProcessError(1, '', ''), + subprocess.CalledProcessError(1, '', ''), + None, + ] + self.assertEquals(apt_pkg.version_compare('1', '2'), -1) + self.check_call.side_effect = subprocess.CalledProcessError(2, '', '') + self.assertRaises(subprocess.CalledProcessError, + apt_pkg.version_compare, 
'2', '2') + + def test_apt_cache(self): + self.patch_object(apt_pkg.subprocess, 'check_output') + apt_cache = apt_pkg.Cache() + self.check_output.side_effect = [ + ('Package: dpkg\n' + 'Version: 1.19.0.6ubuntu0\n' + 'Bugs: https://bugs.launchpad.net/ubuntu/+filebug\n' + 'Description-en: Debian package management system\n' + ' Multiline description\n' + '\n' + 'Package: lsof\n' + 'Architecture: amd64\n' + 'Version: 4.91+dfsg-1ubuntu1\n' + '\n' + 'N: There is 1 additional record.\n'), + ('Desired=Unknown/Install/Remove/Purge/Hold\n' + '| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/' + 'trig-aWait/Trig-pend\n' + '|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)\n' + '||/ Name Version Architecture Description\n' + '+++-=============================-==================-===========-' + '=================================\n' + 'ii dpkg 1.19.0.5ubuntu2.1 amd64 ' + 'Debian package management system\n' + 'rc linux-image-4.15.0-42-generic 4.15.0-42.45 amd64 ' + 'Signed kernel image generic\n' + 'ii lsof 4.91+dfsg-1ubuntu1 amd64 ' + 'utility to list open files\n'), + ] + pkg = apt_cache['dpkg'] + self.assertEquals(pkg.name, 'dpkg') + self.assertEquals(pkg.current_ver.ver_str, '1.19.0.5ubuntu2.1') + self.assertEquals(pkg.architecture, 'amd64') + self.check_output.side_effect = [ + subprocess.CalledProcessError(100, ''), + subprocess.CalledProcessError(1, ''), + ] + with self.assertRaises(KeyError): + pkg = apt_cache['nonexistent'] + self.check_output.side_effect = [ + ('Package: dpkg\n' + 'Version: 1.19.0.6ubuntu0\n' + 'Bugs: https://bugs.launchpad.net/ubuntu/+filebug\n' + 'Description-en: Debian package management system\n' + ' Multiline description\n' + '\n' + 'Package: lsof\n' + 'Architecture: amd64\n' + 'Version: 4.91+dfsg-1ubuntu1\n' + '\n' + 'N: There is 1 additional record.\n'), + subprocess.CalledProcessError(42, ''), + ] + with self.assertRaises(subprocess.CalledProcessError): + # System error occurs while making dpkg inquiry + pkg = apt_cache['dpkg'] + self.check_output.side_effect = [ + subprocess.CalledProcessError(42, ''), + subprocess.CalledProcessError(1, ''), + ] + with self.assertRaises(subprocess.CalledProcessError): + pkg = apt_cache['system-error-occurs-while-making-apt-inquiry'] diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_giturl.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_giturl.py new file mode 100644 index 0000000000000000000000000000000000000000..e657027fab59ed7054275c7910686db7c0d6e671 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_giturl.py @@ -0,0 +1,115 @@ +import os +import shutil +import subprocess +import tempfile +from testtools import TestCase +from mock import ( + MagicMock, + patch, +) +from charmhelpers.core.host import chdir + +import six +if six.PY3: + from urllib.parse import urlparse +else: + from urlparse import urlparse + + +try: + from charmhelpers.fetch import ( + giturl, + UnhandledSource, + ) +except ImportError: + giturl = None + UnhandledSource = None + + +class GitUrlFetchHandlerTest(TestCase): + def setUp(self): + super(GitUrlFetchHandlerTest, self).setUp() + self.valid_urls = ( + "http://example.com/git-branch", + "https://example.com/git-branch", + "git://example.com/git-branch", + ) + self.invalid_urls = ( + "file://example.com/foo.tar.bz2", + "abc:example", + "garbage", + ) + self.fh = giturl.GitUrlFetchHandler() + + def test_handles_git_urls(self): + for url in self.valid_urls: + result = 
self.fh.can_handle(url)
+            self.assertEqual(result, True, url)
+        for url in self.invalid_urls:
+            result = self.fh.can_handle(url)
+            self.assertNotEqual(result, True, url)
+
+    @patch.object(giturl, 'check_output')
+    def test_clone(self, check_output):
+        dest_path = "/destination/path"
+        branch = "master"
+        for url in self.valid_urls:
+            self.fh.remote_branch = MagicMock()
+            self.fh.load_plugins = MagicMock()
+            self.fh.clone(url, dest_path, branch, None)
+
+            check_output.assert_called_with(
+                ['git', 'clone', url, dest_path, '--branch', branch], stderr=-2)
+
+        for url in self.invalid_urls:
+            with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
+                self.assertRaises(UnhandledSource, self.fh.clone, url,
+                                  dest_path, None,
+                                  branch)
+
+    def test_clone_functional(self):
+        src = None
+        dst = None
+        try:
+            src = tempfile.mkdtemp()
+            with chdir(src):
+                subprocess.check_output(['git', 'init'])
+                subprocess.check_output(['git', 'config', 'user.name', 'Joe'])
+                subprocess.check_output(
+                    ['git', 'config', 'user.email', 'joe@test.com'])
+                subprocess.check_output(['touch', 'foo'])
+                subprocess.check_output(['git', 'add', 'foo'])
+                subprocess.check_output(['git', 'commit', '-m', 'test'])
+            dst = tempfile.mkdtemp()
+            os.rmdir(dst)
+            self.fh.clone(src, dst)
+            assert os.path.exists(os.path.join(dst, '.git'))
+            self.fh.clone(src, dst)  # idempotent
+            assert os.path.exists(os.path.join(dst, '.git'))
+        finally:
+            if src:
+                shutil.rmtree(src, ignore_errors=True)
+            if dst:
+                shutil.rmtree(dst, ignore_errors=True)
+
+    def test_installs(self):
+        self.fh.clone = MagicMock()
+
+        for url in self.valid_urls:
+            branch_name = urlparse(url).path.strip("/").split("/")[-1]
+            dest = os.path.join('foo', 'fetched',
+                                os.path.basename(branch_name))
+            with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
+                where = self.fh.install(url)
+            self.assertEqual(where, dest)
+
+    def test_installs_specified_dest(self):
+        self.fh.clone = MagicMock()
+
+        for url in self.valid_urls:
+            branch_name = urlparse(url).path.strip("/").split("/")[-1]
+            dest_repo = os.path.join('/tmp/git/',
+                                     os.path.basename(branch_name))
+            with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
+                where = self.fh.install(url, dest="/tmp/git")
+            self.assertEqual(where, dest_repo)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_snap.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_snap.py
new file mode 100644
index 0000000000000000000000000000000000000000..02e6a70c0b2a31ac48bbd81f43d708dd0ec5d466
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/fetch/test_snap.py
@@ -0,0 +1,79 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from mock import patch
+from unittest import TestCase
+import charmhelpers.fetch.snap as fetch_snap
+
+__author__ = 'Joseph Borg <joseph.borg@canonical.com>'
+
+TEST_ENV = {'foo': 'bar'}
+
+
+class SnapTest(TestCase):
+    """
+    Test the install and removal of a snap.
+ """ + @patch.object(fetch_snap, 'log', lambda *args, **kwargs: None) + @patch('subprocess.check_call') + @patch('os.environ', TEST_ENV) + def testSnapInstall(self, check_call): + """ + Test snap install. + + :param check_call: Mock object + :return: None + """ + check_call.return_value = 0 + fetch_snap.snap_install(['hello-world', 'htop'], '--classic', '--stable') + check_call.assert_called_with(['snap', 'install', '--classic', '--stable', 'hello-world', 'htop'], env=TEST_ENV) + + @patch.object(fetch_snap, 'log', lambda *args, **kwargs: None) + @patch('subprocess.check_call') + @patch('os.environ', TEST_ENV) + def testSnapRefresh(self, check_call): + """ + Test snap refresh. + + :param check_call: Mock object + :return: None + """ + check_call.return_value = 0 + fetch_snap.snap_refresh(['hello-world', 'htop'], '--classic', '--stable') + check_call.assert_called_with(['snap', 'refresh', '--classic', '--stable', 'hello-world', 'htop'], env=TEST_ENV) + + @patch.object(fetch_snap, 'log', lambda *args, **kwargs: None) + @patch('subprocess.check_call') + @patch('os.environ', TEST_ENV) + def testSnapRemove(self, check_call): + """ + Test snap remove. + + :param check_call: Mock object + :return: None + """ + check_call.return_value = 0 + fetch_snap.snap_remove(['hello-world', 'htop']) + check_call.assert_called_with(['snap', 'remove', 'hello-world', 'htop'], env=TEST_ENV) + + def test_valid_snap_channel(self): + """ Test valid snap channel + + :return: None + """ + # Valid + self.assertTrue(fetch_snap.valid_snap_channel('edge')) + + # Invalid + with self.assertRaises(fetch_snap.InvalidSnapChannel): + fetch_snap.valid_snap_channel('DOESNOTEXIST') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/helpers.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..10581268bfd7d956644fe0df1669f974c4f59c11 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/helpers.py @@ -0,0 +1,144 @@ +''' General helper functions for tests ''' +from contextlib import contextmanager +from mock import patch, MagicMock +import io + +import six +if not six.PY3: + builtin_open = '__builtin__.open' +else: + builtin_open = 'builtins.open' + + +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. + + Yields the mock for "open" and "file", respectively.''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=io.FileIO) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch(builtin_open, stub_open): + yield mock_open, mock_file + + +@contextmanager +def mock_open(filename, contents=None): + ''' Slightly simpler mock of open to return contents for filename ''' + def mock_file(name, mode='r', buffering=-1): # Python 2 signature. + if name == filename: + if (not six.PY3) or 'b' in mode: + return io.BytesIO(contents) + return io.StringIO(contents) + else: + return open(name, mode, buffering) + + with patch(builtin_open, mock_file): + yield + + +class FakeRelation(object): + ''' + A fake relation class. 
Lets tests specify simple relation data
+    for a default relation + unit (foo:0, foo/0, set in setUp()), eg:
+
+        rel = {
+            'private-address': 'foo',
+            'password': 'passwd',
+        }
+        relation = FakeRelation(rel)
+        self.relation_get.side_effect = relation.get
+        passwd = self.relation_get('password')
+
+    or more complex relations meant to be addressed by explicit relation id
+    + unit id combos:
+
+        rel = {
+            'mysql:0': {
+                'mysql/0': {
+                    'private-address': 'foo',
+                    'password': 'passwd',
+                }
+            }
+        }
+        relation = FakeRelation(rel)
+        self.relation_get.side_effect = relation.get
+        passwd = self.relation_get('password', rid='mysql:0', unit='mysql/0')
+
+    set_relation_context can be used to simulate being in a relation hook
+    context. This allows omitting a relation id or unit when calling relation
+    helpers as the related unit is present.
+
+    To set the context:
+
+        relation = FakeRelation(rel)
+        relation.set_relation_context('mysql-svc2/0', 'shared-db:12')
+
+    To clear it:
+
+        relation.clear_relation_context()
+    '''
+    def __init__(self, relation_data):
+        self.relation_data = relation_data
+        self.remote_unit = None
+        self.current_relation_id = None
+
+    def set_relation_context(self, remote_unit, relation_id):
+        self.remote_unit = remote_unit
+        self.current_relation_id = relation_id
+
+    def clear_relation_context(self):
+        self.remote_unit = None
+        self.current_relation_id = None
+
+    def get(self, attribute=None, unit=None, rid=None):
+        if not rid or rid == 'foo:0':
+            if self.current_relation_id:
+                if not unit:
+                    unit = self.remote_unit
+                udata = self.relation_data[self.current_relation_id][unit]
+                if attribute:
+                    return udata[attribute]
+                else:
+                    return udata[unit]
+            if attribute is None:
+                return self.relation_data
+            elif attribute in self.relation_data:
+                return self.relation_data[attribute]
+            return None
+        else:
+            if rid not in self.relation_data:
+                return None
+            try:
+                relation = self.relation_data[rid][unit]
+            except KeyError:
+                return None
+            if attribute and attribute in relation:
+                return relation[attribute]
+            return relation
+
+    def relation_id(self):
+        return self.current_relation_id
+
+    def relation_ids(self, reltype=None):
+        rids = self.relation_data.keys()
+        if reltype:
+            return [r for r in rids if r.split(':')[0] == reltype]
+        return rids
+
+    def related_units(self, relid=None):
+        try:
+            return self.relation_data[relid].keys()
+        except KeyError:
+            return []
+
+    def relation_units(self, relation_id):
+        if relation_id not in self.relation_data:
+            return None
+        return self.relation_data[relation_id].keys()
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/payload/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/payload/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/payload/test_archive.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/payload/test_archive.py
new file mode 100644
index 0000000000000000000000000000000000000000..f636917c9d15e3529962af5f02863618bc0b6b9c
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/payload/test_archive.py
@@ -0,0 +1,137 @@
+import os
+from testtools import TestCase
+from mock import (
+    patch,
+    MagicMock,
+)
+from charmhelpers.payload import archive
+from tempfile import mkdtemp
+from shutil import rmtree
+import subprocess
+
+
+class ArchiveTestCase(TestCase):
+
+    def create_archive(self, format):
+        workdir = mkdtemp()
+        if format == "tar":
+            workfile = "{}/foo.tar.gz".format(workdir)
+            cmd = "tar czf {} hosts".format(workfile)
+        elif format == "zip":
+            workfile = "{}/foo.zip".format(workdir)
+            cmd = "zip {} hosts".format(workfile)
+        curdir = os.getcwd()
+        os.chdir("/etc")
+        subprocess.check_output(cmd, shell=True)
+        os.chdir(curdir)
+        self.addCleanup(rmtree, workdir)
+        return (workfile, ["hosts"])
+
+    @patch('os.path.isfile')
+    def test_gets_archive_handler_by_ext(self, _isfile):
+        tar_archive_handler = archive.extract_tarfile
+        zip_archive_handler = archive.extract_zipfile
+        _isfile.return_value = False
+
+        for ext in ('tar', 'tar.gz', 'tgz', 'tar.bz2', 'tbz2', 'tbz'):
+            handler = archive.get_archive_handler("somefile.{}".format(ext))
+            msg = "handler for extension: {}".format(ext)
+            self.assertEqual(handler, tar_archive_handler, msg)
+
+        for ext in ('zip', 'jar'):
+            handler = archive.get_archive_handler("somefile.{}".format(ext))
+            msg = "handler for extension {}".format(ext)
+            self.assertEqual(handler, zip_archive_handler, msg)
+
+    @patch('zipfile.is_zipfile')
+    @patch('tarfile.is_tarfile')
+    @patch('os.path.isfile')
+    def test_gets_archive_handler_by_filetype(self, _isfile, _istarfile,
+                                              _iszipfile):
+        tar_archive_handler = archive.extract_tarfile
+        zip_archive_handler = archive.extract_zipfile
+        _isfile.return_value = True
+
+        _istarfile.return_value = True
+        _iszipfile.return_value = False
+        handler = archive.get_archive_handler("foo")
+        self.assertEqual(handler, tar_archive_handler)
+
+        _istarfile.return_value = False
+        _iszipfile.return_value = True
+        handler = archive.get_archive_handler("foo")
+        self.assertEqual(handler, zip_archive_handler)
+
+    @patch('charmhelpers.core.hookenv.charm_dir')
+    def test_gets_archive_dest_default(self, _charmdir):
+        _charmdir.return_value = "foo"
+        thedir = archive.archive_dest_default("baz")
+        self.assertEqual(thedir, os.path.join("foo", "archives", "baz"))
+
+        thedir = archive.archive_dest_default("baz/qux")
+        self.assertEqual(thedir, os.path.join("foo", "archives", "qux"))
+
+    def test_extracts_tarfile(self):
+        destdir = mkdtemp()
+        self.addCleanup(rmtree, destdir)
+        tar_file, contents = self.create_archive("tar")
+        archive.extract_tarfile(tar_file, destdir)
+        for path in [os.path.join(destdir, item) for item in contents]:
+            self.assertTrue(os.path.exists(path))
+
+    def test_extracts_zipfile(self):
+        destdir = mkdtemp()
+        self.addCleanup(rmtree, destdir)
+        try:
+            zip_file, contents = self.create_archive("zip")
+        except subprocess.CalledProcessError as e:
+            if e.returncode == 127:
+                self.skip("Skipping - zip is not installed")
+            else:
+                raise
+        archive.extract_zipfile(zip_file, destdir)
+        for path in [os.path.join(destdir, item) for item in contents]:
+            self.assertTrue(os.path.exists(path))
+
+    @patch('charmhelpers.core.host.mkdir')
+    @patch('charmhelpers.payload.archive.get_archive_handler')
+    @patch('charmhelpers.payload.archive.archive_dest_default')
+    def test_extracts(self, _defdest, _gethandler, _mkdir):
+        archive_name = "foo"
+        archive_handler = MagicMock()
+        _gethandler.return_value = archive_handler
+
+        dest = archive.extract(archive_name, "bar")
+
+        _gethandler.assert_called_with(archive_name)
+        archive_handler.assert_called_with(archive_name, "bar")
+        _defdest.assert_not_called()
+        _mkdir.assert_called_with("bar")
+        self.assertEqual(dest, "bar")
+
+    @patch('charmhelpers.core.host.mkdir')
+    @patch('charmhelpers.payload.archive.get_archive_handler')
+    def test_unhandled_extract_raises_exc(self, _gethandler, _mkdir):
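+        # Editor's note: the handler tests above encode a two-step lookup --
+        # match on the file extension first, then fall back to sniffing the
+        # file type when the name alone is inconclusive. A minimal sketch of
+        # the idea (illustrative only, not charmhelpers.payload.archive):
+        #
+        #     def get_handler(path):
+        #         if path.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2',
+        #                           '.tbz2', '.tbz')):
+        #             return extract_tarfile
+        #         if path.endswith(('.zip', '.jar')):
+        #             return extract_zipfile
+        #         if os.path.isfile(path):
+        #             if tarfile.is_tarfile(path):
+        #                 return extract_tarfile
+        #             if zipfile.is_zipfile(path):
+        #                 return extract_zipfile
+        #         return None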
archive_name = "foo" + _gethandler.return_value = None + + self.assertRaises(archive.ArchiveError, archive.extract, + archive_name) + + _gethandler.assert_called_with(archive_name) + _mkdir.assert_not_called() + + @patch('charmhelpers.core.host.mkdir') + @patch('charmhelpers.payload.archive.get_archive_handler') + @patch('charmhelpers.payload.archive.archive_dest_default') + def test_extracts_default_dest(self, _defdest, _gethandler, _mkdir): + expected_dest = "bar" + archive_name = "foo" + _defdest.return_value = expected_dest + handler = MagicMock() + handler.return_value = expected_dest + _gethandler.return_value = handler + + dest = archive.extract(archive_name) + self.assertEqual(expected_dest, dest) + handler.assert_called_with(archive_name, expected_dest) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/payload/test_execd.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/payload/test_execd.py new file mode 100644 index 0000000000000000000000000000000000000000..20d83c5891376ff2547ab828f2757f585be25aca --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/payload/test_execd.py @@ -0,0 +1,153 @@ +from testtools import TestCase +from mock import patch +import os +import shutil +import stat + +from tempfile import mkdtemp + +from charmhelpers.payload import execd + + +class ExecDTestCase(TestCase): + + def setUp(self): + super(ExecDTestCase, self).setUp() + charm_dir = mkdtemp() + self.addCleanup(shutil.rmtree, charm_dir) + self.test_charm_dir = charm_dir + + env_patcher = patch.dict('os.environ', + {'CHARM_DIR': self.test_charm_dir}) + env_patcher.start() + self.addCleanup(env_patcher.stop) + + def test_default_execd_dir(self): + expected = os.path.join(self.test_charm_dir, 'exec.d') + default_dir = execd.default_execd_dir() + + self.assertEqual(expected, default_dir) + + def make_preinstall_executable(self, module_dir, execd_dir='exec.d', + error_on_preinstall=False): + """Add a charm-pre-install to module dir. + + When executed, the charm-pre-install will create a second + file in the same directory, charm-pre-install-success. + """ + module_path = os.path.join(self.test_charm_dir, execd_dir, module_dir) + os.makedirs(module_path) + + charm_pre_install_path = os.path.join(module_path, + 'charm-pre-install') + pre_install_success_path = os.path.join(module_path, + 'charm-pre-install-success') + with open(charm_pre_install_path, 'w+') as f: + if not error_on_preinstall: + f.write("#!/bin/bash\n" + "/usr/bin/touch {}".format(pre_install_success_path)) + else: + f.write("#!/bin/bash\n" + "echo stdout_from_pre_install\n" + "echo stderr_from_pre_install >&2\n" + "exit 1") + + # ensure it is executable. + perms = stat.S_IRUSR + stat.S_IXUSR + os.chmod(charm_pre_install_path, perms) + + def assert_preinstall_called_for_mod(self, module_dir, + execd_dir='exec.d'): + """Asserts that the charm-pre-install-success file exists.""" + expected_file = os.path.join(self.test_charm_dir, execd_dir, + module_dir, 'charm-pre-install-success') + files = os.listdir(os.path.dirname(expected_file)) + self.assertTrue(os.path.exists(expected_file), "files were: %s. 
charmdir is: %s" % (files, self.test_charm_dir)) + + def test_execd_preinstall(self): + """All charm-pre-install hooks are executed.""" + self.make_preinstall_executable(module_dir='basenode') + self.make_preinstall_executable(module_dir='mod2') + + execd.execd_preinstall() + + self.assert_preinstall_called_for_mod('basenode') + self.assert_preinstall_called_for_mod('mod2') + + def test_execd_module_list_from_env(self): + modules = ['basenode', 'mod2', 'c'] + for module in modules: + self.make_preinstall_executable(module_dir=module) + + actual_mod_paths = list(execd.execd_module_paths()) + + expected_mod_paths = [ + os.path.join(self.test_charm_dir, 'exec.d', module) + for module in modules] + self.assertSetEqual(set(actual_mod_paths), set(expected_mod_paths)) + + def test_execd_module_list_with_dir(self): + modules = ['basenode', 'mod2', 'c'] + for module in modules: + self.make_preinstall_executable(module_dir=module, + execd_dir='foo') + + actual_mod_paths = list(execd.execd_module_paths( + execd_dir=os.path.join(self.test_charm_dir, 'foo'))) + + expected_mod_paths = [ + os.path.join(self.test_charm_dir, 'foo', module) + for module in modules] + self.assertSetEqual(set(actual_mod_paths), set(expected_mod_paths)) + + def test_execd_module_paths_no_execd_dir(self): + """Empty list is returned when the exec.d doesn't exist.""" + actual_mod_paths = list(execd.execd_module_paths()) + + self.assertEqual(actual_mod_paths, []) + + def test_execd_submodule_list(self): + modules = ['basenode', 'mod2', 'c'] + for module in modules: + self.make_preinstall_executable(module_dir=module) + + submodules = list(execd.execd_submodule_paths('charm-pre-install')) + + expected = [os.path.join(self.test_charm_dir, 'exec.d', mod, + 'charm-pre-install') for mod in modules] + self.assertEqual(sorted(submodules), sorted(expected)) + + def test_execd_run(self): + modules = ['basenode', 'mod2', 'c'] + for module in modules: + self.make_preinstall_executable(module_dir=module) + + execd.execd_run('charm-pre-install') + + self.assert_preinstall_called_for_mod('basenode') + self.assert_preinstall_called_for_mod('mod2') + self.assert_preinstall_called_for_mod('c') + + @patch('charmhelpers.core.hookenv.log') + def test_execd_run_logs_exception(self, log_): + self.make_preinstall_executable(module_dir='basenode', + error_on_preinstall=True) + + execd.execd_run('charm-pre-install', die_on_error=False) + + expected_log = ('Error (1) running {}/exec.d/basenode/' + 'charm-pre-install. 
Output: ' + 'stdout_from_pre_install\n' + 'stderr_from_pre_install\n'.format(self.test_charm_dir)) + log_.assert_called_with(expected_log) + + @patch('charmhelpers.core.hookenv.log') + @patch('sys.exit') + def test_execd_run_dies_with_return_code(self, exit_, log): + self.make_preinstall_executable(module_dir='basenode', + error_on_preinstall=True) + + with open(os.devnull, 'wb') as devnull: + execd.execd_run('charm-pre-install', stderr=devnull) + + exit_.assert_called_with(1) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/tools/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/tools/test_charm_helper_sync.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/tools/test_charm_helper_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..7c66b9ba80700c1b042948cdfd12b2982ccbe9dc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/tools/test_charm_helper_sync.py @@ -0,0 +1,304 @@ +import unittest +from mock import call, patch +import yaml + +import tools.charm_helpers_sync.charm_helpers_sync as sync + +import six +if not six.PY3: + builtin_open = '__builtin__.open' +else: + builtin_open = 'builtins.open' + + +INCLUDE = """ +include: + - core + - contrib.openstack + - contrib.storage + - contrib.hahelpers: + - utils + - ceph_utils + - cluster_utils + - haproxy_utils +""" + + +class HelperSyncTests(unittest.TestCase): + def test_clone_helpers(self): + '''It properly branches the correct helpers branch''' + with patch('subprocess.check_call') as check_call: + sync.clone_helpers(work_dir='/tmp/foo', repo='git:charm-helpers') + check_call.assert_called_with(['git', + 'clone', '--depth=1', + 'git:charm-helpers', + '/tmp/foo/charm-helpers']) + + def test_module_path(self): + '''It converts a python module path to a filesystem path''' + self.assertEquals(sync._module_path('some.test.module'), + 'some/test/module') + + def test_src_path(self): + '''It renders the correct path to module within charm-helpers tree''' + path = sync._src_path(src='/tmp/charm-helpers', + module='contrib.openstack') + self.assertEquals('/tmp/charm-helpers/charmhelpers/contrib/openstack', + path) + + def test_dest_path(self): + '''It correctly finds the correct install path within a charm''' + path = sync._dest_path(dest='/tmp/mycharm/hooks/charmhelpers', + module='contrib.openstack') + self.assertEquals('/tmp/mycharm/hooks/charmhelpers/contrib/openstack', + path) + + @patch(builtin_open) + @patch('os.path.exists') + @patch('os.walk') + def test_ensure_init(self, walk, exists, _open): + '''It ensures all subdirectories of a parent are python importable''' + # os walk + # os.path.join + # os.path.exists + # open + def _walk(path): + yield ('/tmp/hooks/', ['helpers'], []) + yield ('/tmp/hooks/helpers', ['foo'], []) + yield ('/tmp/hooks/helpers/foo', [], []) + walk.side_effect = _walk + exists.return_value = False + sync.ensure_init('hooks/helpers/foo/') + ex = [call('/tmp/hooks/__init__.py', 'wb'), + call('/tmp/hooks/helpers/__init__.py', 'wb'), + call('/tmp/hooks/helpers/foo/__init__.py', 'wb')] + for c in ex: + self.assertIn(c, _open.call_args_list) + + @patch('tools.charm_helpers_sync.charm_helpers_sync.ensure_init') + @patch('os.path.isfile') + @patch('shutil.copy') + @patch('os.makedirs') + 
@patch('os.path.exists') + def test_sync_pyfile(self, exists, mkdirs, copy, isfile, ensure_init): + '''It correctly syncs a py src file from src to dest''' + exists.return_value = False + isfile.return_value = True + sync.sync_pyfile('/tmp/charm-helpers/core/host', + 'hooks/charmhelpers/core') + mkdirs.assert_called_with('hooks/charmhelpers/core') + copy_f = call('/tmp/charm-helpers/core/host.py', + 'hooks/charmhelpers/core') + copy_i = call('/tmp/charm-helpers/core/__init__.py', + 'hooks/charmhelpers/core') + self.assertIn(copy_f, copy.call_args_list) + self.assertIn(copy_i, copy.call_args_list) + ensure_init.assert_called_with('hooks/charmhelpers/core') + + def _test_filter_dir(self, opts, isfile, isdir): + '''It filters non-python files and non-module dirs from source''' + files = { + 'bad_file.bin': 'f', + 'some_dir': 'd', + 'good_helper.py': 'f', + 'good_helper2.py': 'f', + 'good_helper3.py': 'f', + 'bad_file.img': 'f', + } + + def _isfile(f): + try: + return files[f.split('/').pop()] == 'f' + except KeyError: + return False + + def _isdir(f): + try: + return files[f.split('/').pop()] == 'd' + except KeyError: + return False + + isfile.side_effect = _isfile + isdir.side_effect = _isdir + result = sync.get_filter(opts)(dir='/tmp/charm-helpers/core', + ls=six.iterkeys(files)) + return result + + @patch('os.path.isdir') + @patch('os.path.isfile') + def test_filter_dir_no_opts(self, isfile, isdir): + '''It filters out all non-py files by default''' + result = self._test_filter_dir(opts=None, isfile=isfile, isdir=isdir) + ex = ['bad_file.bin', 'bad_file.img', 'some_dir'] + self.assertEquals(sorted(ex), sorted(result)) + + @patch('os.path.isdir') + @patch('os.path.isfile') + def test_filter_dir_with_include(self, isfile, isdir): + '''It includes non-py files if specified as an include opt''' + result = sorted(self._test_filter_dir(opts=['inc=*.img'], + isfile=isfile, isdir=isdir)) + ex = sorted(['bad_file.bin', 'some_dir']) + self.assertEquals(ex, result) + + @patch('os.path.isdir') + @patch('os.path.isfile') + def test_filter_dir_include_all(self, isfile, isdir): + '''It does not filter anything if option specified to include all''' + self.assertEquals(sync.get_filter(opts=['inc=*']), None) + + @patch('tools.charm_helpers_sync.charm_helpers_sync.get_filter') + @patch('tools.charm_helpers_sync.charm_helpers_sync.ensure_init') + @patch('shutil.copytree') + @patch('shutil.rmtree') + @patch('os.path.exists') + def test_sync_directory(self, exists, rmtree, copytree, ensure_init, + _filter): + '''It correctly syncs src directory to dest directory''' + _filter.return_value = None + sync.sync_directory('/tmp/charm-helpers/charmhelpers/core', + 'hooks/charmhelpers/core') + exists.return_value = True + rmtree.assert_called_with('hooks/charmhelpers/core') + copytree.assert_called_with('/tmp/charm-helpers/charmhelpers/core', + 'hooks/charmhelpers/core', ignore=None) + ensure_init.assert_called_with('hooks/charmhelpers/core') + + @patch('os.path.isfile') + def test_is_pyfile(self, isfile): + '''It correctly identifies incomplete path to a py src file as such''' + sync._is_pyfile('/tmp/charm-helpers/charmhelpers/core/host') + isfile.assert_called_with( + '/tmp/charm-helpers/charmhelpers/core/host.py' + ) + + @patch('tools.charm_helpers_sync.charm_helpers_sync.sync_pyfile') + @patch('tools.charm_helpers_sync.charm_helpers_sync.sync_directory') + @patch('os.path.isdir') + def test_syncs_directory(self, is_dir, sync_dir, sync_pyfile): + '''It correctly syncs a module directory''' + is_dir.return_value = 
True + sync.sync(src='/tmp/charm-helpers', + dest='hooks/charmhelpers', + module='contrib.openstack') + + sync_dir.assert_called_with( + '/tmp/charm-helpers/charmhelpers/contrib/openstack', + 'hooks/charmhelpers/contrib/openstack', None) + + # __init__.py files leading to the directory were also synced. + sync_pyfile.assert_has_calls([ + call('/tmp/charm-helpers/charmhelpers/__init__', + 'hooks/charmhelpers'), + call('/tmp/charm-helpers/charmhelpers/contrib/__init__', + 'hooks/charmhelpers/contrib')]) + + @patch('tools.charm_helpers_sync.charm_helpers_sync.sync_pyfile') + @patch('tools.charm_helpers_sync.charm_helpers_sync._is_pyfile') + @patch('os.path.isdir') + def test_syncs_file(self, is_dir, is_pyfile, sync_pyfile): + '''It correctly syncs a module file''' + is_dir.return_value = False + is_pyfile.return_value = True + sync.sync(src='/tmp/charm-helpers', + dest='hooks/charmhelpers', + module='contrib.openstack.utils') + sync_pyfile.assert_has_calls([ + call('/tmp/charm-helpers/charmhelpers/__init__', + 'hooks/charmhelpers'), + call('/tmp/charm-helpers/charmhelpers/contrib/__init__', + 'hooks/charmhelpers/contrib'), + call('/tmp/charm-helpers/charmhelpers/contrib/openstack/__init__', + 'hooks/charmhelpers/contrib/openstack'), + call('/tmp/charm-helpers/charmhelpers/contrib/openstack/utils', + 'hooks/charmhelpers/contrib/openstack')]) + + @patch('tools.charm_helpers_sync.charm_helpers_sync.sync') + @patch('os.path.isdir') + @patch('os.path.exists') + def test_sync_helpers_from_config(self, exists, isdir, _sync): + '''It correctly syncs a list of included helpers''' + include = yaml.safe_load(INCLUDE)['include'] + isdir.return_value = True + exists.return_value = False + sync.sync_helpers(include=include, + src='/tmp/charm-helpers', + + dest='hooks/charmhelpers') + mods = [ + 'core', + 'contrib.openstack', + 'contrib.storage', + 'contrib.hahelpers.utils', + 'contrib.hahelpers.ceph_utils', + 'contrib.hahelpers.cluster_utils', + 'contrib.hahelpers.haproxy_utils' + ] + + ex_calls = [] + [ex_calls.append( + call('/tmp/charm-helpers', 'hooks/charmhelpers', c, []) + ) for c in mods] + self.assertEquals(ex_calls, _sync.call_args_list) + + @patch('tools.charm_helpers_sync.charm_helpers_sync.sync') + @patch('os.path.isdir') + @patch('os.path.exists') + @patch('shutil.rmtree') + def test_sync_helpers_from_config_cleanup(self, _rmtree, _exists, + isdir, _sync): + '''It correctly syncs a list of included helpers''' + include = yaml.safe_load(INCLUDE)['include'] + isdir.return_value = True + _exists.return_value = True + + sync.sync_helpers(include=include, + src='/tmp/charm-helpers', + + dest='hooks/charmhelpers') + _rmtree.assert_called_with('hooks/charmhelpers') + mods = [ + 'core', + 'contrib.openstack', + 'contrib.storage', + 'contrib.hahelpers.utils', + 'contrib.hahelpers.ceph_utils', + 'contrib.hahelpers.cluster_utils', + 'contrib.hahelpers.haproxy_utils' + ] + + ex_calls = [] + [ex_calls.append( + call('/tmp/charm-helpers', 'hooks/charmhelpers', c, []) + ) for c in mods] + self.assertEquals(ex_calls, _sync.call_args_list) + + def test_extract_option_no_globals(self): + '''It extracts option from an included item with no global options''' + inc = 'contrib.openstack.templates|inc=*.template' + result = sync.extract_options(inc) + ex = ('contrib.openstack.templates', ['inc=*.template']) + self.assertEquals(ex, result) + + def test_extract_option_with_global_as_string(self): + '''It extracts option for include with global options as str''' + inc = 'contrib.openstack.templates|inc=*.template' + 
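# extract_options should also accept global options given as a plain
+        # string, normalizing them to a list internally.
+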
result = sync.extract_options(inc, global_options='inc=foo.*') + ex = ('contrib.openstack.templates', + ['inc=*.template', 'inc=foo.*']) + self.assertEquals(ex, result) + + def test_extract_option_with_globals(self): + '''It extracts option from an included item with global options''' + inc = 'contrib.openstack.templates|inc=*.template' + result = sync.extract_options(inc, global_options=['inc=*.cfg']) + ex = ('contrib.openstack.templates', ['inc=*.template', 'inc=*.cfg']) + self.assertEquals(ex, result) + + def test_extract_multiple_options_with_globals(self): + '''It extracts multiple options from an included item''' + inc = 'contrib.openstack.templates|inc=*.template,inc=foo.*' + result = sync.extract_options(inc, global_options=['inc=*.cfg']) + ex = ('contrib.openstack.templates', + ['inc=*.template', 'inc=foo.*', 'inc=*.cfg']) + self.assertEquals(ex, result) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/utils.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7ac46994dde1311502b93df965aea4b5038f868a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tests/utils.py @@ -0,0 +1,79 @@ +# Copyright 2016 Canonical Ltd +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Unit test helpers from https://github.com/openstack/charms.openstack/""" + +import contextlib +import io +import mock +import unittest + + +@contextlib.contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. 
+
+    Yields the mock for "open" and "file", respectively.'''
+    mock_open = mock.MagicMock(spec=open)
+    mock_file = mock.MagicMock(spec=io.FileIO)
+
+    @contextlib.contextmanager
+    def stub_open(*args, **kwargs):
+        mock_open(*args, **kwargs)
+        yield mock_file
+
+    with mock.patch('builtins.open', stub_open):
+        yield mock_open, mock_file
+
+
+class BaseTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self._patches = {}
+        self._patches_start = {}
+
+    def tearDown(self):
+        for k, v in self._patches.items():
+            v.stop()
+            setattr(self, k, None)
+        self._patches = None
+        self._patches_start = None
+
+    def patch_object(self, obj, attr, return_value=None, name=None, new=None,
+                     **kwargs):
+        if name is None:
+            name = attr
+        if new is not None:
+            mocked = mock.patch.object(obj, attr, new=new, **kwargs)
+        else:
+            mocked = mock.patch.object(obj, attr, **kwargs)
+        self._patches[name] = mocked
+        started = mocked.start()
+        if new is None:
+            started.return_value = return_value
+        self._patches_start[name] = started
+        setattr(self, name, started)
+
+    def patch(self, item, return_value=None, name=None, new=None, **kwargs):
+        if name is None:
+            raise RuntimeError("Must pass 'name' to .patch()")
+        if new is not None:
+            mocked = mock.patch(item, new=new, **kwargs)
+        else:
+            mocked = mock.patch(item, **kwargs)
+        self._patches[name] = mocked
+        started = mocked.start()
+        if new is None:
+            started.return_value = return_value
+        self._patches_start[name] = started
+        setattr(self, name, started)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/README.md b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e22a77231bf068e0b5c208dc956c7dd47fdd7102
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/README.md
@@ -0,0 +1,160 @@
+Script for synchronizing charm-helpers into a charm branch.
+
+This script is intended to be used by charm authors during the development
+of their charm. It allows authors to pull in bits of a charm-helpers source
+tree and embed them directly into their charm, to be deployed with the rest
+of their hooks and charm payload. This script is not intended to be called
+by the hooks themselves, but instead by the charm author while they are
+hacking on a charm offline. Consider it a method of compiling a specific
+revision of a charm-helpers branch into a given charm source tree.
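+
+For illustration only (the module list and paths here are an assumed
+example, not mandated by the tool): after syncing `core` and
+`contrib.openstack` into `hooks/charmhelpers`, the charm ships the helpers
+inside its own tree, roughly:
+
+    my-charm/
+    |-- charm-helpers.yaml
+    `-- hooks/
+        `-- charmhelpers/
+            |-- __init__.py
+            |-- core/
+            `-- contrib/
+                `-- openstack/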
+
+Some goals and benefits to using a sync tool to manage this process:
+
+  - Reduces the burden of manually copying in upstream charm helpers code
+    into a charm and helps ensure we can easily keep a specific charm's
+    helper code up to date.
+
+  - Allows authors to hack on their own working branch of charm-helpers,
+    easily sync into their WIP charm. Any changes they've made to charm
+    helpers can be upstreamed via a merge of their charm-helpers branch
+    into lp:charm-helpers, ideally at the same time they are upstreaming
+    the charm itself into the charm store. Separating charm helper
+    development from charm development can help reduce cases where charms
+    are shipping locally modified helpers.
+
+  - Avoids the need to ship the *entire* charm-helpers source tree with
+    a charm. Authors can selectively pick and choose what subset of helpers
+    to include to satisfy the goals of their charm.
+
+Allows specifying a list of dependencies to sync in from a charm-helpers
+branch. Ideally, each charm should describe its requirements in a yaml
+config included in the charm, eg `charm-helpers.yaml` (NOTE: Example module
+layout as of 12/18/2019):
+
+    $ cd my-charm
+    $ cat >charm-helpers.yaml <<END
+    destination: hooks/helpers
+    repo: https://github.com/juju/charm-helpers
+    include:
+        - core
+        - contrib.openstack
+        - contrib.hahelpers
+    END
+    $ charm-helpers-sync.py -c charm-helpers.yaml
+
+Helpers may also be synced without a config file, by pointing the script at
+a repository and listing the modules on the command line:
+
+    $ charm-helpers-sync.py -r https://github.com/<user>/charm-helpers \
+        -d hooks/helpers core contrib.openstack contrib.hahelpers
+
+or use a specific branch using the @ notation
+
+    $ charm-helpers-sync.py -r https://github.com/<user>/charm-helpers@branch_name \
+        -d hooks/helpers core contrib.openstack contrib.hahelpers
+
+Script will create missing `__init__.py`'s to ensure each subdirectory is
+importable, assuming the script is run from the charm's top-level directory.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py
new file mode 100755
index 0000000000000000000000000000000000000000..7c0c1945dd32d3c29683a4fc3567b527750b5cd4
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Authors: +# Adam Gandelman + +import logging +import optparse +import os +import subprocess +import shutil +import sys +import tempfile +import yaml +from fnmatch import fnmatch + +import six + +CHARM_HELPERS_REPO = 'https://github.com/juju/charm-helpers' + + +def parse_config(conf_file): + if not os.path.isfile(conf_file): + logging.error('Invalid config file: %s.' % conf_file) + return False + return yaml.load(open(conf_file).read()) + + +def clone_helpers(work_dir, repo): + dest = os.path.join(work_dir, 'charm-helpers') + logging.info('Cloning out %s to %s.' % (repo, dest)) + branch = None + if '@' in repo: + repo, branch = repo.split('@', 1) + cmd = ['git', 'clone', '--depth=1'] + if branch is not None: + cmd += ['--branch', branch] + cmd += [repo, dest] + subprocess.check_call(cmd) + return dest + + +def _module_path(module): + return os.path.join(*module.split('.')) + + +def _src_path(src, module): + return os.path.join(src, 'charmhelpers', _module_path(module)) + + +def _dest_path(dest, module): + return os.path.join(dest, _module_path(module)) + + +def _is_pyfile(path): + return os.path.isfile(path + '.py') + + +def ensure_init(path): + ''' + ensure directories leading up to path are importable, omitting + parent directory, eg path='/hooks/helpers/foo'/: + hooks/ + hooks/helpers/__init__.py + hooks/helpers/foo/__init__.py + ''' + for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])): + _i = os.path.join(d, '__init__.py') + if not os.path.exists(_i): + logging.info('Adding missing __init__.py: %s' % _i) + open(_i, 'wb').close() + + +def sync_pyfile(src, dest): + src = src + '.py' + src_dir = os.path.dirname(src) + logging.info('Syncing pyfile: %s -> %s.' % (src, dest)) + if not os.path.exists(dest): + os.makedirs(dest) + shutil.copy(src, dest) + if os.path.isfile(os.path.join(src_dir, '__init__.py')): + shutil.copy(os.path.join(src_dir, '__init__.py'), + dest) + ensure_init(dest) + + +def get_filter(opts=None): + opts = opts or [] + if 'inc=*' in opts: + # do not filter any files, include everything + return None + + def _filter(dir, ls): + incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt] + _filter = [] + for f in ls: + _f = os.path.join(dir, f) + + if not os.path.isdir(_f) and not _f.endswith('.py') and incs: + if True not in [fnmatch(_f, inc) for inc in incs]: + logging.debug('Not syncing %s, does not match include ' + 'filters (%s)' % (_f, incs)) + _filter.append(f) + else: + logging.debug('Including file, which matches include ' + 'filters (%s): %s' % (incs, _f)) + elif (os.path.isfile(_f) and not _f.endswith('.py')): + logging.debug('Not syncing file: %s' % f) + _filter.append(f) + elif (os.path.isdir(_f) and not + os.path.isfile(os.path.join(_f, '__init__.py'))): + logging.debug('Not syncing directory: %s' % f) + _filter.append(f) + return _filter + return _filter + + +def sync_directory(src, dest, opts=None): + if os.path.exists(dest): + logging.debug('Removing existing directory: %s' % dest) + shutil.rmtree(dest) + logging.info('Syncing directory: %s -> %s.' 
% (src, dest)) + + shutil.copytree(src, dest, ignore=get_filter(opts)) + ensure_init(dest) + + +def sync(src, dest, module, opts=None): + + # Sync charmhelpers/__init__.py for bootstrap code. + sync_pyfile(_src_path(src, '__init__'), dest) + + # Sync other __init__.py files in the path leading to module. + m = [] + steps = module.split('.')[:-1] + while steps: + m.append(steps.pop(0)) + init = '.'.join(m + ['__init__']) + sync_pyfile(_src_path(src, init), + os.path.dirname(_dest_path(dest, init))) + + # Sync the module, or maybe a .py file. + if os.path.isdir(_src_path(src, module)): + sync_directory(_src_path(src, module), _dest_path(dest, module), opts) + elif _is_pyfile(_src_path(src, module)): + sync_pyfile(_src_path(src, module), + os.path.dirname(_dest_path(dest, module))) + else: + logging.warn('Could not sync: %s. Neither a pyfile or directory, ' + 'does it even exist?' % module) + + +def parse_sync_options(options): + if not options: + return [] + return options.split(',') + + +def extract_options(inc, global_options=None): + global_options = global_options or [] + if global_options and isinstance(global_options, six.string_types): + global_options = [global_options] + if '|' not in inc: + return (inc, global_options) + inc, opts = inc.split('|') + return (inc, parse_sync_options(opts) + global_options) + + +def sync_helpers(include, src, dest, options=None): + if os.path.exists(dest): + logging.debug('Removing existing directory: %s' % dest) + shutil.rmtree(dest) + if not os.path.isdir(dest): + os.makedirs(dest) + + global_options = parse_sync_options(options) + + for inc in include: + if isinstance(inc, str): + inc, opts = extract_options(inc, global_options) + sync(src, dest, inc, opts) + elif isinstance(inc, dict): + # could also do nested dicts here. + for k, v in six.iteritems(inc): + if isinstance(v, list): + for m in v: + inc, opts = extract_options(m, global_options) + sync(src, dest, '%s.%s' % (k, inc), opts) + + +if __name__ == '__main__': + parser = optparse.OptionParser() + parser.add_option('-c', '--config', action='store', dest='config', + default=None, help='helper config file') + parser.add_option('-D', '--debug', action='store_true', dest='debug', + default=False, help='debug') + parser.add_option('-r', '--repository', action='store', dest='repo', + help='charm-helpers git repository (overrides config)') + parser.add_option('-d', '--destination', action='store', dest='dest_dir', + help='sync destination dir (overrides config)') + (opts, args) = parser.parse_args() + + if opts.debug: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + if opts.config: + logging.info('Loading charm helper config from %s.' % opts.config) + config = parse_config(opts.config) + if not config: + logging.error('Could not parse config from %s.' % opts.config) + sys.exit(1) + else: + config = {} + + if 'repo' not in config: + config['repo'] = CHARM_HELPERS_REPO + if opts.repo: + config['repo'] = opts.repo + if opts.dest_dir: + config['destination'] = opts.dest_dir + + if 'destination' not in config: + logging.error('No destination dir. 
specified as option or config.') + sys.exit(1) + + if 'include' not in config: + if not args: + logging.error('No modules to sync specified as option or config.') + sys.exit(1) + config['include'] = [] + [config['include'].append(a) for a in args] + + sync_options = None + if 'options' in config: + sync_options = config['options'] + tmpd = tempfile.mkdtemp() + try: + checkout = clone_helpers(tmpd, config['repo']) + sync_helpers(config['include'], checkout, config['destination'], + options=sync_options) + except Exception as e: + logging.error("Could not sync: %s" % e) + raise e + finally: + logging.debug('Cleaning up %s' % tmpd) + shutil.rmtree(tmpd) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/example-config.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/example-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8563ec01b656a6f6990378dcc137cd89f25dd540 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tools/charm_helpers_sync/example-config.yaml @@ -0,0 +1,14 @@ +# Import from remote git repository. +repo: https://github.com/juju/charm-helpers +# install helpers to ./hooks/charmhelpers +destination: hooks/charmhelpers +include: + # include all of charmhelpers.core + - core + # all of charmhelpers.payload + - payload + # and a subset of charmhelpers.contrib.hahelpers + - contrib.hahelpers: + - openstack_common + - ceph_utils + - utils diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tox.ini b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tox.ini new file mode 100644 index 0000000000000000000000000000000000000000..c4c5ed54d732a0806b8fb1de66a69306ec837c94 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charm-helpers/tox.ini @@ -0,0 +1,53 @@ +[tox] +envlist = pep8,py2,py3 +skipsdist = true +sitepackages = false +# NOTE(beisner): Avoid false positives by not skipping missing interpreters. +# NOTE(beisner): Avoid pollution by not enabling sitepackages. +# NOTE(beisner): the 'py3' env is useful to "just give me whatever py3 is here." 
+# NOTE(beisner): the 'py3x' envs are useful to use a distinct interpreter version (will fail if not found) +ignore_basepython_conflict = true + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 +install_command = pip install {opts} {packages} +passenv = HOME TERM +commands = nosetests -s --nologcapture {posargs} --with-coverage --cover-package=charmhelpers tests/ +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py2] +basepython = python +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py34] +basepython = python3.4 +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/test-requirements.txt + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/test-requirements.txt +commands = flake8 -v {posargs} charmhelpers tests tools + +[flake8] +ignore = E402,E501,E741,E722,W504 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/LICENSE b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/README.md b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a98a55253f32b50a17989c1fcd7df6e5f82454af --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/README.md @@ -0,0 +1,201 @@ +# charms.osm + +A Python library to aid the development of charms for Open Source Mano (OSM) + +## How to install + +```bash +git submodule add https://github.com/canonical/operator mod/operator +git submodule add https://github.com/charmed-osm/charms.osm mod/charms.osm +git submodule add https://github.com/juju/charm-helpers.git mod/charm-helpers # Only for libansible +``` + +## SSHProxyCharm + +In this section, we show the class you should inherit from in order to develop your SSH Proxy charms. 
+
+Example:
+
+```python
+from charms.osm.sshproxy import SSHProxyCharm
+
+class MySSHProxyCharm(SSHProxyCharm):
+
+    def __init__(self, framework, key):
+        super().__init__(framework, key)
+
+        # Listen to charm events
+        self.framework.observe(self.on.config_changed, self.on_config_changed)
+        self.framework.observe(self.on.install, self.on_install)
+        self.framework.observe(self.on.start, self.on_start)
+
+        # Listen to the touch action event
+        self.framework.observe(self.on.touch_action, self.on_touch_action)
+
+    def on_config_changed(self, event):
+        """Handle changes in configuration"""
+        super().on_config_changed(event)
+
+    def on_install(self, event):
+        """Called when the charm is being installed"""
+        super().on_install(event)
+
+    def on_start(self, event):
+        """Called when the charm is being started"""
+        super().on_start(event)
+
+    def on_touch_action(self, event):
+        """Touch a file."""
+
+        if self.model.unit.is_leader():
+            filename = event.params["filename"]
+            proxy = self.get_ssh_proxy()
+            stdout, stderr = proxy.run("touch {}".format(filename))
+            event.set_results({"output": stdout})
+        else:
+            event.fail("Unit is not leader")
+            return
+```
+
+### Attributes and methods available
+
+- Attributes:
+  - state: StoredState object. It can be used to store state data to be
+    shared within a charm across different hooks.
+- SSH related methods:
+  - get_ssh_proxy(): Return an SSHProxy object with which you can then
+    execute scp and ssh commands on the remote machine.
+  - verify_credentials(): Return True if it has the right credentials to SSH
+    into the remote machine. It also updates the status of the unit.
+- Charm related methods: Methods that should be run in specific hooks/events.
+  - on_install(): Install dependencies for enabling SSH functionality.
+  - on_start(): Generate needed SSH keys.
+  - on_config_changed(): Check whether the SSH credentials are valid and
+    update the unit status accordingly.
+
+### config.yaml
+
+You need to add this to your charm's config.yaml.
+
+```yaml
+options:
+  ssh-hostname:
+    type: string
+    default: ""
+    description: "The hostname or IP address of the machine to connect to."
+  ssh-username:
+    type: string
+    default: ""
+    description: "The username to login as."
+  ssh-password:
+    type: string
+    default: ""
+    description: "The password used to authenticate."
+  ssh-public-key:
+    type: string
+    default: ""
+    description: "The public key of this unit."
+  ssh-key-type:
+    type: string
+    default: "rsa"
+    description: "The type of encryption to use for the SSH key."
+  ssh-key-bits:
+    type: int
+    default: 4096
+    description: "The number of bits to use for the SSH key."
+
+```
+
+### metadata.yaml
+
+You need to add this to your charm's metadata.yaml.
+
+```yaml
+peers:
+  proxypeer:
+    interface: proxypeer
+```
+
+### actions.yaml
+
+You need to add this to your charm's actions.yaml.
+
+```yaml
+# Required by charms.osm.sshproxy
+run:
+  description: "Run an arbitrary command"
+  params:
+    command:
+      description: "The command to execute."
+      type: string
+      default: ""
+  required:
+    - command
+generate-ssh-key:
+  description: "Generate a new SSH keypair for this unit. This will replace any existing previously generated keypair."
+verify-ssh-credentials:
+  description: "Verify that this unit can authenticate with server specified by ssh-hostname and ssh-username."
+get-ssh-public-key:
+  description: "Get the public SSH key for this unit."
+``` + +## SSHProxy + +Example: + +```python +from charms.osm.sshproxy import SSHProxy + +# Check if SSH Proxy has key +if not SSHProxy.has_ssh_key(): + # Generate SSH Key + SSHProxy.generate_ssh_key() + +# Get generated public and private keys +SSHProxy.get_ssh_public_key() +SSHProxy.get_ssh_private_key() + +# Get Proxy +proxy = SSHProxy( + hostname=config["ssh-hostname"], + username=config["ssh-username"], + password=config["ssh-password"], +) + +# Verify credentials +verified = proxy.verify_credentials() + +if verified: + # Run commands in remote machine + proxy.run("touch /home/ubuntu/touch") +``` + +## Libansible + +```python +from charms.osm import libansible + +# Install ansible packages in the charm +libansible.install_ansible_support() + +result = libansible.execute_playbook( + "configure-remote.yaml", # Name of the playbook <-- Put the playbook in playbooks/ folder + config["ssh-hostname"], + config["ssh-username"], + config["ssh-password"], + dict_vars, # Dictionary with variables to populate in the playbook +) +``` + +## Usage + +Import submodules: + +```bash +git submodule add https://github.com/charmed-osm/charms.osm mod/charms.osm +git submodule add https://github.com/juju/charm-helpers.git mod/charm-helpers # Only for libansible +``` + +Add symlinks: + +```bash +mkdir -p lib/charms +ln -s ../mod/charms.osm/charms/osm lib/charms/osm +ln -s ../mod/charm-helpers/charmhelpers lib/charmhelpers # Only for libansible +``` diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/libansible.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/libansible.py new file mode 100644 index 0000000000000000000000000000000000000000..32fd26ae7d63d42edef33982d5438669b191a361 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/libansible.py @@ -0,0 +1,108 @@ +## +# Copyright 2020 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +## + +import fnmatch +import os +import yaml +import subprocess +import sys + +sys.path.append("lib") +import charmhelpers.fetch + + +ansible_hosts_path = "/etc/ansible/hosts" + + +def install_ansible_support(from_ppa=True, ppa_location="ppa:ansible/ansible"): + """Installs the ansible package. + + By default it is installed from the `PPA`_ linked from + the ansible `website`_ or from a ppa specified by a charm config.. + + .. _PPA: https://launchpad.net/~rquillo/+archive/ansible + .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu + + If from_ppa is empty, you must ensure that the package is available + from a configured repository. 
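+
+    Note: as a side effect (see the implementation below) this function also
+    rewrites /etc/ansible/hosts so that localhost uses a local ansible
+    connection.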
+ """ + if from_ppa: + charmhelpers.fetch.add_source(ppa_location) + charmhelpers.fetch.apt_update(fatal=True) + charmhelpers.fetch.apt_install("ansible") + with open(ansible_hosts_path, "w+") as hosts_file: + hosts_file.write("localhost ansible_connection=local") + + +def create_hosts(hostname, username, password, hosts): + inventory_path = "/etc/ansible/hosts" + + with open(inventory_path, "w") as f: + f.write("[{}]\n".format(hosts)) + h1 = "host ansible_host={0} ansible_user={1} ansible_password={2}\n".format( + hostname, username, password + ) + f.write(h1) + + +def create_ansible_cfg(): + ansible_config_path = "/etc/ansible/ansible.cfg" + + with open(ansible_config_path, "w") as f: + f.write("[defaults]\n") + f.write("host_key_checking = False\n") + + +# Function to find the playbook path +def find(pattern, path): + result = "" + for root, dirs, files in os.walk(path): + for name in files: + if fnmatch.fnmatch(name, pattern): + result = os.path.join(root, name) + return result + + +def execute_playbook(playbook_file, hostname, user, password, vars_dict=None): + playbook_path = find(playbook_file, "/var/lib/juju/agents/") + + with open(playbook_path, "r") as f: + playbook_data = yaml.load(f) + + hosts = "all" + if "hosts" in playbook_data[0].keys() and playbook_data[0]["hosts"]: + hosts = playbook_data[0]["hosts"] + + create_ansible_cfg() + create_hosts(hostname, user, password, hosts) + + call = "ansible-playbook {} ".format(playbook_path) + + if vars_dict and isinstance(vars_dict, dict) and len(vars_dict) > 0: + call += "--extra-vars " + + string_var = "" + for k,v in vars_dict.items(): + string_var += "{}={} ".format(k, v) + + string_var = string_var.strip() + call += '"{}"'.format(string_var) + + call = call.strip() + result = subprocess.check_output(call, shell=True) + + return result diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/ns.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/ns.py new file mode 100644 index 0000000000000000000000000000000000000000..25be4056282e48ce946632025be8b466557e3171 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/ns.py @@ -0,0 +1,301 @@ +# A prototype of a library to aid in the development and operation of +# OSM Network Service charms + +import asyncio +import logging +import os +import os.path +import re +import subprocess +import sys +import time +import yaml + +try: + import juju +except ImportError: + # Not all cloud images are created equal + if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"): + # Update the apt cache + subprocess.check_call(["apt-get", "update"]) + + # Install the Python3 package + subprocess.check_call(["apt-get", "install", "-y", "python3", "python3-pip"],) + + + # Install the libjuju build dependencies + subprocess.check_call(["apt-get", "install", "-y", "libffi-dev", "libssl-dev"],) + + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "juju"], + ) + +from juju.controller import Controller + +# Quiet the debug logging +logging.getLogger('websockets.protocol').setLevel(logging.INFO) +logging.getLogger('juju.client.connection').setLevel(logging.WARN) +logging.getLogger('juju.model').setLevel(logging.WARN) +logging.getLogger('juju.machine').setLevel(logging.WARN) + + +class NetworkService: + """A lightweight interface to the Juju controller. 
+
+    This NetworkService client is specifically designed to allow a higher-level
+    "NS" charm to interoperate with "VNF" charms, allowing for the execution of
+    Primitives across other charms within the same model.
+    """
+    endpoint = None
+    user = 'admin'
+    secret = None
+    port = 17070
+    loop = None
+    client = None
+    model = None
+    cacert = None
+
+    def __init__(self, user, secret, endpoint=None):
+
+        self.user = user
+        self.secret = secret
+        if endpoint is None:
+            addresses = os.environ['JUJU_API_ADDRESSES']
+            for address in addresses.split(' '):
+                self.endpoint = address
+        else:
+            self.endpoint = endpoint
+
+        # Stash the name of the model
+        self.model = os.environ['JUJU_MODEL_NAME']
+
+        # Load the ca-cert from agent.conf
+        AGENT_PATH = os.path.dirname(os.environ['JUJU_CHARM_DIR'])
+        with open("{}/agent.conf".format(AGENT_PATH), "r") as f:
+            try:
+                y = yaml.safe_load(f)
+                self.cacert = y['cacert']
+            except yaml.YAMLError as exc:
+                print("Unable to find Juju ca-cert.")
+                raise exc
+
+        # Create our event loop
+        self.loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(self.loop)
+
+    async def connect(self):
+        """Connect to the Juju controller."""
+        controller = Controller()
+
+        print(
+            "Connecting to controller... ws://{}:{} as {}/{}".format(
+                self.endpoint,
+                self.port,
+                self.user,
+                self.secret[-4:].rjust(len(self.secret), "*"),
+            )
+        )
+        await controller.connect(
+            endpoint=self.endpoint,
+            username=self.user,
+            password=self.secret,
+            cacert=self.cacert,
+        )
+
+        return controller
+
+    def __del__(self):
+        self.logout()
+
+    async def disconnect(self):
+        """Disconnect from the Juju controller."""
+        if self.client:
+            print("Disconnecting Juju controller")
+            await self.client.disconnect()
+
+    def login(self):
+        """Login to the Juju controller."""
+        if not self.client:
+            # Connect to the Juju API server
+            self.client = self.loop.run_until_complete(self.connect())
+        return self.client
+
+    def logout(self):
+        """Logout of the Juju controller."""
+
+        if self.loop:
+            print("Disconnecting from API")
+            self.loop.run_until_complete(self.disconnect())
+
+    def FormatApplicationName(self, *args):
+        """
+        Generate a Juju-compatible Application name
+
+        :param args tuple: Positional arguments to be used to construct the
+        application name.
+
+        Limitations::
+        - Only accepts characters a-z and non-consecutive dashes (-)
+        - Application name should not exceed 50 characters
+
+        Examples::
+
+            FormatApplicationName("ping_pong_ns", "ping_vnf", "a")
+        """
+        appname = ""
+        for c in "-".join(list(args)):
+            if c.isdigit():
+                c = chr(97 + int(c))
+            elif not c.isalpha():
+                c = "-"
+            appname += c
+
+        return re.sub('-+', '-', appname.lower())
+
+    def GetApplicationName(self, nsr_name, vnf_name, vnf_member_index):
+        """Get the runtime application name of a VNF/VDU.
+
+        This will generate an application name matching the name of the
+        deployed charm, given the right parameters.
+
+        :param nsr_name str: The name of the running Network Service, as
+        specified at instantiation.
+        :param vnf_name str: The name of the VNF or VDU
+        :param vnf_member_index: The vnf-member-index as specified in the descriptor
+        """
+
+        application_name = self.FormatApplicationName(nsr_name, vnf_member_index, vnf_name)
+
+        # This matches the logic used by the LCM
+        application_name = application_name[0:48]
+        vca_index = int(vnf_member_index) - 1
+        application_name += '-' + chr(97 + vca_index // 26) + chr(97 + vca_index % 26)
+
+        return application_name
+
+    def ExecutePrimitiveGetOutput(self, application, primitive, params={}, timeout=600):
+        """Execute a single primitive and return its output.
+
+        This is a blocking method that will execute a single primitive and
+        wait for its completion before returning its output.
+
+        :param application str: The application name provided by `GetApplicationName`.
+        :param primitive str: The name of the primitive to execute.
+        :param params list: A list of parameters.
+        :param timeout int: A timeout, in seconds, to wait for the primitive to finish. Defaults to 600 seconds.
+        """
+        uuid = self.ExecutePrimitive(application, primitive, params)
+
+        status = None
+        output = None
+
+        starttime = time.time()
+        while(time.time() < starttime + timeout):
+            status = self.GetPrimitiveStatus(uuid)
+            if status in ['completed', 'failed']:
+                break
+            time.sleep(10)
+
+        # When the primitive is done, get the output
+        if status in ['completed', 'failed']:
+            output = self.GetPrimitiveOutput(uuid)
+
+        return output
+
+    def ExecutePrimitive(self, application, primitive, params={}):
+        """Execute a primitive.
+
+        This is a non-blocking method to execute a primitive. It will return
+        the UUID of the queued primitive execution, which you can use
+        for subsequent calls to `GetPrimitiveStatus` and `GetPrimitiveOutput`.
+
+        :param application string: The name of the application
+        :param primitive string: The name of the Primitive.
+        :param params list: A list of parameters.
+
+        :returns uuid string: The UUID of the executed Primitive
+        """
+        uuid = None
+
+        if not self.client:
+            self.login()
+
+        model = self.loop.run_until_complete(
+            self.client.get_model(self.model)
+        )
+
+        # Get the application
+        if application in model.applications:
+            app = model.applications[application]
+
+            # Execute the primitive
+            unit = app.units[0]
+            if unit:
+                action = self.loop.run_until_complete(
+                    unit.run_action(primitive, **params)
+                )
+                uuid = action.id
+                print("Executing action: {}".format(uuid))
+            self.loop.run_until_complete(
+                model.disconnect()
+            )
+        else:
+            # Invalid mapping: application not found. Raise exception
+            raise Exception("Application not found: {}".format(application))
+
+        return uuid
+
+    def GetPrimitiveStatus(self, uuid):
+        """Get the status of a Primitive execution.
+
+        This will return one of the following strings:
+        - pending
+        - running
+        - completed
+        - failed
+
+        :param uuid string: The UUID of the executed Primitive.
+        :returns: The status of the executed Primitive
+        """
+        status = None
+
+        if not self.client:
+            self.login()
+
+        model = self.loop.run_until_complete(
+            self.client.get_model(self.model)
+        )
+
+        status = self.loop.run_until_complete(
+            model.get_action_status(uuid)
+        )
+
+        self.loop.run_until_complete(
+            model.disconnect()
+        )
+
+        return status[uuid]
+
+    def GetPrimitiveOutput(self, uuid):
+        """Get the output of a completed Primitive execution.
+
+        :param uuid string: The UUID of the executed Primitive.
+        :returns: The output of the execution, or None if it's still running.
+ """ + result = None + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + result = self.loop.run_until_complete( + model.get_action_output(uuid) + ) + + self.loop.run_until_complete( + model.disconnect() + ) + + return result diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/proxy_cluster.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/proxy_cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..f323a3af88f3011ef9fde794cdfe343be8665abb --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/proxy_cluster.py @@ -0,0 +1,59 @@ +import socket + +from ops.framework import Object, StoredState + + +class ProxyCluster(Object): + + state = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self._relation_name = relation_name + self._relation = self.framework.model.get_relation(self._relation_name) + + self.framework.observe(charm.on.ssh_keys_initialized, self.on_ssh_keys_initialized) + + self.state.set_default(ssh_public_key=None) + self.state.set_default(ssh_private_key=None) + + def on_ssh_keys_initialized(self, event): + if not self.framework.model.unit.is_leader(): + raise RuntimeError("The initial unit of a cluster must also be a leader.") + + self.state.ssh_public_key = event.ssh_public_key + self.state.ssh_private_key = event.ssh_private_key + if not self.is_joined: + event.defer() + return + + self._relation.data[self.model.app][ + "ssh_public_key" + ] = self.state.ssh_public_key + self._relation.data[self.model.app][ + "ssh_private_key" + ] = self.state.ssh_private_key + + @property + def is_joined(self): + return self._relation is not None + + @property + def ssh_public_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_public_key") + + @property + def ssh_private_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_private_key") + + @property + def is_cluster_initialized(self): + return ( + True + if self.is_joined + and self._relation.data[self.model.app].get("ssh_public_key") + and self._relation.data[self.model.app].get("ssh_private_key") + else False + ) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/sshproxy.py b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/sshproxy.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c311e5be8515a7fe19c05dfed9f042ded0576b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/charms.osm/charms/osm/sshproxy.py @@ -0,0 +1,375 @@ +"""Module to help with executing commands over SSH.""" +## +# Copyright 2016 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+##
+
+# from charmhelpers.core import unitdata
+# from charmhelpers.core.hookenv import log
+
+import io
+import ipaddress
+import subprocess
+import os
+import socket
+import shlex
+import traceback
+import sys
+
+from subprocess import (
+    check_call,
+    Popen,
+    CalledProcessError,
+    PIPE,
+)
+
+from ops.charm import CharmBase, CharmEvents
+from ops.framework import StoredState, EventBase, EventSource
+from ops.main import main
+from ops.model import (
+    ActiveStatus,
+    BlockedStatus,
+    MaintenanceStatus,
+    WaitingStatus,
+    ModelError,
+)
+from .proxy_cluster import ProxyCluster
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class SSHKeysInitialized(EventBase):
+    def __init__(self, handle, ssh_public_key, ssh_private_key):
+        super().__init__(handle)
+        self.ssh_public_key = ssh_public_key
+        self.ssh_private_key = ssh_private_key
+
+    def snapshot(self):
+        return {
+            "ssh_public_key": self.ssh_public_key,
+            "ssh_private_key": self.ssh_private_key,
+        }
+
+    def restore(self, snapshot):
+        self.ssh_public_key = snapshot["ssh_public_key"]
+        self.ssh_private_key = snapshot["ssh_private_key"]
+
+
+class ProxyClusterEvents(CharmEvents):
+    ssh_keys_initialized = EventSource(SSHKeysInitialized)
+
+
+class SSHProxyCharm(CharmBase):
+
+    state = StoredState()
+    on = ProxyClusterEvents()
+
+    def __init__(self, framework, key):
+        super().__init__(framework, key)
+
+        self.peers = ProxyCluster(self, "proxypeer")
+
+        # SSH Proxy actions (primitives)
+        self.framework.observe(self.on.generate_ssh_key_action, self.on_generate_ssh_key_action)
+        self.framework.observe(self.on.get_ssh_public_key_action, self.on_get_ssh_public_key_action)
+        self.framework.observe(self.on.run_action, self.on_run_action)
+        self.framework.observe(self.on.verify_ssh_credentials_action, self.on_verify_ssh_credentials_action)
+
+        self.framework.observe(self.on.proxypeer_relation_changed, self.on_proxypeer_relation_changed)
+
+    def get_ssh_proxy(self):
+        """Get the SSHProxy instance"""
+        proxy = SSHProxy(
+            hostname=self.model.config["ssh-hostname"],
+            username=self.model.config["ssh-username"],
+            password=self.model.config["ssh-password"],
+        )
+        return proxy
+
+    def on_proxypeer_relation_changed(self, event):
+        if self.peers.is_cluster_initialized and not SSHProxy.has_ssh_key():
+            pubkey = self.peers.ssh_public_key
+            privkey = self.peers.ssh_private_key
+            SSHProxy.write_ssh_keys(public=pubkey, private=privkey)
+            self.verify_credentials()
+        else:
+            event.defer()
+
+    def on_config_changed(self, event):
+        """Handle changes in configuration"""
+        self.verify_credentials()
+
+    def on_install(self, event):
+        SSHProxy.install()
+
+    def on_start(self, event):
+        """Called when the charm is being started"""
+        if not self.peers.is_joined:
+            event.defer()
+            return
+
+        unit = self.model.unit
+
+        if not SSHProxy.has_ssh_key():
+            unit.status = MaintenanceStatus("Generating SSH keys...")
+            if self.model.unit.is_leader():
+                if self.peers.is_cluster_initialized:
+                    SSHProxy.write_ssh_keys(
+                        public=self.peers.ssh_public_key,
+                        private=self.peers.ssh_private_key,
+                    )
+                else:
+                    SSHProxy.generate_ssh_key()
+                    self.on.ssh_keys_initialized.emit(
+                        SSHProxy.get_ssh_public_key(), SSHProxy.get_ssh_private_key()
+                    )
+        self.verify_credentials()
+
+    def verify_credentials(self):
+        unit = self.model.unit
+
+        # Unit should go into a waiting state until verify_ssh_credentials is successful
+        unit.status = WaitingStatus("Waiting for SSH credentials")
+        proxy = self.get_ssh_proxy()
+        verified, 
_ = proxy.verify_credentials()
+        if verified:
+            unit.status = ActiveStatus()
+        else:
+            unit.status = BlockedStatus("Invalid SSH credentials.")
+        return verified
+
+    #####################
+    # SSH Proxy methods #
+    #####################
+    def on_generate_ssh_key_action(self, event):
+        """Generate a new SSH keypair for this unit."""
+        if self.model.unit.is_leader():
+            if not SSHProxy.generate_ssh_key():
+                event.fail("Unable to generate ssh key")
+        else:
+            event.fail("Unit is not leader")
+            return
+
+    def on_get_ssh_public_key_action(self, event):
+        """Get the SSH public key for this unit."""
+        if self.model.unit.is_leader():
+            pubkey = SSHProxy.get_ssh_public_key()
+            event.set_results({"pubkey": pubkey})
+        else:
+            event.fail("Unit is not leader")
+            return
+
+    def on_run_action(self, event):
+        """Run an arbitrary command on the remote host."""
+        if self.model.unit.is_leader():
+            cmd = event.params["command"]
+            proxy = self.get_ssh_proxy()
+            stdout, stderr = proxy.run(cmd)
+            event.set_results({"output": stdout})
+            if len(stderr):
+                event.fail(stderr)
+        else:
+            event.fail("Unit is not leader")
+            return
+
+    def on_verify_ssh_credentials_action(self, event):
+        """Verify the SSH credentials for this unit."""
+        unit = self.model.unit
+        if unit.is_leader():
+            proxy = self.get_ssh_proxy()
+            verified, stderr = proxy.verify_credentials()
+            if verified:
+                event.set_results({"verified": True})
+                unit.status = ActiveStatus()
+            else:
+                event.set_results({"verified": False, "stderr": stderr})
+                event.fail("Not verified")
+                unit.status = BlockedStatus("Invalid SSH credentials.")
+        else:
+            event.fail("Unit is not leader")
+            return
+
+
+class LeadershipError(ModelError):
+    def __init__(self):
+        super().__init__("not leader")
+
+
+class SSHProxy:
+    private_key_path = "/root/.ssh/id_sshproxy"
+    public_key_path = "/root/.ssh/id_sshproxy.pub"
+    key_type = "rsa"
+    key_bits = 4096
+
+    def __init__(self, hostname: str, username: str, password: str = ""):
+        self.hostname = hostname
+        self.username = username
+        self.password = password
+
+    @staticmethod
+    def install():
+        check_call("apt update && apt install -y openssh-client sshpass", shell=True)
+
+    @staticmethod
+    def generate_ssh_key():
+        """Generate a 4096-bit rsa keypair."""
+        if not os.path.exists(SSHProxy.private_key_path):
+            cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format(
+                SSHProxy.key_type, SSHProxy.key_bits, SSHProxy.private_key_path,
+            )
+
+            try:
+                check_call(cmd, shell=True)
+            except CalledProcessError:
+                return False
+
+        return True
+
+    @staticmethod
+    def write_ssh_keys(public, private):
+        """Write a 4096-bit rsa keypair."""
+        with open(SSHProxy.public_key_path, "w") as f:
+            f.write(public)
+        with open(SSHProxy.private_key_path, "w") as f:
+            f.write(private)
+
+    @staticmethod
+    def get_ssh_public_key():
+        publickey = ""
+        if os.path.exists(SSHProxy.private_key_path):
+            with open(SSHProxy.public_key_path, "r") as f:
+                publickey = f.read()
+        return publickey
+
+    @staticmethod
+    def get_ssh_private_key():
+        privatekey = ""
+        if os.path.exists(SSHProxy.private_key_path):
+            with open(SSHProxy.private_key_path, "r") as f:
+                privatekey = f.read()
+        return privatekey
+
+    @staticmethod
+    def has_ssh_key():
+        return os.path.exists(SSHProxy.private_key_path)
+
+    def run(self, cmd: str) -> (str, str):
+        """Run a command remotely via SSH.
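+
+        A minimal sketch of expected use (the host and credentials below are
+        illustrative; they would normally come from the charm's ssh-* config)::
+
+            proxy = SSHProxy("10.0.0.1", "admin", password="secret")
+            stdout, stderr = proxy.run("show version")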
+
+        Note: The previous behavior was to run the command locally if SSH wasn't
+        configured, but that can lead to cases where execution succeeds when you'd
+        expect it not to.
+        """
+        if isinstance(cmd, str):
+            cmd = shlex.split(cmd)
+
+        host = self._get_hostname()
+        user = self.username
+
+        # Make sure we have everything we need to connect
+        if host and user:
+            return self.ssh(cmd)
+
+        raise Exception("Invalid SSH credentials.")
+
+    def scp(self, source_file, destination_file):
+        """Execute an scp command. Requires a fully qualified source and
+        destination.
+
+        :param str source_file: Path to the source file
+        :param str destination_file: Path to the destination file
+        :raises: :class:`CalledProcessError` if the command fails
+        """
+        cmd = [
+            "sshpass",
+            "-p",
+            self.password,
+            "scp",
+            "-i",
+            os.path.expanduser(self.private_key_path),
+            "-o",
+            "StrictHostKeyChecking=no",
+            "-q",
+            "-B",
+        ]
+        destination = "{}@{}:{}".format(self.username, self.hostname, destination_file)
+        cmd.extend([source_file, destination])
+        subprocess.run(cmd, check=True)
+
+    def ssh(self, command):
+        """Run a command remotely via SSH.
+
+        :param list(str) command: The command to execute
+        :return: tuple: The stdout and stderr of the command execution
+        :raises: :class:`CalledProcessError` if the command fails
+        """
+
+        destination = "{}@{}".format(self.username, self.hostname)
+        cmd = [
+            "sshpass",
+            "-p",
+            self.password,
+            "ssh",
+            "-i",
+            os.path.expanduser(self.private_key_path),
+            "-o",
+            "StrictHostKeyChecking=no",
+            "-q",
+            destination,
+        ]
+        cmd.extend(command)
+        output = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        return (output.stdout.decode("utf-8").strip(), output.stderr.decode("utf-8").strip())
+
+    def verify_credentials(self):
+        """Verify the SSH credentials.
+
+        :return (bool, str): Verified, Stderr
+        """
+        verified = False
+        try:
+            (stdout, stderr) = self.run("hostname")
+            verified = True
+        except CalledProcessError as e:
+            stderr = "Command failed: {} ({})".format(" ".join(e.cmd), str(e.output))
+        except (TimeoutError, socket.timeout):
+            stderr = "Timeout attempting to reach {}".format(self._get_hostname())
+        except Exception:
+            tb = traceback.format_exc()
+            stderr = "Unhandled exception: {}".format(tb)
+        return verified, stderr
+
+    ###################
+    # Private methods #
+    ###################
+    def _get_hostname(self):
+        """Get the hostname for the ssh target.
+
+        HACK: This function was added to work around an issue where the
+        ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first
+        is the floating ip, and the second the non-floating ip, for an Openstack
+        instance. 
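+
+        For example (hypothetical addresses)::
+
+            SSHProxy("10.0.0.1;192.168.0.1", "admin")._get_hostname()
+            # returns "10.0.0.1"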
+ """ + return self.hostname.split(";")[0] diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.flake8 b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.flake8 new file mode 100644 index 0000000000000000000000000000000000000000..61d908155588a7968dd25c90cff349377305a789 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 99 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.gitignore b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5596c502618d6428de0199c4cec069c151c0edd3 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.gitignore @@ -0,0 +1,4 @@ +__pycache__ +/sandbox +.idea +docs/_build diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.readthedocs.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.readthedocs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6989fdb6a173e6f75343fd5d2953acb9c24b0a2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.readthedocs.yaml @@ -0,0 +1,6 @@ +version: 2 # required +formats: [] # i.e. no extra formats (for now) +python: + version: "3.5" + install: + - requirements: docs/requirements.txt diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.travis.yml b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.travis.yml new file mode 100644 index 0000000000000000000000000000000000000000..5aa83238027ae5dccb4a797e6c7ed456f1cfe5de --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/.travis.yml @@ -0,0 +1,24 @@ +dist: bionic + +language: python + +arch: + - amd64 + - arm64 + +python: + - "3.5" + - "3.6" + - "3.7" + - "3.8" + +matrix: + include: + - os: osx + language: generic + +install: + - pip3 install -r requirements-dev.txt + +script: + - ./run_tests diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/CODE_OF_CONDUCT.md b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..b5bc1b1f8f266e3c7e15e8b8db490f688a1e5230 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/CODE_OF_CONDUCT.md @@ -0,0 +1,78 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by contacting the project team at +charmcrafters@lists.launchpad.net. All complaints will be reviewed and +investigated and will result in a response that is deemed necessary +and appropriate to the circumstances. The project team is obligated to +maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted +separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/LICENSE.txt b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/README.md b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3b4e63f67906e9b9cbaf4fe2055e35005b1380a7
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/README.md
@@ -0,0 +1,138 @@
+# The Operator Framework
+
+The Operator Framework provides a simple, lightweight, and powerful way of
+writing Juju charms, the best way to encapsulate operational experience in code.
+
+The framework will help you to:
+
+* model the integration of your services
+* manage the lifecycle of your application
+* create reusable and scalable components
+* keep your code simple and readable
+
+## Getting Started
+
+Charms written using the operator framework are just Python code. The intention
+is for it to feel very natural for somebody used to coding in Python, and
+reasonably easy to pick up for somebody who might be a domain expert but not
+necessarily a pythonista themselves.
+
+The dependencies of the operator framework are kept as minimal as possible;
+currently that's Python 3.5 or greater, and `PyYAML` (both are included by
+default in Ubuntu's cloud images from 16.04 on).
+
+
+## A Quick Introduction
+
+Operator framework charms are just Python code. The entry point to your charm is
+a particular Python file. It could be anything that makes sense to your project,
+but let's assume this is `src/charm.py`. This file must be executable (and it
+must have the appropriate shebang line).
+
+You need the usual `metadata.yaml` and (probably) `config.yaml` files, and a
+`requirements.txt` for any Python dependencies. In other words, your project
+might look like this:
+
+```
+my-charm
+├── config.yaml
+├── metadata.yaml
+├── requirements.txt
+└── src/
+    └── charm.py
+```
+
+`src/charm.py` here is the entry point to your charm code. At a minimum, it
+needs to define a subclass of `CharmBase` and pass that into the framework's
+`main` function:
+
+```python
+from ops.charm import CharmBase
+from ops.main import main
+
+class MyCharm(CharmBase):
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.framework.observe(self.on.start, self.on_start)
+
+    def on_start(self, event):
+        pass  # Handle the start event here.
+
+if __name__ == "__main__":
+    main(MyCharm)
+```
+
+That should be enough for you to be able to run
+
+```
+$ charmcraft build
+Done, charm left in 'my-charm.charm'
+$ juju deploy my-charm.charm
+```
+
+> 🛈 More information on [`charmcraft`](https://pypi.org/project/charmcraft/) can
+> also be found on its [github page](https://github.com/canonical/charmcraft).
+
+Happy charming!
+
+## Testing your charms
+
+The operator framework provides a testing harness, so that you can test that
+your charm does the right thing when presented with different scenarios, without
+having to have a full deployment to do so. `pydoc3 ops.testing` has the details
+for that, including this example:
+
+```python
+harness = Harness(MyCharm)
+# Do initial setup here
+relation_id = harness.add_relation('db', 'postgresql')
+# Now instantiate the charm to see events as the model changes
+harness.begin()
+harness.add_relation_unit(relation_id, 'postgresql/0')
+harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+# Check that charm has properly handled the relation_joined event for postgresql/0
+self.assertEqual(harness.charm. ...)
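+# The exact assertion depends on what MyCharm stores; for instance, with a
+# hypothetical StoredState attribute written by the relation handler:
+# self.assertEqual(harness.charm.state.db_info, {'key': 'val'})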
+``` + +## Talk to us + +If you need help, have ideas, or would just like to chat with us, reach out on +IRC: we're in [#smooth-operator] on freenode (or try the [webchat]). + +We also pay attention to Juju's [discourse], but currently we don't actively +post there outside of our little corner of the [docs]; most discussion at this +stage is on IRC. + +[webchat]: https://webchat.freenode.net/#smooth-operator +[#smooth-operator]: irc://chat.freenode.net/%23smooth-operator +[discourse]: https://discourse.juju.is/c/charming +[docs]: https://discourse.juju.is/c/docs/operator-framework + +## Operator Framework development + +If you want to work in the framework *itself* you will need Python >= 3.5 and +the dependencies declared in `requirements-dev.txt` installed in your system. +Or you can use a virtualenv: + + virtualenv --python=python3 env + source env/bin/activate + pip install -r requirements-dev.txt + +Then you can try `./run_tests`, it should all go green. + +If you want to build the documentation you'll need the requirements from +`docs/requirements.txt`, or in your virtualenv + + pip install -r docs/requirements.txt + +and then you can run `./build_docs`. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/build_docs b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/build_docs new file mode 100755 index 0000000000000000000000000000000000000000..af8b892f7568bdda5ac892beaeafad5696e607d3 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/build_docs @@ -0,0 +1,18 @@ +#!/bin/sh + +set -e + +flavour=html + +if [ "$1" ]; then + if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then + flavour=help + else + flavour="$1" + fi + shift +fi + +cd docs + +sphinx-build -M "$flavour" . _build "$@" diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/docs/conf.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/docs/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..c8d3e3d57b11cbd9ae6bb144336728e433ae1d28 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/docs/conf.py @@ -0,0 +1,97 @@ +# Configuration file for the Sphinx documentation builder. +# +# For a full list of options see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + + +# -- Path setup -------------------------------------------------------------- + +from pathlib import Path +import sys +sys.path.insert(0, str(Path(__file__).parent.parent)) + + +# -- Project information ----------------------------------------------------- + +project = 'The Operator Framework' +copyright = '2019-2020, Canonical Ltd.' +author = 'Canonical Ltd' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.todo', + 'sphinx.ext.viewcode', + 'sphinx.ext.intersphinx', +] + +# The document name of the “master” document, that is, the document +# that contains the root toctree directive. +master_doc = 'index' + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. 
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' # 'alabaster' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + + +# -- Options for sphinx.ext.todo --------------------------------------------- + +# If this is True, todo and todolist produce output, else they +# produce nothing. The default is False. +todo_include_todos = False + + +# -- Options for sphinx.ext.autodoc ------------------------------------------ + +# This value controls how to represents typehints. The setting takes the +# following values: +# 'signature' – Show typehints as its signature (default) +# 'description' – Show typehints as content of function or method +# 'none' – Do not show typehints +autodoc_typehints = 'description' + +# This value selects what content will be inserted into the main body of an +# autoclass directive. The possible values are: +# 'class' - Only the class’ docstring is inserted. This is the +# default. You can still document __init__ as a separate method +# using automethod or the members option to autoclass. +# 'both' - Both the class’ and the __init__ method’s docstring are +# concatenated and inserted. +# 'init' - Only the __init__ method’s docstring is inserted. +autoclass_content = 'both' + +autodoc_default_options = { + 'members': None, # None here means "yes" + 'undoc-members': None, + 'show-inheritance': None, +} + + +# -- Options for sphinx.ext.intersphinx -------------------------------------- + +# This config value contains the locations and names of other projects +# that should be linked to in this documentation. +intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/docs/index.rst b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/docs/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..424d78d423e1e7dd5d4049c9f88ffaec994c1714 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/docs/index.rst @@ -0,0 +1,58 @@ + +Welcome to The Operator Framework's documentation! +================================================== + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + +ops package +=========== + +.. automodule:: ops + +Submodules +---------- + +ops.charm module +---------------- + +.. automodule:: ops.charm + +ops.framework module +-------------------- + +.. automodule:: ops.framework + +ops.jujuversion module +---------------------- + +.. automodule:: ops.jujuversion + +ops.log module +-------------- + +.. automodule:: ops.log + +ops.main module +--------------- + +.. automodule:: ops.main + +ops.model module +---------------- + +.. automodule:: ops.model + +ops.testing module +------------------ + +.. 
automodule:: ops.testing + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/docs/requirements.txt b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..217e49dd6e684b07eaac18aec4c2f717e5f9f5f0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/docs/requirements.txt @@ -0,0 +1,5 @@ +-r ../requirements.txt + +sphinx==3.* +sphinx_rtd_theme + diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f17b2969db298b21bc47bbe1d3614ccff93e9c6e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The Operator Framework.""" + +from .version import version as __version__ # noqa: F401 (imported but unused) + +# Import here the bare minimum to break the circular import between modules +from . import charm # noqa: F401 (imported but unused) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/charm.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..d898de859fc444814bc19a7f8f0caaaec6f7e5f4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/charm.py @@ -0,0 +1,575 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +import os +import pathlib +import typing + +import yaml + +from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents +from ops import model + + +def _loadYaml(source): + if yaml.__with_libyaml__: + return yaml.load(source, Loader=yaml.CSafeLoader) + return yaml.load(source, Loader=yaml.SafeLoader) + + +class HookEvent(EventBase): + """A base class for events that trigger because of a Juju hook firing.""" + + +class ActionEvent(EventBase): + """A base class for events that trigger when a user asks for an Action to be run. + + To read the parameters for the action, see the instance variable `params`. + To respond with the result of the action, call `set_results`. To add progress + messages that are visible as the action is progressing use `log`. 
+
+    :ivar params: The parameters passed to the action (read by action-get)
+    """
+
+    def defer(self):
+        """Action events are not deferrable like other events.
+
+        This is because an action runs synchronously and the user is waiting for the result.
+        """
+        raise RuntimeError('cannot defer action events')
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the operator framework to record the action.
+
+        Not meant to be called directly by Charm code.
+        """
+        env_action_name = os.environ.get('JUJU_ACTION_NAME')
+        event_action_name = self.handle.kind[:-len('_action')].replace('_', '-')
+        if event_action_name != env_action_name:
+            # This could only happen if the dev manually emits the action, or from a bug.
+            raise RuntimeError('action event kind does not match current action')
+        # Params are loaded at restore rather than __init__ because
+        # the model is not available in __init__.
+        self.params = self.framework.model._backend.action_get()
+
+    def set_results(self, results: typing.Mapping) -> None:
+        """Report the result of the action.
+
+        Args:
+            results: The result of the action as a Dict
+        """
+        self.framework.model._backend.action_set(results)
+
+    def log(self, message: str) -> None:
+        """Send a message that a user will see while the action is running.
+
+        Args:
+            message: The message for the user.
+        """
+        self.framework.model._backend.action_log(message)
+
+    def fail(self, message: str = '') -> None:
+        """Report that this action has failed.
+
+        Args:
+            message: Optional message to record why it has failed.
+        """
+        self.framework.model._backend.action_fail(message)
+
+
+class InstallEvent(HookEvent):
+    """Represents the `install` hook from Juju."""
+
+
+class StartEvent(HookEvent):
+    """Represents the `start` hook from Juju."""
+
+
+class StopEvent(HookEvent):
+    """Represents the `stop` hook from Juju."""
+
+
+class RemoveEvent(HookEvent):
+    """Represents the `remove` hook from Juju."""
+
+
+class ConfigChangedEvent(HookEvent):
+    """Represents the `config-changed` hook from Juju."""
+
+
+class UpdateStatusEvent(HookEvent):
+    """Represents the `update-status` hook from Juju."""
+
+
+class UpgradeCharmEvent(HookEvent):
+    """Represents the `upgrade-charm` hook from Juju.
+
+    This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju
+    has unpacked the upgraded charm code, and so this event will be handled with new code.
+    """
+
+
+class PreSeriesUpgradeEvent(HookEvent):
+    """Represents the `pre-series-upgrade` hook from Juju.
+
+    This happens when a user has run `juju upgrade-series MACHINE prepare` and
+    will fire for each unit that is running on the machine, telling them that
+    the user is preparing to upgrade the Machine's series (eg trusty->bionic).
+    The charm should take actions to prepare for the upgrade (a database charm
+    would want to write out a version-independent dump of the database, so that
+    when a new version of the database is available in a new series, it can be
+    used).
+    Once all units on a machine have run `pre-series-upgrade`, the user will
+    initiate the steps to actually upgrade the machine (eg `do-release-upgrade`).
+    When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire.
+    """
+
+
+class PostSeriesUpgradeEvent(HookEvent):
+    """Represents the `post-series-upgrade` hook from Juju.
+
+    This is run after the user has done a distribution upgrade (or rolled back
+    and kept the same series). It is called in response to
+    `juju upgrade-series MACHINE complete`. 
Charms are expected to do whatever + steps are necessary to reconfigure their applications for the new series. + """ + + +class LeaderElectedEvent(HookEvent): + """Represents the `leader-elected` hook from Juju. + + Juju will trigger this when a new lead unit is chosen for a given application. + This represents the leader of the charm information (not necessarily the primary + of a running application). The main utility is that charm authors can know + that only one unit will be a leader at any given time, so they can do + configuration, etc, that would otherwise require coordination between units. + (eg, selecting a password for a new relation) + """ + + +class LeaderSettingsChangedEvent(HookEvent): + """Represents the `leader-settings-changed` hook from Juju. + + Deprecated. This represents when a lead unit would call `leader-set` to inform + the other units of an application that they have new information to handle. + This has been deprecated in favor of using a Peer relation, and having the + leader set a value in the Application data bag for that peer relation. + (see :class:`RelationChangedEvent`). + """ + + +class CollectMetricsEvent(HookEvent): + """Represents the `collect-metrics` hook from Juju. + + Note that events firing during a CollectMetricsEvent are currently + sandboxed in how they can interact with Juju. To report metrics + use :meth:`.add_metrics`. + """ + + def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None: + """Record metrics that have been gathered by the charm for this unit. + + Args: + metrics: A collection of {key: float} pairs that contains the + metrics that have been gathered + labels: {key:value} strings that can be applied to the + metrics that are being gathered + """ + self.framework.model._backend.add_metrics(metrics, labels) + + +class RelationEvent(HookEvent): + """A base class representing the various relation lifecycle events. + + Charmers should not be creating RelationEvents directly. The events will be + generated by the framework from Juju related events. Users can observe them + from the various `CharmBase.on[relation_name].relation_*` events. + + Attributes: + relation: The Relation involved in this event + app: The remote application that has triggered this event + unit: The remote unit that has triggered this event. This may be None + if the relation event was triggered as an Application level event + """ + + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit is not None and unit.app != app: + raise RuntimeError( + 'cannot create RelationEvent with application {} and unit {}'.format(app, unit)) + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self) -> dict: + """Used by the framework to serialize the event to disk. + + Not meant to be called by Charm code. + """ + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot: dict) -> None: + """Used by the framework to deserialize the event from disk. + + Not meant to be called by Charm code. 
+ """ + self.relation = self.framework.model.get_relation( + snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationCreatedEvent(RelationEvent): + """Represents the `relation-created` hook from Juju. + + This is triggered when a new relation to another app is added in Juju. This + can occur before units for those applications have started. All existing + relations should be established before start. + """ + + +class RelationJoinedEvent(RelationEvent): + """Represents the `relation-joined` hook from Juju. + + This is triggered whenever a new unit of a related application joins the relation. + (eg, a unit was added to an existing related app, or a new relation was established + with an application that already had units.) + """ + + +class RelationChangedEvent(RelationEvent): + """Represents the `relation-changed` hook from Juju. + + This is triggered whenever there is a change to the data bucket for a related + application or unit. Look at `event.relation.data[event.unit/app]` to see the + new information. + """ + + +class RelationDepartedEvent(RelationEvent): + """Represents the `relation-departed` hook from Juju. + + This is the inverse of the RelationJoinedEvent, representing when a unit + is leaving the relation (the unit is being removed, the app is being removed, + the relation is being removed). It is fired once for each unit that is + going away. + """ + + +class RelationBrokenEvent(RelationEvent): + """Represents the `relation-broken` hook from Juju. + + If a relation is being removed (`juju remove-relation` or `juju remove-application`), + once all the units have been removed, RelationBrokenEvent will fire to signal + that the relationship has been fully terminated. + """ + + +class StorageEvent(HookEvent): + """Base class representing Storage related events.""" + + +class StorageAttachedEvent(StorageEvent): + """Represents the `storage-attached` hook from Juju. + + Called when new storage is available for the charm to use. + """ + + +class StorageDetachingEvent(StorageEvent): + """Represents the `storage-detaching` hook from Juju. + + Called when storage a charm has been using is going away. + """ + + +class CharmEvents(ObjectEvents): + """The events that are generated by Juju in response to the lifecycle of an application.""" + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + remove = EventSource(RemoveEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + collect_metrics = EventSource(CollectMetricsEvent) + + +class CharmBase(Object): + """Base class that represents the Charm overall. + + Usually this initialization is done by ops.main.main() rather than Charm authors + directly instantiating a Charm. + + Args: + framework: The framework responsible for managing the Model and events for this + Charm. + key: Ignored; will remove after deprecation period of the signature change. 
+ """ + + on = CharmEvents() + + def __init__(self, framework: Framework, key: typing.Optional = None): + super().__init__(framework, None) + + for relation_name in self.framework.meta.relations: + relation_name = relation_name.replace('-', '_') + self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent) + self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent) + self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent) + self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent) + self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent) + self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name = action_name.replace('-', '_') + self.on.define_event(action_name + '_action', ActionEvent) + + @property + def app(self) -> model.Application: + """Application that this unit is part of.""" + return self.framework.model.app + + @property + def unit(self) -> model.Unit: + """Unit that this execution is responsible for.""" + return self.framework.model.unit + + @property + def meta(self) -> 'CharmMeta': + """CharmMeta of this charm. + """ + return self.framework.meta + + @property + def charm_dir(self) -> pathlib.Path: + """Root directory of the Charm as it is running. + """ + return self.framework.charm_dir + + +class CharmMeta: + """Object containing the metadata for the charm. + + This is read from metadata.yaml and/or actions.yaml. Generally charms will + define this information, rather than reading it at runtime. This class is + mostly for the framework to understand what the charm has defined. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. + + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. + + Attributes: + name: The name of this charm + summary: Short description of what this charm does + description: Long description for this charm + maintainers: A list of strings of the email addresses of the maintainers + of this charm. + tags: Charm store tag metadata for categories associated with this charm. + terms: Charm store terms that should be agreed to before this charm can + be deployed. (Used for things like licensing issues.) + series: The list of supported OS series that this charm can support. + The first entry in the list is the default series that will be + used by deploy if no other series is requested by the user. + subordinate: True/False whether this charm is intended to be used as a + subordinate charm. + min_juju_version: If supplied, indicates this charm needs features that + are not available in older versions of Juju. + requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation. + provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation. + peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation. 
+ relations: A dict containing all :class:`RelationMeta` attributes (merged from other + sections) + storages: A dict of {name: :class:`StorageMeta`} for each defined storage. + resources: A dict of {name: :class:`ResourceMeta`} for each defined resource. + payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload. + extra_bindings: A dict of additional named bindings that a charm can use + for network configuration. + actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined. + Args: + raw: a mapping containing the contents of metadata.yaml + actions_raw: a mapping containing the contents of actions.yaml + """ + + def __init__(self, raw: dict = {}, actions_raw: dict = {}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta(RelationRole.requires, name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta(RelationRole.provides, name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta(RelationRole.peer, name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', {}) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml( + cls, metadata: typing.Union[str, typing.TextIO], + actions: typing.Optional[typing.Union[str, typing.TextIO]] = None): + """Instantiate a CharmMeta from a YAML description of metadata.yaml. + + Args: + metadata: A YAML description of charm metadata (name, relations, etc.) + This can be a simple string, or a file-like object. (passed to `yaml.safe_load`). + actions: YAML description of Actions for this charm (eg actions.yaml) + """ + meta = _loadYaml(metadata) + raw_actions = {} + if actions is not None: + raw_actions = _loadYaml(actions) + return cls(meta, raw_actions) + + +class RelationRole(enum.Enum): + peer = 'peer' + requires = 'requires' + provides = 'provides' + + def is_peer(self) -> bool: + """Return whether the current role is peer. + + A convenience to avoid having to import charm. + """ + return self is RelationRole.peer + + +class RelationMeta: + """Object containing metadata about a relation definition. + + Should not be constructed directly by Charm code. Is gotten from one of + :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`, + or :attr:`CharmMeta.relations`. + + Attributes: + role: This is one of peer/requires/provides + relation_name: Name of this relation from metadata.yaml + interface_name: Optional definition of the interface protocol. 
+        scope: "global" or "container" scope based on how the relation should be used.
+    """
+
+    def __init__(self, role: RelationRole, relation_name: str, raw: dict):
+        if not isinstance(role, RelationRole):
+            raise TypeError("role should be a RelationRole, not {!r}".format(role))
+        self.role = role
+        self.relation_name = relation_name
+        self.interface_name = raw['interface']
+        self.scope = raw.get('scope')
+
+
+class StorageMeta:
+    """Object containing metadata about a storage definition."""
+
+    def __init__(self, name, raw):
+        self.storage_name = name
+        self.type = raw['type']
+        self.description = raw.get('description', '')
+        self.shared = raw.get('shared', False)
+        self.read_only = raw.get('read-only', False)
+        self.minimum_size = raw.get('minimum-size')
+        self.location = raw.get('location')
+        self.multiple_range = None
+        if 'multiple' in raw:
+            range = raw['multiple']['range']
+            if '-' not in range:
+                self.multiple_range = (int(range), int(range))
+            else:
+                range = range.split('-')
+                self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None)
+
+
+class ResourceMeta:
+    """Object containing metadata about a resource definition."""
+
+    def __init__(self, name, raw):
+        self.resource_name = name
+        self.type = raw['type']
+        self.filename = raw.get('filename', None)
+        self.description = raw.get('description', '')
+
+
+class PayloadMeta:
+    """Object containing metadata about a payload definition."""
+
+    def __init__(self, name, raw):
+        self.payload_name = name
+        self.type = raw['type']
+
+
+class ActionMeta:
+    """Object containing metadata about an action's definition."""
+
+    def __init__(self, name, raw=None):
+        raw = raw or {}
+        self.name = name
+        self.title = raw.get('title', '')
+        self.description = raw.get('description', '')
+        self.parameters = raw.get('params', {})  # {<parameter name>: <JSON Schema definition>}
+        self.required = raw.get('required', [])  # [<parameter name>, ...]
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/framework.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/framework.py
new file mode 100755
index 0000000000000000000000000000000000000000..b7c4749ff2b5bfb4f354bf1a8d4cd6ed64cf0da5
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/framework.py
@@ -0,0 +1,1067 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import collections.abc
+import inspect
+import keyword
+import logging
+import marshal
+import os
+import pathlib
+import pdb
+import re
+import sys
+import types
+import weakref
+
+from ops import charm
+from ops.storage import (
+    NoSnapshotError,
+    SQLiteStorage,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Handle:
+    """Handle defines a name for an object in the form of a hierarchical path.
+
+    The provided parent is the object (or that object's handle) that this handle
+    sits under, or None if the object identified by this handle stands by itself
+    as the root of its own hierarchy.
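+
+    For example (illustrative), the path of an event object could look like
+    ``MyCharm/on/config_changed[42]``.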
+ + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. + """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = "{}/{}[{}]".format(parent, kind, key) + else: + self._path = "{}/{}".format(parent, kind) + else: + if key: + self._path = "{}[{}]".format(kind, key) + else: + self._path = "{}".format(kind) + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {}".format(path)) + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. + """ + self.deferred = False + + +class EventSource: + """EventSource wraps an event type with a descriptor to facilitate observing and emitting. + + It is generally used as: + + class SomethingHappened(EventBase): + pass + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + With that, instances of that type will offer the someobj.something_happened + attribute which is a BoundEvent and may be used to emit and observe the event. + """ + + def __init__(self, event_type): + if not isinstance(event_type, type) or not issubclass(event_type, EventBase): + raise RuntimeError( + 'Event requires a subclass of EventBase as an argument, got {}'.format(event_type)) + self.event_type = event_type + self.event_kind = None + self.emitter_type = None + + def _set_name(self, emitter_type, event_kind): + if self.event_kind is not None: + raise RuntimeError( + 'EventSource({}) reused as {}.{} and {}.{}'.format( + self.event_type.__name__, + self.emitter_type.__name__, + self.event_kind, + emitter_type.__name__, + event_kind, + )) + self.event_kind = event_kind + self.emitter_type = emitter_type + + def __get__(self, emitter, emitter_type=None): + if emitter is None: + return self + # Framework might not be available if accessed as CharmClass.on.event + # rather than charm_instance.on.event, but in that case it couldn't be + # emitted anyway, so there's no point to registering it. 
+        framework = getattr(emitter, 'framework', None)
+        if framework is not None:
+            framework.register_type(self.event_type, emitter, self.event_kind)
+        return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
+class BoundEvent:
+
+    def __repr__(self):
+        return '<BoundEvent {} bound to {}.{} at {}>'.format(
+            self.event_type.__name__,
+            type(self.emitter).__name__,
+            self.event_kind,
+            hex(id(self)),
+        )
+
+    def __init__(self, emitter, event_type, event_kind):
+        self.emitter = emitter
+        self.event_type = event_type
+        self.event_kind = event_kind
+
+    def emit(self, *args, **kwargs):
+        """Emit event to all registered observers.
+
+        The current storage state is committed before and after each observer is notified.
+        """
+        framework = self.emitter.framework
+        key = framework._next_event_key()
+        event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
+        framework._emit(event)
+
+
+class HandleKind:
+    """Helper descriptor to define the Object.handle_kind field.
+
+    The handle_kind for an object defaults to its type name, but it may
+    be explicitly overridden if desired.
+    """
+
+    def __get__(self, obj, obj_type):
+        kind = obj_type.__dict__.get("handle_kind")
+        if kind:
+            return kind
+        return obj_type.__name__
+
+
+class _Metaclass(type):
+    """Helper class to ensure proper instantiation of Object-derived classes.
+
+    This class currently has a single purpose: events derived from EventSource
+    that are class attributes of Object-derived classes need to be told what
+    their name is in that class. For example, in
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    the instance of EventSource needs to know it's called 'something_happened'.
+
+    Starting from python 3.6 we could use __set_name__ on EventSource for this,
+    but until then this (meta)class does the equivalent work.
+
+    TODO: when we drop support for 3.5 drop this class, and rename _set_name in
+    EventSource to __set_name__; everything should continue to work.
+
+    """
+
+    def __new__(typ, *a, **kw):
+        k = super().__new__(typ, *a, **kw)
+        # k is now the Object-derived class; loop over its class attributes
+        for n, v in vars(k).items():
+            # we could do duck typing here if we want to support
+            # non-EventSource-derived shenanigans. We don't.
+            if isinstance(v, EventSource):
+                # this is what 3.6+ does automatically for us:
+                v._set_name(k, n)
+        return k
+
+
+class Object(metaclass=_Metaclass):
+
+    handle_kind = HandleKind()
+
+    def __init__(self, parent, key):
+        kind = self.handle_kind
+        if isinstance(parent, Framework):
+            self.framework = parent
+            # Avoid Framework instances having a circular reference to themselves.
+            if self.framework is self:
+                self.framework = weakref.proxy(self.framework)
+            self.handle = Handle(None, kind, key)
+        else:
+            self.framework = parent.framework
+            self.handle = Handle(parent, kind, key)
+        self.framework._track(self)
+
+        # TODO Detect conflicting handles here.
+
+    @property
+    def model(self):
+        return self.framework.model
+
+
+class ObjectEvents(Object):
+    """Convenience type to allow defining .on attributes at class level."""
+
+    handle_kind = "on"
+
+    def __init__(self, parent=None, key=None):
+        if parent is not None:
+            super().__init__(parent, key)
+        else:
+            self._cache = weakref.WeakKeyDictionary()
+
+    def __get__(self, emitter, emitter_type):
+        if emitter is None:
+            return self
+        instance = self._cache.get(emitter)
+        if instance is None:
+            # Same type, different instance, more data.
Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls: a type to define an event on. + + event_kind: an attribute name that will be used to access the + event. Must be a valid python identifier, not be a keyword + or an existing attribute. + + event_type: a type of the event to define. + + """ + prefix = 'unable to define an event with event_kind that ' + if not event_kind.isidentifier(): + raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind) + elif keyword.iskeyword(event_kind): + raise RuntimeError(prefix + 'is a python keyword: ' + event_kind) + try: + getattr(cls, event_kind) + raise RuntimeError( + prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind)) + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor._set_name(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. + event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(ObjectEvents): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return "cannot restore {} since no class was registered for it".format(self.handle_path) + + +# the message to show to the user when a pdb breakpoint goes active +_BREAKPOINT_WELCOME_MESSAGE = """ +Starting pdb to debug charm operator. +Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort. +Future breakpoints may interrupt execution again. +More details at https://discourse.jujucharms.com/t/debugging-charm-hooks + +""" + + +_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$' + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. 
+ model = None + meta = None + charm_dir = None + + def __init__(self, storage, charm_dir, meta, model): + + super().__init__(self, None) + + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + if isinstance(storage, (str, pathlib.Path)): + logger.warning( + "deprecated: Framework now takes a Storage not a path") + storage = SQLiteStorage(storage) + self._storage = storage + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do + # breakpoint(); if Python < 3.7, this doesn't affect anything + sys.breakpointhook = self.breakpoint + + # Flag to indicate that we already presented the welcome message in a debugger breakpoint + self._breakpoint_welcomed = False + + # Parse once the env var, which may be used multiple times later + debug_at = os.environ.get('JUJU_DEBUG_AT') + self._juju_debug_at = debug_at.split(',') if debug_at else () + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError( + 'two objects claiming to be {} have been created'.format(obj.handle.path)) + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. + self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. 
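+
+        A minimal sketch of a conforming object (hypothetical, for illustration)::
+
+            class Counter(Object):
+
+                def snapshot(self):
+                    return {'count': self._count}
+
+                def restore(self, snapshot):
+                    self._count = snapshot['count']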
+        """
+        if type(value) not in self._type_known:
+            raise RuntimeError(
+                'cannot save {} values before registering that type'.format(type(value).__name__))
+        data = value.snapshot()
+
+        # Use marshal as a validator, enforcing the use of simple types, because the
+        # information is later actually pickled, which is too error prone for future
+        # evolution of the stored data (e.g. if the developer stores a custom object and
+        # later changes its class name; when unpickling the original class will not be
+        # there and event data loading will fail).
+        try:
+            marshal.dumps(data)
+        except ValueError:
+            msg = "unable to save the data for {}, it must contain only simple types: {!r}"
+            raise ValueError(msg.format(value.__class__.__name__, data))
+
+        self._storage.save_snapshot(value.handle.path, data)
+
+    def load_snapshot(self, handle):
+        parent_path = None
+        if handle.parent:
+            parent_path = handle.parent.path
+        cls = self._type_registry.get((parent_path, handle.kind))
+        if not cls:
+            raise NoTypeError(handle.path)
+        data = self._storage.load_snapshot(handle.path)
+        obj = cls.__new__(cls)
+        obj.framework = self
+        obj.handle = handle
+        obj.restore(data)
+        self._track(obj)
+        return obj
+
+    def drop_snapshot(self, handle):
+        self._storage.drop_snapshot(handle.path)
+
+    def observe(self, bound_event: BoundEvent, observer: types.MethodType):
+        """Register observer to be called when bound_event is emitted.
+
+        The bound_event is generally provided as an attribute of the object that emits
+        the event, and is created in this style:
+
+            class SomeObject:
+                something_happened = EventSource(SomethingHappened)
+
+        That event may be observed as:
+
+            framework.observe(someobj.something_happened, self._on_something_happened)
+
+        Raises:
+            RuntimeError: if bound_event or observer are the wrong type.
+        """
+        if not isinstance(bound_event, BoundEvent):
+            raise RuntimeError(
+                'Framework.observe requires a BoundEvent as second parameter, got {}'.format(
+                    bound_event))
+        if not isinstance(observer, types.MethodType):
+            # help users of older versions of the framework
+            if isinstance(observer, charm.CharmBase):
+                raise TypeError(
+                    'observer methods must now be explicitly provided;'
+                    ' please replace observe(self.on.{0}, self)'
+                    ' with e.g. observe(self.on.{0}, self._on_{0})'.format(
+                        bound_event.event_kind))
+            raise RuntimeError(
+                'Framework.observe requires a method as third parameter, got {}'.format(observer))
+
+        event_type = bound_event.event_type
+        event_kind = bound_event.event_kind
+        emitter = bound_event.emitter
+
+        self.register_type(event_type, emitter, event_kind)
+
+        if hasattr(emitter, "handle"):
+            emitter_path = emitter.handle.path
+        else:
+            raise RuntimeError(
+                'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__))
+
+        # Validate that the method has an acceptable call signature.
+        sig = inspect.signature(observer)
+        # Self isn't included in the params list, so the first arg will be the event.
+        extra_params = list(sig.parameters.values())[1:]
+
+        method_name = observer.__name__
+        observer = observer.__self__
+        if not sig.parameters:
+            raise TypeError(
+                '{}.{} must accept event parameter'.format(type(observer).__name__, method_name))
+        elif any(param.default is inspect.Parameter.empty for param in extra_params):
+            # Allow for additional optional params, since there's no reason to exclude them, but
+            # required params will break.
+ raise TypeError( + '{}.{} has extra required parameter'.format(type(observer).__name__, method_name)) + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 + # means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + saved = False + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of + # all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + if not saved: + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + saved = True + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + if saved: + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + event_is_from_juju = isinstance(event, charm.HookEvent) + event_is_action = isinstance(event, charm.ActionEvent) + if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at: + # Present the welcome message and run under PDB. + self._show_debug_code_message() + pdb.runcall(custom_handler, event) + else: + # Regular call to the registered method. + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from + # scratch in the next path. + self.framework._forget(event) + + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + + def _show_debug_code_message(self): + """Present the welcome message (only once!) 
when using debugger functionality.""" + if not self._breakpoint_welcomed: + self._breakpoint_welcomed = True + print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='') + + def breakpoint(self, name=None): + """Add breakpoint, optionally named, at the place where this method is called. + + For the breakpoint to be activated the JUJU_DEBUG_AT environment variable + must be set to "all" or to the specific name parameter provided, if any. In every + other situation calling this method does nothing. + + The framework also provides a standard breakpoint named "hook", that will + stop execution when a hook event is about to be handled. + + For those reasons, the "all" and "hook" breakpoint names are reserved. + """ + # If given, validate the name comply with all the rules + if name is not None: + if not isinstance(name, str): + raise TypeError('breakpoint names must be strings') + if name in ('hook', 'all'): + raise ValueError('breakpoint names "all" and "hook" are reserved') + if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name): + raise ValueError('breakpoint names must look like "foo" or "foo-bar"') + + indicated_breakpoints = self._juju_debug_at + if not indicated_breakpoints: + return + + if 'all' in indicated_breakpoints or name in indicated_breakpoints: + self._show_debug_code_message() + + # If we call set_trace() directly it will open the debugger *here*, so indicating + # it to use our caller's frame + code_frame = inspect.currentframe().f_back + pdb.Pdb().set_trace(code_frame) + else: + logger.warning( + "Breakpoint %r skipped (not found in the requested breakpoints: %s)", + name, indicated_breakpoints) + + def remove_unreferenced_events(self): + """Remove events from storage that are not referenced. + + In older versions of the framework, events that had no observers would get recorded but + never deleted. This makes a best effort to find these events and remove them from the + database. + """ + event_regex = re.compile(_event_regex) + to_remove = [] + for handle_path in self._storage.list_snapshots(): + if event_regex.match(handle_path): + notices = self._storage.notices(handle_path) + if next(notices, None) is None: + # There are no notices for this handle_path, it is valid to remove it + to_remove.append(handle_path) + for handle_path in to_remove: + self._storage.drop_snapshot(handle_path) + + +class StoredStateData(Object): + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + return self._cache + + def restore(self, snapshot): + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. 
+        self.__dict__["_data"] = data
+        self.__dict__["_attr_name"] = attr_name
+
+        parent.framework.observe(parent.framework.on.commit, self._data.on_commit)
+
+    def __getattr__(self, key):
+        # "on" is the only reserved key that can't be used in the data map.
+        if key == "on":
+            return self._data.on
+        if key not in self._data:
+            raise AttributeError("attribute '{}' is not stored".format(key))
+        return _wrap_stored(self._data, self._data[key])
+
+    def __setattr__(self, key, value):
+        if key == "on":
+            raise AttributeError("attribute 'on' is reserved and cannot be set")
+
+        value = _unwrap_stored(self._data, value)
+
+        if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)):
+            raise AttributeError(
+                'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format(
+                    key, type(value).__name__))
+
+        self._data[key] = _unwrap_stored(self._data, value)
+
+    def set_default(self, **kwargs):
+        """Set the value of any given key if it has not already been set."""
+        for k, v in kwargs.items():
+            if k not in self._data:
+                self._data[k] = v
+
+
+class StoredState:
+    """A class used to store data the charm needs persisted across invocations.
+
+    Example::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+    Instances of `MyClass` can transparently save state between invocations by
+    setting attributes on `_stored`. Initial state should be set with
+    `set_default` on the bound object, that is::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self._stored.set_default(seen=set())
+                self.framework.observe(self.on.seen, self._on_seen)
+
+            def _on_seen(self, event):
+                self._stored.seen.add(event.uuid)
+
+    """
+
+    def __init__(self):
+        self.parent_type = None
+        self.attr_name = None
+
+    def __get__(self, parent, parent_type=None):
+        if self.parent_type is not None and self.parent_type not in parent_type.mro():
+            # the StoredState instance is being shared between two unrelated classes
+            # -> unclear what is expected of us -> bail out
+            raise RuntimeError(
+                'StoredState shared by {} and {}'.format(
+                    self.parent_type.__name__, parent_type.__name__))
+
+        if parent is None:
+            # accessing via the class directly (e.g. MyClass.stored)
+            return self
+
+        bound = None
+        if self.attr_name is not None:
+            bound = parent.__dict__.get(self.attr_name)
+            if bound is not None:
+                # we already have the thing from a previous pass, huzzah
+                return bound
+
+        # need to find ourselves amongst the parent's bases
+        for cls in parent_type.mro():
+            for attr_name, attr_value in cls.__dict__.items():
+                if attr_value is not self:
+                    continue
+                # we've found ourselves! is it the first time?
+ if bound is not None: + # the StoredState instance is being stored in two different + # attributes -> unclear what is expected of us -> bail out + raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format( + cls.__name__, self.attr_name, attr_name)) + # we've found ourselves for the first time; save where, and bind the object + self.attr_name = attr_name + self.parent_type = cls + bound = BoundStoredState(parent, attr_name) + + if bound is not None: + # cache the bound object to avoid the expensive lookup the next time + # (don't use setattr, to keep things symmetric with the fast-path lookup above) + parent.__dict__[self.attr_name] = bound + return bound + + raise AttributeError( + 'cannot find {} attribute in type {}'.format( + self.__class__.__name__, parent_type.__name__)) + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def _unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + 
return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredList):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+
+class StoredSet(collections.abc.MutableSet):
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def add(self, key):
+        self._under.add(key)
+        self._stored_data.dirty = True
+
+    def discard(self, key):
+        self._under.discard(key)
+        self._stored_data.dirty = True
+
+    def __contains__(self, key):
+        return key in self._under
+
+    def __iter__(self):
+        return self._under.__iter__()
+
+    def __len__(self):
+        return len(self._under)
+
+    @classmethod
+    def _from_iterable(cls, it):
+        """Construct an instance of the class from any iterable input.
+
+        Per https://docs.python.org/3/library/collections.abc.html
+        if the Set mixin is being used in a class with a different constructor signature,
+        you will need to override _from_iterable() with a classmethod that can construct
+        new instances from an iterable argument.
+        """
+        return set(it)
+
+    def __le__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under <= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under <= other
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+    def __eq__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under == other
+        else:
+            return NotImplemented
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/jujuversion.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/jujuversion.py
new file mode 100755
index 0000000000000000000000000000000000000000..b2b8177dbe396f0d8c46b86e26af6b4e54ea046d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/jujuversion.py
@@ -0,0 +1,98 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+from functools import total_ordering
+
+
+@total_ordering
+class JujuVersion:
+
+    PATTERN = r'''^
+    (?P<major>\d{1,9})\.(?P<minor>\d{1,9})       # <major> and <minor> numbers are always there
+    ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch>
+    (\.(?P<build>\d{1,9}))?$                     # and sometimes with a <build> number.
+ ''' + + def __init__(self, version): + m = re.match(self.PATTERN, version, re.VERBOSE) + if not m: + raise RuntimeError('"{}" is not a valid Juju version string'.format(version)) + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch) + else: + s = '{}.{}.{}'.format(self.major, self.minor, self.patch) + if self.build > 0: + s += '.{}'.format(self.build) + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + return ( + self.major == other.major + and self.minor == other.minor + and self.tag == other.tag + and self.build == other.build + and self.patch == other.patch) + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False + + @classmethod + def from_environ(cls) -> 'JujuVersion': + """Build a JujuVersion from JUJU_VERSION.""" + v = os.environ.get('JUJU_VERSION') + if not v: + raise RuntimeError('environ has no JUJU_VERSION') + return cls(v) + + def has_app_data(self) -> bool: + """Determine whether this juju version knows about app data.""" + return (self.major, self.minor, self.patch) >= (2, 7, 0) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/lib/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..edb9fcacea6f0173aed9f07ca8a683cfead989cc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/lib/__init__.py @@ -0,0 +1,194 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import os +import re + +from ast import literal_eval +from importlib.util import module_from_spec +from importlib.machinery import ModuleSpec +from pkgutil import get_importer +from types import ModuleType + + +_libraries = None + +_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''') +_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''') + +# Not perfect, but should do for now. 
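+# (Illustrative: it accepts plain addresses such as "alice@example.com".)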
+_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''') + + +def use(name: str, api: int, author: str) -> ModuleType: + """Use a library from the ops libraries. + + Args: + name: the name of the library requested. + api: the API version of the library. + author: the author of the library. If not given, requests the + one in the standard library. + Raises: + ImportError: if the library cannot be found. + TypeError: if the name, api, or author are the wrong type. + ValueError: if the name, api, or author are invalid. + """ + if not isinstance(name, str): + raise TypeError("invalid library name: {!r} (must be a str)".format(name)) + if not isinstance(author, str): + raise TypeError("invalid library author: {!r} (must be a str)".format(author)) + if not isinstance(api, int): + raise TypeError("invalid library API: {!r} (must be an int)".format(api)) + if api < 0: + raise ValueError('invalid library api: {} (must be ≥0)'.format(api)) + if not _libname_re.match(name): + raise ValueError("invalid library name: {!r} (chars and digits only)".format(name)) + if not _libauthor_re.match(author): + raise ValueError("invalid library author email: {!r}".format(author)) + + if _libraries is None: + autoimport() + + versions = _libraries.get((name, author), ()) + for lib in versions: + if lib.api == api: + return lib.import_module() + + others = ', '.join(str(lib.api) for lib in versions) + if others: + msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format( + name, author, api, others) + else: + msg = 'cannot find library "{}" from "{}"'.format(name, author) + + raise ImportError(msg, name=name) + + +def autoimport(): + """Find all libs in the path and enable use of them. + + You only need to call this if you've installed a package or + otherwise changed sys.path in the current run, and need to see the + changes. Otherwise libraries are found on first call of `use`. + """ + global _libraries + _libraries = {} + for spec in _find_all_specs(sys.path): + lib = _parse_lib(spec) + if lib is None: + continue + + versions = _libraries.setdefault((lib.name, lib.author), []) + versions.append(lib) + versions.sort(reverse=True) + + +def _find_all_specs(path): + for sys_dir in path: + if sys_dir == "": + sys_dir = "." 
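+        # Libraries are looked up at <sys_dir>/<top_dir>/opslib/<lib_dir>
+        # (for example, lib/charms/opslib/mylib with "lib" on sys.path).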
+ try: + top_dirs = os.listdir(sys_dir) + except OSError: + continue + for top_dir in top_dirs: + opslib = os.path.join(sys_dir, top_dir, 'opslib') + try: + lib_dirs = os.listdir(opslib) + except OSError: + continue + finder = get_importer(opslib) + if finder is None or not hasattr(finder, 'find_spec'): + continue + for lib_dir in lib_dirs: + spec = finder.find_spec(lib_dir) + if spec is None: + continue + if spec.loader is None: + # a namespace package; not supported + continue + yield spec + + +# only the first this many lines of a file are looked at for the LIB* constants +_MAX_LIB_LINES = 99 + + +def _parse_lib(spec): + if spec.origin is None: + return None + + _expected = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int} + + try: + with open(spec.origin, 'rt', encoding='utf-8') as f: + libinfo = {} + for n, line in enumerate(f): + if len(libinfo) == len(_expected): + break + if n > _MAX_LIB_LINES: + return None + m = _libline_re.match(line) + if m is None: + continue + key, value = m.groups() + if key in _expected: + value = literal_eval(value) + if not isinstance(value, _expected[key]): + return None + libinfo[key] = value + else: + if len(libinfo) != len(_expected): + return None + except Exception: + return None + + return _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH']) + + +class _Lib: + + def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int): + self.spec = spec + self.name = name + self.author = author + self.api = api + self.patch = patch + + self._module = None + + def __repr__(self): + return "<_Lib {0.name} by {0.author}, API {0.api}, patch {0.patch}>".format(self) + + def import_module(self) -> ModuleType: + if self._module is None: + module = module_from_spec(self.spec) + self.spec.loader.exec_module(module) + self._module = module + return self._module + + def __eq__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a == b + + def __lt__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a < b diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/log.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/log.py new file mode 100644 index 0000000000000000000000000000000000000000..4aac5543aec4d84dc393e79b772a30284712d6d4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/log.py @@ -0,0 +1,51 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import logging + + +class JujuLogHandler(logging.Handler): + """A handler for sending logs to Juju via juju-log.""" + + def __init__(self, model_backend, level=logging.DEBUG): + super().__init__(level) + self.model_backend = model_backend + + def emit(self, record): + self.model_backend.juju_log(record.levelname, self.format(record)) + + +def setup_root_logging(model_backend, debug=False): + """Setup python logging to forward messages to juju-log. + + By default, logging is set to DEBUG level, and messages will be filtered by Juju. + Charmers can also set their own default log level with:: + + logging.getLogger().setLevel(logging.INFO) + + model_backend -- a ModelBackend to use for juju-log + debug -- if True, write logs to stderr as well as to juju-log. + """ + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.addHandler(JujuLogHandler(model_backend)) + if debug: + handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + + sys.excepthook = lambda etype, value, tb: logger.error( + "Uncaught exception while in charm code:", exc_info=(etype, value, tb)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/main.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/main.py new file mode 100755 index 0000000000000000000000000000000000000000..6dc31c3575044796e8fe1f61b8415395689d6339 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/main.py @@ -0,0 +1,348 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import logging +import os +import subprocess +import sys +import warnings +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model +import ops.storage + +from ops.log import setup_root_logging + +CHARM_STATE_FILE = '.unit-state.db' + + +logger = logging.getLogger() + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/op/main.py structure. + charm_dir = Path('{}/../../..'.format(__file__)).resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _create_event_link(charm, bound_event): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. + """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError( + 'action event name {} needs _action suffix'.format(bound_event.event_kind)) + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. 
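+        # (For example, a "backup_action" event maps to the "actions/backup" executable.)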
+ event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError( + 'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type)) + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + # CPython has different implementations for populating sys.argv[0] for Linux and Windows. + # For Windows it is always an absolute path (any symlinks are resolved) + # while for Linux it can be a relative path. + target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir)) + + # Ignore the non-symlink files or directories + # assuming the charm author knows what they are doing. + logger.debug( + 'Creating a new relative symlink at %s pointing to %s', + event_path, target_path) + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file + which is determined by inspecting symlinks provided by the charm + author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + + """ + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + logger.debug("Event %s not defined for %s.", event_name, charm) + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. + if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + logger.debug('Emitting Juju event %s.', event_name) + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name)) + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +class _Dispatcher: + """Encapsulate how to figure out what event Juju wants us to run. + + Also knows how to run “legacy” hooks when Juju called us via a top-level + ``dispatch`` binary. 
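+
+    For example (illustrative): when Juju runs ``hooks/config-changed`` the event
+    name is inferred as ``config_changed``, and ``actions/backup`` becomes
+    ``backup_action``.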
+
+    Args:
+        charm_dir: the toplevel directory of the charm
+
+    Attributes:
+        event_name: the name of the event to run
+        is_dispatch_aware: are we running under a Juju that knows about the
+            dispatch binary?
+
+    """
+
+    def __init__(self, charm_dir: Path):
+        self._charm_dir = charm_dir
+        self._exec_path = Path(sys.argv[0])
+
+        if 'JUJU_DISPATCH_PATH' in os.environ and (charm_dir / 'dispatch').exists():
+            self._init_dispatch()
+        else:
+            self._init_legacy()
+
+    def ensure_event_links(self, charm):
+        """Make sure necessary symlinks are present on disk"""
+
+        if self.is_dispatch_aware:
+            # links aren't needed
+            return
+
+        # When a charm is force-upgraded and a unit is in an error state Juju
+        # does not run upgrade-charm and instead runs the failed hook followed
+        # by config-changed. Given the nature of force-upgrading the hook setup
+        # code is not triggered on config-changed.
+        #
+        # 'start' event is included as Juju does not fire the install event for
+        # K8s charms (see LP: #1854635).
+        if (self.event_name in ('install', 'start', 'upgrade_charm')
+                or self.event_name.endswith('_storage_attached')):
+            _setup_event_links(self._charm_dir, charm)
+
+    def run_any_legacy_hook(self):
+        """Run any extant legacy hook.
+
+        If there is both a dispatch file and a legacy hook for the
+        current event, run the wanted legacy hook.
+        """
+
+        if not self.is_dispatch_aware:
+            # we *are* the legacy hook
+            return
+
+        dispatch_path = self._charm_dir / self._dispatch_path
+        if not dispatch_path.exists():
+            logger.debug("Legacy %s does not exist.", self._dispatch_path)
+            return
+
+        # super strange that there isn't an is_executable
+        if not os.access(str(dispatch_path), os.X_OK):
+            logger.warning("Legacy %s exists but is not executable.", self._dispatch_path)
+            return
+
+        if dispatch_path.resolve() == self._exec_path.resolve():
+            logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path)
+            return
+
+        argv = sys.argv.copy()
+        argv[0] = str(dispatch_path)
+        logger.info("Running legacy %s.", self._dispatch_path)
+        try:
+            subprocess.run(argv, check=True)
+        except subprocess.CalledProcessError as e:
+            logger.warning(
+                "Legacy %s exited with status %d.",
+                self._dispatch_path, e.returncode)
+            sys.exit(e.returncode)
+        else:
+            logger.debug("Legacy %s exited with status 0.", self._dispatch_path)
+
+    def _set_name_from_path(self, path: Path):
+        """Sets the name attribute to that which can be inferred from the given path."""
+        name = path.name.replace('-', '_')
+        if path.parent.name == 'actions':
+            name = '{}_action'.format(name)
+        self.event_name = name
+
+    def _init_legacy(self):
+        """Set up the 'legacy' dispatcher.
+
+        The current Juju doesn't know about 'dispatch' and calls hooks
+        explicitly.
+        """
+        self.is_dispatch_aware = False
+        self._set_name_from_path(self._exec_path)
+
+    def _init_dispatch(self):
+        """Set up the new 'dispatch' dispatcher.
+
+        The current Juju will run 'dispatch' if it exists, and otherwise fall
+        back to the old behaviour.
+
+        JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install,
+        in both cases.
+        """
+        self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH'])
+
+        if 'OPERATOR_DISPATCH' in os.environ:
+            logger.debug("Charm called itself via %s.", self._dispatch_path)
+            sys.exit(0)
+        os.environ['OPERATOR_DISPATCH'] = '1'
+
+        self.is_dispatch_aware = True
+        self._set_name_from_path(self._dispatch_path)
+
+    def is_restricted_context(self):
+        """Return True if we are running in a restricted Juju context.
+ + When in a restricted context, most commands (relation-get, config-get, + state-get) are not available. As such, we change how we interact with + Juju. + """ + return self.event_name in ('collect_metrics',) + + +def main(charm_class, use_juju_for_storage=False): + """Setup the charm and dispatch the observed event. + + The event name is based on the way this executable was called (argv[0]). + """ + charm_dir = _get_charm_dir() + + model_backend = ops.model._ModelBackend() + debug = ('JUJU_DEBUG' in os.environ) + setup_root_logging(model_backend, debug=debug) + logger.debug("Operator Framework %s up and running.", ops.__version__) + + dispatcher = _Dispatcher(charm_dir) + dispatcher.run_any_legacy_hook() + + metadata = (charm_dir / 'metadata.yaml').read_text() + actions_meta = charm_dir / 'actions.yaml' + if actions_meta.exists(): + actions_metadata = actions_meta.read_text() + else: + actions_metadata = None + + if not yaml.__with_libyaml__: + logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader') + meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata) + model = ops.model.Model(meta, model_backend) + + # TODO: If Juju unit agent crashes after exit(0) from the charm code + # the framework will commit the snapshot but Juju will not commit its + # operation. + charm_state_path = charm_dir / CHARM_STATE_FILE + if use_juju_for_storage: + if dispatcher.is_restricted_context(): + # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event + # Though we eventually expect that juju will run collect-metrics in a + # non-restricted context. Once we can determine that we are running collect-metrics + # in a non-restricted context, we should fire the event as normal. + logger.debug('"%s" is not supported when using Juju for storage\n' + 'see: https://github.com/canonical/operator/issues/348', + dispatcher.event_name) + # Note that we don't exit nonzero, because that would cause Juju to rerun the hook + return + store = ops.storage.JujuStorage() + else: + store = ops.storage.SQLiteStorage(charm_state_path) + framework = ops.framework.Framework(store, charm_dir, meta, model) + try: + sig = inspect.signature(charm_class) + try: + sig.bind(framework) + except TypeError: + msg = ( + "the second argument, 'key', has been deprecated and will be " + "removed after the 0.7 release") + warnings.warn(msg, DeprecationWarning) + charm = charm_class(framework, None) + else: + charm = charm_class(framework) + dispatcher.ensure_event_links(charm) + + # TODO: Remove the collect_metrics check below as soon as the relevant + # Juju changes are made. + # + # Skip reemission of deferred events for collect-metrics events because + # they do not have the full access to all hook tools. + if not dispatcher.is_restricted_context(): + framework.reemit() + + _emit_charm_event(charm, dispatcher.event_name) + + framework.commit() + finally: + framework.close() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/model.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b96e89154ea9cec2b62a4fab4649412e115c304e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/model.py @@ -0,0 +1,1237 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import decimal +import ipaddress +import json +import os +import re +import shutil +import tempfile +import time +import typing +import weakref + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + +import ops +from ops.jujuversion import JujuVersion + + +class Model: + """Represents the Juju Model as seen from this unit. + + This should not be instantiated directly by Charmers, but can be accessed as `self.model` + from any class that derives from Object. + + Attributes: + unit: A :class:`Unit` that represents the unit that is running this code (eg yourself) + app: A :class:`Application` that represents the application this unit is a part of. + relations: Mapping of endpoint to list of :class:`Relation` answering the question + "what am I currently related to". See also :meth:`.get_relation` + config: A dict of the config for the current application. + resources: Access to resources for this charm. Use ``model.resources.fetch(resource_name)`` + to get the path on disk where the resource can be found. + storages: Mapping of storage_name to :class:`Storage` for the storage points defined in + metadata.yaml + pod: Used to get access to ``model.pod.set_spec`` to set the container specification + for Kubernetes charms. + """ + + def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'): + self._cache = _ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(self._backend.unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + self._bindings = BindingMapping(self._backend) + + @property + def name(self) -> str: + """Return the name of the Model that this unit is running in. + + This is read from the environment variable ``JUJU_MODEL_NAME``. + """ + return self._backend.model_name + + def get_unit(self, unit_name: str) -> 'Unit': + """Get an arbitrary unit by name. + + Internally this uses a cache, so asking for the same unit two times will + return the same object. + """ + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name: str) -> 'Application': + """Get an application by name. + + Internally this uses a cache, so asking for the same application two times will + return the same object. + """ + return self._cache.get(Application, app_name) + + def get_relation( + self, relation_name: str, + relation_id: typing.Optional[int] = None) -> 'Relation': + """Get a specific Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. 
+ + Args: + relation_name: The name of the endpoint for this charm + relation_id: An identifier for a specific relation. Used to disambiguate when a + given application has more than one relation on a given endpoint. + Raises: + TooManyRelatedAppsError: is raised if there is more than one relation to the + supplied relation_name and no relation_id was supplied + """ + return self.relations._get_unique(relation_name, relation_id) + + def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a network space binding. + + Args: + binding_key: The relation name or instance to obtain bindings for. + Returns: + If ``binding_key`` is a relation name, the method returns the default binding + for that relation. If a relation instance is provided, the method first looks + up a more specific binding for that specific relation ID, and if none is found + falls back to the default binding for the relation name. + """ + return self._bindings.get(binding_key) + + +class _ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + """Represents a named application in the model. + + This might be your application, or might be an application that you are related to. + Charmers should not instantiate Application objects directly, but should use + :meth:`Model.get_app` if they need a reference to a given application. + + Attributes: + name: The name of this application (eg, 'mysql'). This name may differ from the name of + the charm, if the user has deployed it to a different name. + """ + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of the overall application. + + Can only be read and set by the lead unit of the application. + + The status of remote units is always Unknown. + + Raises: + RuntimeError: if you try to set the status of another application, or if you try to + set the status of this application as a unit that is not the leader. 
+ InvalidStatusError: if you try to set the status to something that is not a
+ :class:`StatusBase`
+
+ Example::
+
+ self.model.app.status = BlockedStatus('I need a human to come help me')
+ """
+ if not self._is_our_app:
+ return UnknownStatus()
+
+ if not self._backend.is_leader():
+ raise RuntimeError('cannot get application status as a non-leader unit')
+
+ if self._status:
+ return self._status
+
+ s = self._backend.status_get(is_app=True)
+ self._status = StatusBase.from_name(s['status'], s['message'])
+ return self._status
+
+ @status.setter
+ def status(self, value: 'StatusBase'):
+ if not isinstance(value, StatusBase):
+ raise InvalidStatusError(
+ 'invalid value provided for application {} status: {}'.format(self, value)
+ )
+
+ if not self._is_our_app:
+ raise RuntimeError('cannot set status for a remote application {}'.format(self))
+
+ if not self._backend.is_leader():
+ raise RuntimeError('cannot set application status as a non-leader unit')
+
+ self._backend.status_set(value.name, value.message, is_app=True)
+ self._status = value
+
+ def __repr__(self):
+ return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+
+class Unit:
+ """Represents a named unit in the model.
+
+ This might be your unit, another unit of your application, or a unit of another application
+ that you are related to.
+
+ Attributes:
+ name: The name of the unit (eg, 'mysql/0')
+ app: The Application the unit is a part of.
+ """
+
+ def __init__(self, name, backend, cache):
+ self.name = name
+
+ app_name = name.split('/')[0]
+ self.app = cache.get(Application, app_name)
+
+ self._backend = backend
+ self._cache = cache
+ self._is_our_unit = self.name == self._backend.unit_name
+ self._status = None
+
+ def _invalidate(self):
+ self._status = None
+
+ @property
+ def status(self) -> 'StatusBase':
+ """Used to report or read the status of a specific unit.
+
+ The status of any unit other than yourself is always Unknown.
+
+ Raises:
+ RuntimeError: if you try to set the status of a unit other than yourself.
+ InvalidStatusError: if you try to set the status to something other than
+ a :class:`StatusBase`
+ Example::
+
+ self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators')
+ """
+ if not self._is_our_unit:
+ return UnknownStatus()
+
+ if self._status:
+ return self._status
+
+ s = self._backend.status_get(is_app=False)
+ self._status = StatusBase.from_name(s['status'], s['message'])
+ return self._status
+
+ @status.setter
+ def status(self, value: 'StatusBase'):
+ if not isinstance(value, StatusBase):
+ raise InvalidStatusError(
+ 'invalid value provided for unit {} status: {}'.format(self, value)
+ )
+
+ if not self._is_our_unit:
+ raise RuntimeError('cannot set status for a remote unit {}'.format(self))
+
+ self._backend.status_set(value.name, value.message, is_app=False)
+ self._status = value
+
+ def __repr__(self):
+ return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+ def is_leader(self) -> bool:
+ """Return whether this unit is the leader of its application.
+
+ This can only be called for your own unit.
+ Returns:
+ True if you are the leader, False otherwise
+ Raises:
+ RuntimeError: if called for a unit that is not yourself
+ """
+ if self._is_our_unit:
+ # This value is not cached as it is not guaranteed to persist for the whole duration
+ # of a hook execution.
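+ # Editor's note (assumption, not upstream code): Juju only guarantees
+ # leadership for the duration of a short lease (~30s; compare
+ # _ModelBackend.LEASE_RENEWAL_PERIOD below), so long-running handlers
+ # should re-check is_leader() rather than hold on to this result.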
+ return self._backend.is_leader() + else: + raise RuntimeError( + 'leadership status of remote units ({}) is not visible to other' + ' applications'.format(self) + ) + + def set_workload_version(self, version: str) -> None: + """Record the version of the software running as the workload. + + This shouldn't be confused with the revision of the charm. This is informative only; + shown in the output of 'juju status'. + """ + if not isinstance(version, str): + raise TypeError("workload version must be a str, not {}: {!r}".format( + type(version).__name__, version)) + self._backend.application_version_set(version) + + +class LazyMapping(Mapping, ABC): + """Represents a dict that isn't populated until it is accessed. + + Charm authors should generally never need to use this directly, but it forms + the basis for many of the dicts that the framework tracks. + """ + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def _invalidate(self): + self._lazy_data = None + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of :class:`Relation` instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role.is_peer(): + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, + self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _invalidate(self, relation_name): + """Used to wipe the cache of a given relation_name. + + Not meant to be used by Charm authors. The content of relation data is + static for the lifetime of a hook, so it is safe to cache in memory once + accessed. + """ + self._data[relation_name] = None + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError('relation id {} must be int or None not {}'.format( + relation_id, + type(relation_id).__name__)) + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, + self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. 
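+ # Illustrative usage (assumed, not upstream): charm code can avoid this
+ # error by disambiguating with an explicit id, e.g.
+ #   rel = self.model.get_relation('db', relation_id=event.relation.id)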
+ raise TooManyRelatedAppsError(relation_name, num_related, 1)
+
+
+class BindingMapping:
+ """Mapping of endpoints to network bindings.
+
+ Charm authors should not instantiate this directly, but access it via
+ :meth:`Model.get_binding`
+ """
+
+ def __init__(self, backend):
+ self._backend = backend
+ self._data = {}
+
+ def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding':
+ """Get a specific Binding for an endpoint/relation.
+
+ Not used directly by Charm authors. See :meth:`Model.get_binding`
+ """
+ if isinstance(binding_key, Relation):
+ binding_name = binding_key.name
+ relation_id = binding_key.id
+ elif isinstance(binding_key, str):
+ binding_name = binding_key
+ relation_id = None
+ else:
+ raise ModelError('binding key must be str or relation instance, not {}'
+ ''.format(type(binding_key).__name__))
+ binding = self._data.get(binding_key)
+ if binding is None:
+ binding = Binding(binding_name, relation_id, self._backend)
+ self._data[binding_key] = binding
+ return binding
+
+
+class Binding:
+ """Binding to a network space.
+
+ Attributes:
+ name: The name of the endpoint this binding represents (eg, 'db')
+ """
+
+ def __init__(self, name, relation_id, backend):
+ self.name = name
+ self._relation_id = relation_id
+ self._backend = backend
+ self._network = None
+
+ @property
+ def network(self) -> 'Network':
+ """The network information for this binding."""
+ if self._network is None:
+ try:
+ self._network = Network(self._backend.network_get(self.name, self._relation_id))
+ except RelationNotFoundError:
+ if self._relation_id is None:
+ raise
+ # If a relation is dead, we can still get network info associated with an
+ # endpoint itself
+ self._network = Network(self._backend.network_get(self.name))
+ return self._network
+
+
+class Network:
+ """Network space details.
+
+ Charm authors should not instantiate this directly, but should get access to the Network
+ definition from :meth:`Model.get_binding` and its ``network`` attribute.
+
+ Attributes:
+ interfaces: A list of :class:`NetworkInterface` details. This includes the
+ information about how your application should be configured (eg, what
+ IP addresses should you bind to.)
+ Note that multiple addresses for a single interface are represented as multiple
+ interfaces. (eg, ``[NetworkInterface('ens1', '10.1.1.1/32'),
+ NetworkInterface('ens1', '10.1.2.1/32')]``)
+ ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP
+ addresses that other units should use to get in touch with you.
+ egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that
+ other units will see you connecting from. Due to things like NAT it isn't always
+ possible to narrow it down to a single address, but when it is clear, the CIDRs
+ will be constrained to a single address. (eg, 10.0.0.1/32)
+ Args:
+ network_info: A dict of network information as returned by ``network-get``.
+ """
+
+ def __init__(self, network_info: dict):
+ self.interfaces = []
+ # Treat multiple addresses on an interface as multiple logical
+ # interfaces with the same name.
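+ # Illustrative shape of network_info (an assumption inferred from the
+ # parsing below, not upstream documentation):
+ #   {'bind-addresses': [{'interface-name': 'ens1',
+ #                        'addresses': [{'value': '10.1.1.1',
+ #                                       'cidr': '10.1.1.0/24'}]}],
+ #    'ingress-addresses': ['10.1.1.1'],
+ #    'egress-subnets': ['10.1.1.0/24']}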
+ for interface_info in network_info['bind-addresses']: + interface_name = interface_info['interface-name'] + for address_info in interface_info['addresses']: + self.interfaces.append(NetworkInterface(interface_name, address_info)) + self.ingress_addresses = [] + for address in network_info['ingress-addresses']: + self.ingress_addresses.append(ipaddress.ip_address(address)) + self.egress_subnets = [] + for subnet in network_info['egress-subnets']: + self.egress_subnets.append(ipaddress.ip_network(subnet)) + + @property + def bind_address(self): + """A single address that your application should bind() to. + + For the common case where there is a single answer. This represents a single + address from :attr:`.interfaces` that can be used to configure where your + application should bind() and listen(). + """ + return self.interfaces[0].address + + @property + def ingress_address(self): + """The address other applications should use to connect to your unit. + + Due to things like public/private addresses, NAT and tunneling, the address you bind() + to is not always the address other people can use to connect() to you. + This is just the first address from :attr:`.ingress_addresses`. + """ + return self.ingress_addresses[0] + + +class NetworkInterface: + """Represents a single network interface that the charm needs to know about. + + Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding` + to get the network information for a given endpoint. + + Attributes: + name: The name of the interface (eg. 'eth0', or 'ens1') + subnet: An :class:`ipaddress.ip_network` representation of the IP for the network + interface. This may be a single address (eg '10.0.1.2/32') + """ + + def __init__(self, name: str, address_info: dict): + self.name = name + # TODO: expose a hardware address here, see LP: #1864070. + self.address = ipaddress.ip_address(address_info['value']) + cidr = address_info['cidr'] + if not cidr: + # The cidr field may be empty, see LP: #1864102. + # In this case, make it a /32 or /128 IP network. + self.subnet = ipaddress.ip_network(address_info['value']) + else: + self.subnet = ipaddress.ip_network(cidr) + # TODO: expose a hostname/canonical name for the address here, see LP: #1864086. + + +class Relation: + """Represents an established relation between this application and another application. + + This class should not be instantiated directly, instead use :meth:`Model.get_relation` + or :attr:`RelationEvent.relation`. + + Attributes: + name: The name of the local endpoint of the relation (eg 'db') + id: The identifier for a particular relation (integer) + app: An :class:`Application` representing the remote application of this relation. + For peer relations this will be the local application. + units: A set of :class:`Unit` for units that have started and joined this relation. + data: A :class:`RelationData` holding the data buckets for each entity + of a relation. Accessed via eg Relation.data[unit]['foo'] + """ + + def __init__( + self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit, + backend: '_ModelBackend', cache: '_ModelCache'): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. 
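+ # (A peer relation is one declared under 'peers' in metadata.yaml, so
+ # the "remote" application is this charm's own application.)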
+ if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. + pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return '<{}.{} {}:{}>'.format(type(self).__module__, + type(self).__name__, + self.name, + self.id) + + +class RelationData(Mapping): + """Represents the various data buckets of a given relation. + + Each unit and application involved in a relation has their own data bucket. + Eg: ``{entity: RelationDataContent}`` + where entity can be either a :class:`Unit` or a :class:`Application`. + + Units can read and write their own data, and if they are the leader, + they can read and write their application data. They are allowed to read + remote unit and application data. + + This class should not be created directly. It should be accessed via + :attr:`Relation.data` + """ + + def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'): + self.relation = weakref.proxy(relation) + self._data = { + our_unit: RelationDataContent(self.relation, our_unit, backend), + our_unit.app: RelationDataContent(self.relation, our_unit.app, backend), + } + self._data.update({ + unit: RelationDataContent(self.relation, unit, backend) + for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app is not None: + self._data.update({ + self.relation.app: RelationDataContent(self.relation, self.relation.app, backend), + }) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on + # whether this unit is a leader or not, but this is not guaranteed + # to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError('cannot set relation data for {}'.format(self._entity.name)) + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an + # empty string will remove the key entirely from the relation data. 
+ del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty + # string will remove the key entirely from the relation data. + self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units. + + To access a status by name, see :meth:`StatusBase.from_name`, most use cases will just + directly use the child class to indicate their status. + """ + + _statuses = {} + name = None + + def __init__(self, message: str): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + return super().__new__(cls) + + def __eq__(self, other): + if not isinstance(self, type(other)): + return False + return self.message == other.message + + def __repr__(self): + return "{.__class__.__name__}({!r})".format(self, self.message) + + @classmethod + def from_name(cls, name: str, message: str): + if name == 'unknown': + # unknown is special + return UnknownStatus() + else: + return cls._statuses[name](message) + + @classmethod + def register(cls, child): + if child.name is None: + raise AttributeError('cannot register a Status which has no name') + cls._statuses[child.name] = child + return child + + +@StatusBase.register +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the + charm has not called status-set yet. + + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + def __repr__(self): + return "UnknownStatus()" + + +@StatusBase.register +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message: str = ''): + super().__init__(message) + + +@StatusBase.register +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +@StatusBase.register +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation + for providing those services. This is a "spinning" state, not an error state. It + reflects activity on the unit itself, not on peers or related units. + + """ + name = 'maintenance' + + +@StatusBase.register +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which + it is related is not running. + + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name: str) -> Path: + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. 
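+
+ Example (illustrative; assumes a resource named 'software' declared in
+ metadata.yaml)::
+
+ path = self.model.resources.fetch('software')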
+ """ + if name not in self._paths: + raise RuntimeError('invalid resource name: {}'.format(name)) + if self._paths[name] is None: + self._paths[name] = Path(self._backend.resource_get(name)) + return self._paths[name] + + +class Pod: + """Represents the definition of a pod spec in Kubernetes models. + + Currently only supports simple access to setting the Juju pod spec via :attr:`.set_spec`. + """ + + def __init__(self, backend: '_ModelBackend'): + self._backend = backend + + def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None): + """Set the specification for pods that Juju should start in kubernetes. + + See `juju help-tool pod-spec-set` for details of what should be passed. + Args: + spec: The mapping defining the pod specification + k8s_resources: Additional kubernetes specific specification. + + Returns: + """ + if not self._backend.is_leader(): + raise ModelError('cannot set a pod spec as this unit is not a leader') + self._backend.pod_spec_set(spec, k8s_resources) + + +class StorageMapping(Mapping): + """Map of storage names to lists of Storage instances.""" + + def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._storage_map = {storage_name: None for storage_name in storage_names} + + def __contains__(self, key: str): + return key in self._storage_map + + def __len__(self): + return len(self._storage_map) + + def __iter__(self): + return iter(self._storage_map) + + def __getitem__(self, storage_name: str) -> typing.List['Storage']: + storage_list = self._storage_map[storage_name] + if storage_list is None: + storage_list = self._storage_map[storage_name] = [] + for storage_id in self._backend.storage_list(storage_name): + storage_list.append(Storage(storage_name, storage_id, self._backend)) + return storage_list + + def request(self, storage_name: str, count: int = 1): + """Requests new storage instances of a given name. + + Uses storage-add tool to request additional storage. Juju will notify the unit + via -storage-attached events when it becomes available. + """ + if storage_name not in self._storage_map: + raise ModelError(('cannot add storage {!r}:' + ' it is not present in the charm metadata').format(storage_name)) + self._backend.storage_add(storage_name, count) + + +class Storage: + """"Represents a storage as defined in metadata.yaml + + Attributes: + name: Simple string name of the storage + id: The provider id for storage + """ + + def __init__(self, storage_name, storage_id, backend): + self.name = storage_name + self.id = storage_id + self._backend = backend + self._location = None + + @property + def location(self): + if self._location is None: + raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location") + self._location = Path(raw) + return self._location + + +class ModelError(Exception): + """Base class for exceptions raised when interacting with the Model.""" + pass + + +class TooManyRelatedAppsError(ModelError): + """Raised by :meth:`Model.get_relation` if there is more than one related application.""" + + def __init__(self, relation_name, num_related, max_supported): + super().__init__('Too many remote applications on {} ({} > {})'.format( + relation_name, num_related, max_supported)) + self.relation_name = relation_name + self.num_related = num_related + self.max_supported = max_supported + + +class RelationDataError(ModelError): + """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid. 
+ + This is raised if you're either trying to set a value to something that isn't a string, + or if you are trying to set a value in a bucket that you don't have access to. (eg, + another application/unit or setting your application data but you aren't the leader.) + """ + + +class RelationNotFoundError(ModelError): + """Backend error when querying juju for a given relation and that relation doesn't exist.""" + + +class InvalidStatusError(ModelError): + """Raised if trying to set an Application or Unit status to something invalid.""" + + +class _ModelBackend: + """Represents the connection between the Model representation and talking to Juju. + + Charm authors should not directly interact with the ModelBackend, it is a private + implementation of Model. + """ + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self, unit_name=None, model_name=None): + if unit_name is None: + self.unit_name = os.environ['JUJU_UNIT_NAME'] + else: + self.unit_name = unit_name + if model_name is None: + model_name = os.environ.get('JUJU_MODEL_NAME') + self.model_name = model_name + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = None + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', '-r', str(relation_id), + return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'getting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-get', '-r', str(relation_id), '-', member_name] + if is_app: + args.append('--app') + + try: + return self._run(*args, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'setting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)] + if is_app: + args.append('--app') + + try: + return self._run(*args) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm 
code is executing on.
+
+ The value is cached for the duration of a lease which is 30s in Juju.
+ """
+ now = time.monotonic()
+ if self._leader_check_time is None:
+ check = True
+ else:
+ time_since_check = datetime.timedelta(seconds=now - self._leader_check_time)
+ check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None)
+ if check:
+ # Current time MUST be saved before running is-leader to ensure the cache
+ # is only used inside the window that is-leader itself asserts.
+ self._leader_check_time = now
+ self._is_leader = self._run('is-leader', return_output=True, use_json=True)
+
+ return self._is_leader
+
+ def resource_get(self, resource_name):
+ return self._run('resource-get', resource_name, return_output=True).strip()
+
+ def pod_spec_set(self, spec, k8s_resources):
+ tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
+ try:
+ spec_path = tmpdir / 'spec.json'
+ spec_path.write_text(json.dumps(spec))
+ args = ['--file', str(spec_path)]
+ if k8s_resources:
+ k8s_res_path = tmpdir / 'k8s-resources.json'
+ k8s_res_path.write_text(json.dumps(k8s_resources))
+ args.extend(['--k8s-resources', str(k8s_res_path)])
+ self._run('pod-spec-set', *args)
+ finally:
+ shutil.rmtree(str(tmpdir))
+
+ def status_get(self, *, is_app=False):
+ """Get a status of a unit or an application.
+
+ Args:
+ is_app: A boolean indicating whether the status should be retrieved for a unit
+ or an application.
+ """
+ content = self._run(
+ 'status-get', '--include-data', '--application={}'.format(is_app),
+ use_json=True,
+ return_output=True)
+ # Unit status looks like (in YAML):
+ # message: 'load: 0.28 0.26 0.26'
+ # status: active
+ # status-data: {}
+ # Application status looks like (in YAML):
+ # application-status:
+ # message: 'load: 0.28 0.26 0.26'
+ # status: active
+ # status-data: {}
+ # units:
+ # uo/0:
+ # message: 'load: 0.28 0.26 0.26'
+ # status: active
+ # status-data: {}
+
+ if is_app:
+ return {'status': content['application-status']['status'],
+ 'message': content['application-status']['message']}
+ else:
+ return content
+
+ def status_set(self, status, message='', *, is_app=False):
+ """Set a status of a unit or an application.
+
+ Args:
+ is_app: A boolean indicating whether the status should be set for a unit or an
+ application.
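+
+ Example (illustrative, not upstream; assumes a backend instance)::
+
+ backend.status_set('active', 'ready', is_app=False)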
+ """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', '--application={}'.format(is_app), status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, + return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, + return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError('storage count must be integer, got: {} ({})'.format(count, + type(count))) + self._run('storage-add', '{}={}'.format(name, count)) + + def action_get(self): + return self._run('action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()]) + + def action_log(self, message): + self._run('action-log', message) + + def action_fail(self, message=''): + self._run('action-fail', message) + + def application_version_set(self, version): + self._run('application-version-set', '--', version) + + def juju_log(self, level, message): + self._run('juju-log', '--log-level', level, message) + + def network_get(self, binding_name, relation_id=None): + """Return network info provided by network-get for a given binding. + + Args: + binding_name: A name of a binding (relation name or extra-binding name). + relation_id: An optional relation id to get network info for. + """ + cmd = ['network-get', binding_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def add_metrics(self, metrics, labels=None): + cmd = ['add-metric'] + + if labels: + label_args = [] + for k, v in labels.items(): + _ModelBackendValidator.validate_metric_label(k) + _ModelBackendValidator.validate_label_value(k, v) + label_args.append('{}={}'.format(k, v)) + cmd.extend(['--labels', ','.join(label_args)]) + + metric_args = [] + for k, v in metrics.items(): + _ModelBackendValidator.validate_metric_key(k) + metric_value = _ModelBackendValidator.format_metric_value(v) + metric_args.append('{}={}'.format(k, metric_value)) + cmd.extend(metric_args) + self._run(*cmd) + + +class _ModelBackendValidator: + """Provides facilities for validating inputs and formatting them for model backends.""" + + METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$') + + @classmethod + def validate_metric_key(cls, key): + if cls.METRIC_KEY_REGEX.match(key) is None: + raise ModelError( + 'invalid metric key {!r}: must match {}'.format( + key, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def validate_metric_label(cls, label_name): + if cls.METRIC_KEY_REGEX.match(label_name) is None: + raise ModelError( + 'invalid metric label name {!r}: must match {}'.format( + label_name, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def format_metric_value(cls, value): + try: + decimal_value = decimal.Decimal.from_float(value) + except TypeError as e: + e2 = ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + raise e2 from e + if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0: + raise ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + 
return str(decimal_value)
+
+ @classmethod
+ def validate_label_value(cls, label, value):
+ # Label values cannot be empty, contain commas or equal signs as those are
+ # used by add-metric as separators.
+ if not value:
+ raise ModelError(
+ 'metric label {} has an empty value, which is not allowed'.format(label))
+ v = str(value)
+ if re.search('[,=]', v) is not None:
+ raise ModelError(
+ 'metric label values must not contain "," or "=": {}={!r}'.format(label, value))
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/storage.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/storage.py
new file mode 100755
index 0000000000000000000000000000000000000000..d4310ce1cfbb707c6278b70f84a9751da3ce07af
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/storage.py
@@ -0,0 +1,318 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import timedelta
+import pickle
+import shutil
+import subprocess
+import sqlite3
+import typing
+
+import yaml
+
+
+class SQLiteStorage:
+
+ DB_LOCK_TIMEOUT = timedelta(hours=1)
+
+ def __init__(self, filename):
+ # The isolation_level argument is set to None such that the implicit
+ # transaction management behavior of the sqlite3 module is disabled.
+ self._db = sqlite3.connect(str(filename),
+ isolation_level=None,
+ timeout=self.DB_LOCK_TIMEOUT.total_seconds())
+ self._setup()
+
+ def _setup(self):
+ # Make sure that the database is locked until the connection is closed,
+ # not until the transaction ends.
+ self._db.execute("PRAGMA locking_mode=EXCLUSIVE")
+ c = self._db.execute("BEGIN")
+ c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'")
+ if c.fetchone()[0] == 0:
+ # Keep in mind what might happen if the process dies somewhere below.
+ # The system must not be rendered permanently broken by that.
+ self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)")
+ self._db.execute('''
+ CREATE TABLE notice (
+ sequence INTEGER PRIMARY KEY AUTOINCREMENT,
+ event_path TEXT,
+ observer_path TEXT,
+ method_name TEXT)
+ ''')
+ self._db.commit()
+
+ def close(self):
+ self._db.close()
+
+ def commit(self):
+ self._db.commit()
+
+ # There's commit but no rollback. For abort to be supported, we'll need logic that
+ # can rollback decisions made by third-party code in terms of the internal state
+ # of objects that have been snapshotted, and hooks to let them know about it and
+ # take the needed actions to undo their logic until the last snapshot.
+ # This is doable but will significantly increase the chances for mistakes.
+
+ def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
+ """Part of the Storage API, persist snapshot data under the given handle.
+
+ Args:
+ handle_path: The string identifying the snapshot.
+ snapshot_data: The data to be persisted. (as returned by Object.snapshot()). This
+ might be a dict/tuple/int, but must only contain 'simple' python types.
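+
+ Example (illustrative; the handle path format is an assumption)::
+
+ store.save_snapshot('MyCharm/StoredStateData[_stored]', {'count': 1})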
+ """ + # Use pickle for serialization, so the value remains portable. + raw_data = pickle.dumps(snapshot_data) + self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data)) + + def load_snapshot(self, handle_path: str) -> typing.Any: + """Part of the Storage API, retrieve a snapshot that was previously saved. + + Args: + handle_path: The string identifying the snapshot. + Raises: + NoSnapshotError: if there is no snapshot for the given handle_path. + """ + c = self._db.cursor() + c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,)) + row = c.fetchone() + if row: + return pickle.loads(row[0]) + raise NoSnapshotError(handle_path) + + def drop_snapshot(self, handle_path: str): + """Part of the Storage API, remove a snapshot that was previously saved. + + Dropping a snapshot that doesn't exist is treated as a no-op. + """ + self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,)) + + def list_snapshots(self) -> typing.Generator[str, None, None]: + """Return the name of all snapshots that are currently saved.""" + c = self._db.cursor() + c.execute("SELECT handle FROM snapshot") + while True: + rows = c.fetchmany() + if not rows: + break + for row in rows: + yield row[0] + + def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None: + """Part of the Storage API, record an notice (event and observer)""" + self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)', + (event_path, observer_path, method_name)) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None: + """Part of the Storage API, remove a notice that was previously recorded.""" + self._db.execute(''' + DELETE FROM notice + WHERE event_path=? + AND observer_path=? + AND method_name=? + ''', (event_path, observer_path, method_name)) + + def notices(self, event_path: typing.Optional[str]) ->\ + typing.Generator[typing.Tuple[str, str, str], None, None]: + """Part of the Storage API, return all notices that begin with event_path. + + Args: + event_path: If supplied, will only yield events that match event_path. If not + supplied (or None/'') will return all events. + Returns: + Iterable of (event_path, observer_path, method_name) tuples + """ + if event_path: + c = self._db.execute(''' + SELECT event_path, observer_path, method_name + FROM notice + WHERE event_path=? + ORDER BY sequence + ''', (event_path,)) + else: + c = self._db.execute(''' + SELECT event_path, observer_path, method_name + FROM notice + ORDER BY sequence + ''') + while True: + rows = c.fetchmany() + if not rows: + break + for row in rows: + yield tuple(row) + + +class JujuStorage: + """"Storing the content tracked by the Framework in Juju. + + This uses :class:`_JujuStorageBackend` to interact with state-get/state-set + as the way to store state for the framework and for components. 
+ """ + + NOTICE_KEY = "#notices#" + + def __init__(self, backend: '_JujuStorageBackend' = None): + self._backend = backend + if backend is None: + self._backend = _JujuStorageBackend() + + def close(self): + return + + def commit(self): + return + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + self._backend.set(handle_path, snapshot_data) + + def load_snapshot(self, handle_path): + try: + content = self._backend.get(handle_path) + except KeyError: + raise NoSnapshotError(handle_path) + return content + + def drop_snapshot(self, handle_path): + self._backend.delete(handle_path) + + def save_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.append([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.remove([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def notices(self, event_path: str): + notice_list = self._load_notice_list() + for row in notice_list: + if row[0] != event_path: + continue + yield tuple(row) + + def _load_notice_list(self) -> typing.List[typing.Tuple[str]]: + try: + notice_list = self._backend.get(self.NOTICE_KEY) + except KeyError: + return [] + if notice_list is None: + return [] + return notice_list + + def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None: + self._backend.set(self.NOTICE_KEY, notices) + + +class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)): + """Handle a couple basic python types. + + yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one + that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just + subclass SafeLoader and add tuples back in. + """ + # Taken from the example at: + # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml + + construct_python_tuple = yaml.Loader.construct_python_tuple + + +_SimpleLoader.add_constructor( + u'tag:yaml.org,2002:python/tuple', + _SimpleLoader.construct_python_tuple) + + +class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)): + """Add types supported by 'marshal' + + YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So + we want to only support dumping out types that are safe to load. + """ + + +_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple +_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple) + + +class _JujuStorageBackend: + """Implements the interface from the Operator framework to Juju's state-get/set/etc.""" + + @staticmethod + def is_available() -> bool: + """Check if Juju state storage is available. + + This checks if there is a 'state-get' executable in PATH. + """ + p = shutil.which('state-get') + return p is not None + + def set(self, key: str, value: typing.Any) -> None: + """Set a key to a given value. + + Args: + key: The string key that will be used to find the value later + value: Arbitrary content that will be returned by get(). + Raises: + CalledProcessError: if 'state-set' returns an error code. + """ + # default_flow_style=None means that it can use Block for + # complex types (types that have nested types) but use flow + # for simple types (like an array). Not all versions of PyYAML + # have the same default style. 
+ encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None)
+ content = yaml.dump(
+ {key: encoded_value}, encoding='utf-8', default_style='|',
+ default_flow_style=False,
+ Dumper=_SimpleDumper)
+ subprocess.run(["state-set", "--file", "-"], input=content, check=True)
+
+ def get(self, key: str) -> typing.Any:
+ """Get the value associated with a given key.
+
+ Args:
+ key: The string key that will be used to find the value
+ Raises:
+ CalledProcessError: if 'state-get' returns an error code.
+ """
+ # We don't capture stderr here so it can end up in debug logs.
+ p = subprocess.run(
+ ["state-get", key],
+ stdout=subprocess.PIPE,
+ check=True,
+ )
+ if p.stdout == b'' or p.stdout == b'\n':
+ raise KeyError(key)
+ return yaml.load(p.stdout, Loader=_SimpleLoader)
+
+ def delete(self, key: str) -> None:
+ """Remove a key from being tracked.
+
+ Args:
+ key: The key to stop storing
+ Raises:
+ CalledProcessError: if 'state-delete' returns an error code.
+ """
+ subprocess.run(["state-delete", key], check=True)
+
+
+class NoSnapshotError(Exception):
+
+ def __init__(self, handle_path):
+ self.handle_path = handle_path
+
+ def __str__(self):
+ return 'no snapshot data found for {} object'.format(self.handle_path)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/testing.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/testing.py
new file mode 100755
index 0000000000000000000000000000000000000000..b4b3fe071216238007c9f3847ca9556be626bf6b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/testing.py
@@ -0,0 +1,586 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import pathlib
+from textwrap import dedent
+import tempfile
+import typing
+import yaml
+import weakref
+
+from ops import (
+ charm,
+ framework,
+ model,
+ storage,
+)
+
+
+# OptionalYAML is something like metadata.yaml or actions.yaml. You can
+# pass in a file-like object or the string directly.
+OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+ """This class represents a way to build up the model that will drive a test suite.
+
+ The model that is created is from the viewpoint of the charm that you are testing.
+
+ Example::
+
+ harness = Harness(MyCharm)
+ # Do initial setup here
+ relation_id = harness.add_relation('db', 'postgresql')
+ # Now instantiate the charm to see events as the model changes
+ harness.begin()
+ harness.add_relation_unit(relation_id, 'postgresql/0')
+ harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+ # Check that charm has properly handled the relation_joined event for postgresql/0
+ self.assertEqual(harness.charm. ...)
+
+ Args:
+ charm_cls: The Charm class that you'll be testing.
+ meta: A string or file-like object containing the contents of
+ metadata.yaml.
If not supplied, we will look for a 'metadata.yaml' file in the
+ parent directory of the Charm, and if not found fall back to a trivial
+ 'name: test-charm' metadata.
+ actions: A string or file-like object containing the contents of
+ actions.yaml. If not supplied, we will look for an 'actions.yaml' file in the
+ parent directory of the Charm.
+ """
+
+ def __init__(
+ self,
+ charm_cls: typing.Type[charm.CharmBase],
+ *,
+ meta: OptionalYAML = None,
+ actions: OptionalYAML = None):
+ # TODO: jam 2020-03-05 We probably want to take config as a parameter as well, since
+ # it would define the default values of config that the charm would see.
+ self._charm_cls = charm_cls
+ self._charm = None
+ self._charm_dir = 'no-disk-path' # this may be updated by _create_meta
+ self._lazy_resource_dir = None
+ self._meta = self._create_meta(meta, actions)
+ self._unit_name = self._meta.name + '/0'
+ self._framework = None
+ self._hooks_enabled = True
+ self._relation_id_counter = 0
+ self._backend = _TestingModelBackend(self._unit_name, self._meta)
+ self._model = model.Model(self._meta, self._backend)
+ self._storage = storage.SQLiteStorage(':memory:')
+ self._framework = framework.Framework(
+ self._storage, self._charm_dir, self._meta, self._model)
+
+ @property
+ def charm(self) -> charm.CharmBase:
+ """Return the instance of the charm class that was passed to __init__.
+
+ Note that the Charm is not instantiated until you have called
+ :meth:`.begin()`.
+ """
+ return self._charm
+
+ @property
+ def model(self) -> model.Model:
+ """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
+ return self._model
+
+ @property
+ def framework(self) -> framework.Framework:
+ """Return the Framework that is being driven by this Harness."""
+ return self._framework
+
+ @property
+ def _resource_dir(self) -> pathlib.Path:
+ if self._lazy_resource_dir is not None:
+ return self._lazy_resource_dir
+
+ self.__resource_dir = tempfile.TemporaryDirectory()
+ self._lazy_resource_dir = pathlib.Path(self.__resource_dir.name)
+ self._finalizer = weakref.finalize(self, self.__resource_dir.cleanup)
+ return self._lazy_resource_dir
+
+ def begin(self) -> None:
+ """Instantiate the Charm and start handling events.
+
+ Before calling begin(), there is no Charm instance, so changes to the Model won't emit
+ events. You must call begin before :attr:`.charm` is valid.
+ """
+ if self._charm is not None:
+ raise RuntimeError('cannot call the begin method on the harness more than once')
+
+ # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
+ # the original class against multiple Frameworks. So create a locally defined class
+ # and register it.
+ # TODO: jam 2020-03-16 We are looking to change this to instance attributes instead of
+ # class attributes, which should clean up this ugliness. The API can stay the same
+ class TestEvents(self._charm_cls.on.__class__):
+ pass
+
+ TestEvents.__name__ = self._charm_cls.on.__class__.__name__
+
+ class TestCharm(self._charm_cls):
+ on = TestEvents()
+
+ # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
+ # rather than TestCharm has no attribute foo.
+ TestCharm.__name__ = self._charm_cls.__name__
+ self._charm = TestCharm(self._framework)
+
+ def _create_meta(self, charm_metadata, action_metadata):
+ """Create a CharmMeta object.
+
+ Handle the cases where a user doesn't supply explicit metadata snippets.
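+
+ Example (illustrative; an explicit snippet bypasses the on-disk lookup)::
+
+ harness = Harness(MyCharm, meta='name: test-charm')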
+ """ + filename = inspect.getfile(self._charm_cls) + charm_dir = pathlib.Path(filename).parents[1] + + if charm_metadata is None: + metadata_path = charm_dir / 'metadata.yaml' + if metadata_path.is_file(): + charm_metadata = metadata_path.read_text() + self._charm_dir = charm_dir + else: + # The simplest of metadata that the framework can support + charm_metadata = 'name: test-charm' + elif isinstance(charm_metadata, str): + charm_metadata = dedent(charm_metadata) + + if action_metadata is None: + actions_path = charm_dir / 'actions.yaml' + if actions_path.is_file(): + action_metadata = actions_path.read_text() + self._charm_dir = charm_dir + elif isinstance(action_metadata, str): + action_metadata = dedent(action_metadata) + + return charm.CharmMeta.from_yaml(charm_metadata, action_metadata) + + def add_oci_resource(self, resource_name: str, + contents: typing.Mapping[str, str] = None) -> None: + """Add oci resources to the backend. + + This will register an oci resource and create a temporary file for processing metadata + about the resource. A default set of values will be used for all the file contents + unless a specific contents dict is provided. + + Args: + resource_name: Name of the resource to add custom contents to. + contents: Optional custom dict to write for the named resource. + """ + if not contents: + contents = {'registrypath': 'registrypath', + 'username': 'username', + 'password': 'password', + } + if resource_name not in self._meta.resources.keys(): + raise RuntimeError('Resource {} is not a defined resources'.format(resource_name)) + if self._meta.resources[resource_name].type != "oci-image": + raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name)) + resource_dir = self._resource_dir / resource_name + resource_dir.mkdir(exist_ok=True) + resource_file = resource_dir / "contents.yaml" + with resource_file.open('wt', encoding='utf8') as resource_yaml: + yaml.dump(contents, resource_yaml) + self._backend._resources_map[resource_name] = resource_file + + def populate_oci_resources(self) -> None: + """Populate all OCI resources.""" + for name, data in self._meta.resources.items(): + if data.type == "oci-image": + self.add_oci_resource(name) + + def disable_hooks(self) -> None: + """Stop emitting hook events when the model changes. + + This can be used by developers to stop changes to the model from emitting events that + the charm will react to. Call :meth:`.enable_hooks` + to re-enable them. + """ + self._hooks_enabled = False + + def enable_hooks(self) -> None: + """Re-enable hook events from charm.on when the model is changed. + + By default hook events are enabled once you call :meth:`.begin`, + but if you have used :meth:`.disable_hooks`, this can be used to + enable them again. + """ + self._hooks_enabled = True + + def _next_relation_id(self): + rel_id = self._relation_id_counter + self._relation_id_counter += 1 + return rel_id + + def add_relation(self, relation_name: str, remote_app: str) -> int: + """Declare that there is a new relation between this app and `remote_app`. + + Args: + relation_name: The relation on Charm that is being related to + remote_app: The name of the application that is being related to + + Return: + The relation_id created by this add_relation. 
+ """ + rel_id = self._next_relation_id() + self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id) + self._backend._relation_names[rel_id] = relation_name + self._backend._relation_list_map[rel_id] = [] + self._backend._relation_data[rel_id] = { + remote_app: {}, + self._backend.unit_name: {}, + self._backend.app_name: {}, + } + # Reload the relation_ids list + if self._model is not None: + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return rel_id + relation = self._model.get_relation(relation_name, rel_id) + app = self._model.get_app(remote_app) + self._charm.on[relation_name].relation_created.emit( + relation, app) + return rel_id + + def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: + """Add a new unit to a relation. + + Example:: + + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + + This will trigger a `relation_joined` event and a `relation_changed` event. + + Args: + relation_id: The integer relation identifier (as returned by add_relation). + remote_unit_name: A string representing the remote unit that is being added. + Return: + None + """ + self._backend._relation_list_map[relation_id].append(remote_unit_name) + self._backend._relation_data[relation_id][remote_unit_name] = {} + relation_name = self._backend._relation_names[relation_id] + # Make sure that the Model reloads the relation_list for this relation_id, as well as + # reloading the relation data for this unit. + if self._model is not None: + remote_unit = self._model.get_unit(remote_unit_name) + relation = self._model.get_relation(relation_name, relation_id) + unit_cache = relation.data.get(remote_unit, None) + if unit_cache is not None: + unit_cache._invalidate() + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return + self._charm.on[relation_name].relation_joined.emit( + relation, remote_unit.app, remote_unit) + + def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping: + """Get the relation data bucket for a single app or unit in a given relation. + + This ignores all of the safety checks of who can and can't see data in relations (eg, + non-leaders can't read their own application's relation data because there are no events + that keep that data up-to-date for the unit). + + Args: + relation_id: The relation whose content we want to look at. + app_or_unit: The name of the application or unit whose data we want to read + Return: + a dict containing the relation data for `app_or_unit` or None. + Raises: + KeyError: if relation_id doesn't exist + """ + return self._backend._relation_data[relation_id].get(app_or_unit, None) + + def get_workload_version(self) -> str: + """Read the workload version that was set by the unit.""" + return self._backend._workload_version + + def set_model_name(self, name: str) -> None: + """Set the name of the Model that this is representing. + + This cannot be called once begin() has been called. But it lets you set the value that + will be returned by Model.name. + """ + if self._charm is not None: + raise RuntimeError('cannot set the Model name after begin()') + self._backend.model_name = name + + def update_relation_data( + self, + relation_id: int, + app_or_unit: str, + key_values: typing.Mapping, + ) -> None: + """Update the relation data for a given unit or application in a given relation. 
+ + This also triggers the `relation_changed` event for this relation_id. + + Args: + relation_id: The integer relation_id representing this relation. + app_or_unit: The unit or application name that is being updated. + This can be the local or remote application. + key_values: Each key/value will be updated in the relation data. + """ + relation_name = self._backend._relation_names[relation_id] + relation = self._model.get_relation(relation_name, relation_id) + if '/' in app_or_unit: + entity = self._model.get_unit(app_or_unit) + else: + entity = self._model.get_app(app_or_unit) + rel_data = relation.data.get(entity, None) + if rel_data is not None: + # rel_data may have cached now-stale data, so _invalidate() it. + # Note, this won't cause the data to be loaded if it wasn't already. + rel_data._invalidate() + + new_values = self._backend._relation_data[relation_id][app_or_unit].copy() + for k, v in key_values.items(): + if v == '': + new_values.pop(k, None) + else: + new_values[k] = v + self._backend._relation_data[relation_id][app_or_unit] = new_values + + if app_or_unit == self._model.unit.name: + # No events for our own unit + return + if app_or_unit == self._model.app.name: + # updating our own app only generates an event if it is a peer relation and we + # aren't the leader + is_peer = self._meta.relations[relation_name].role.is_peer() + if not is_peer: + return + if self._model.unit.is_leader(): + return + self._emit_relation_changed(relation_id, app_or_unit) + + def _emit_relation_changed(self, relation_id, app_or_unit): + if self._charm is None or not self._hooks_enabled: + return + rel_name = self._backend._relation_names[relation_id] + relation = self.model.get_relation(rel_name, relation_id) + if '/' in app_or_unit: + app_name = app_or_unit.split('/')[0] + unit_name = app_or_unit + app = self.model.get_app(app_name) + unit = self.model.get_unit(unit_name) + args = (relation, app, unit) + else: + app_name = app_or_unit + app = self.model.get_app(app_name) + args = (relation, app) + self._charm.on[rel_name].relation_changed.emit(*args) + + def update_config( + self, + key_values: typing.Mapping[str, str] = None, + unset: typing.Iterable[str] = (), + ) -> None: + """Update the config as seen by the charm. + + This will trigger a `config_changed` event. + + Args: + key_values: A Mapping of key:value pairs to update in config. + unset: An iterable of keys to remove from Config. (Note that this does + not currently reset the config values to the default defined in config.yaml.) + """ + config = self._backend._config + if key_values is not None: + for key, value in key_values.items(): + config[key] = value + for key in unset: + config.pop(key, None) + # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config + # is a LazyMapping, but its _load returns a dict and this method mutates + # the dict that Config is caching. Arguably we should be doing some sort + # of charm.framework.model.config._invalidate() + if self._charm is None or not self._hooks_enabled: + return + self._charm.on.config_changed.emit() + + def set_leader(self, is_leader: bool = True) -> None: + """Set whether this unit is the leader or not. + + If this charm becomes a leader then `leader_elected` will be triggered. + + Args: + is_leader: True/False as to whether this unit is the leader. 
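+ + Example (an illustrative sketch; MyCharm is a placeholder charm class):: + + harness = Harness(MyCharm) + harness.begin() + harness.set_leader(True) # emits leader_elected, as hooks are enabled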
+ """ + was_leader = self._backend._is_leader + self._backend._is_leader = is_leader + # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in + # the Model objects, so this automatically gets noticed. + if is_leader and not was_leader and self._charm is not None and self._hooks_enabled: + self._charm.on.leader_elected.emit() + + def _get_backend_calls(self, reset: bool = True) -> list: + """Return the calls that we have made to the TestingModelBackend. + + This is useful mostly for testing the framework itself, so that we can assert that we + do/don't trigger extra calls. + + Args: + reset: If True, reset the calls list back to empty, if false, the call list is + preserved. + Return: + ``[(call1, args...), (call2, args...)]`` + """ + calls = self._backend._calls.copy() + if reset: + self._backend._calls.clear() + return calls + + +def _record_calls(cls): + """Replace methods on cls with methods that record that they have been called. + + Iterate all attributes of cls, and for public methods, replace them with a wrapped method + that records the method called along with the arguments and keyword arguments. + """ + for meth_name, orig_method in cls.__dict__.items(): + if meth_name.startswith('_'): + continue + + def decorator(orig_method): + def wrapped(self, *args, **kwargs): + full_args = (orig_method.__name__,) + args + if kwargs: + full_args = full_args + (kwargs,) + self._calls.append(full_args) + return orig_method(self, *args, **kwargs) + return wrapped + + setattr(cls, meth_name, decorator(orig_method)) + return cls + + +@_record_calls +class _TestingModelBackend: + """This conforms to the interface for ModelBackend but provides canned data. + + DO NOT use this class directly, it is used by `Harness`_ to drive the model. + `Harness`_ is responsible for maintaining the internal consistency of the values here, + as the only public methods of this type are for implementing ModelBackend. + """ + + def __init__(self, unit_name, meta): + self.unit_name = unit_name + self.app_name = self.unit_name.split('/')[0] + self.model_name = None + self._calls = [] + self._meta = meta + self._is_leader = None + self._relation_ids_map = {} # relation name to [relation_ids,...] + self._relation_names = {} # reverse map from relation_id to relation_name + self._relation_list_map = {} # relation_id: [unit_name,...] 
+ self._relation_data = {} # {relation_id: {name: data}} + self._config = {} + self._is_leader = False + self._resources_map = {} + self._pod_spec = None + self._app_status = {'status': 'unknown', 'message': ''} + self._unit_status = {'status': 'maintenance', 'message': ''} + self._workload_version = None + + def relation_ids(self, relation_name): + try: + return self._relation_ids_map[relation_name] + except KeyError as e: + if relation_name not in self._meta.relations: + raise model.ModelError('{} is not a known relation'.format(relation_name)) from e + return [] + + def relation_list(self, relation_id): + try: + return self._relation_list_map[relation_id] + except KeyError as e: + raise model.RelationNotFoundError from e + + def relation_get(self, relation_id, member_name, is_app): + if is_app and '/' in member_name: + member_name = member_name.split('/')[0] + if relation_id not in self._relation_data: + raise model.RelationNotFoundError() + return self._relation_data[relation_id][member_name].copy() + + def relation_set(self, relation_id, key, value, is_app): + relation = self._relation_data[relation_id] + if is_app: + bucket_key = self.app_name + else: + bucket_key = self.unit_name + if bucket_key not in relation: + relation[bucket_key] = {} + bucket = relation[bucket_key] + if value == '': + bucket.pop(key, None) + else: + bucket[key] = value + + def config_get(self): + return self._config + + def is_leader(self): + return self._is_leader + + def application_version_set(self, version): + self._workload_version = version + + def resource_get(self, resource_name): + return self._resources_map[resource_name] + + def pod_spec_set(self, spec, k8s_resources): + self._pod_spec = (spec, k8s_resources) + + def status_get(self, *, is_app=False): + if is_app: + return self._app_status + else: + return self._unit_status + + def status_set(self, status, message='', *, is_app=False): + if is_app: + self._app_status = {'status': status, 'message': message} + else: + self._unit_status = {'status': status, 'message': message} + + def storage_list(self, name): + raise NotImplementedError(self.storage_list) + + def storage_get(self, storage_name_id, attribute): + raise NotImplementedError(self.storage_get) + + def storage_add(self, name, count=1): + raise NotImplementedError(self.storage_add) + + def action_get(self): + raise NotImplementedError(self.action_get) + + def action_set(self, results): + raise NotImplementedError(self.action_set) + + def action_log(self, message): + raise NotImplementedError(self.action_log) + + def action_fail(self, message=''): + raise NotImplementedError(self.action_fail) + + def network_get(self, endpoint_name, relation_id=None): + raise NotImplementedError(self.network_get) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/version.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/version.py new file mode 100644 index 0000000000000000000000000000000000000000..15e5478555ee0fa948bfb0ad57cc79ba7cef3721 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/ops/version.py @@ -0,0 +1,50 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +from pathlib import Path + +__all__ = ('version',) + +_FALLBACK = '0.8' # this gets bumped after release + + +def _get_version(): + version = _FALLBACK + ".dev0+unknown" + + p = Path(__file__).parent + if (p.parent / '.git').exists(): + try: + proc = subprocess.run( + ['git', 'describe', '--tags', '--dirty'], + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + cwd=p, + check=True) + except Exception: + pass + else: + version = proc.stdout.strip().decode('utf8') + if '-' in version: + # version will look like <tag>-<#commits>-g<hash>[-dirty] + # in terms of PEP 440, we'll make sure the tag is a 'public version identifier'; + # everything after the first - needs to be a 'local version' + public, local = version.split('-', 1) + version = public + '+' + local.replace('-', '.') + # version is now <tag>+<#commits>.g<hash>[.dirty] + # e.g. an illustrative '0.8-5-g1234abc-dirty' becomes '0.8+5.g1234abc.dirty' + # which is PEP 440-compliant (as long as <tag> is :-) + return version + + +version = _get_version() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/requirements-dev.txt b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/requirements-dev.txt new file mode 100644 index 0000000000000000000000000000000000000000..61bd425a6748e8b316bf81948e809b1eba148f16 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/requirements-dev.txt @@ -0,0 +1,5 @@ +-r requirements.txt + +autopep8 +flake8 +logassert diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/requirements.txt b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..5500f007d0bf6c6098afc0f2c6d00915e345a569 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/requirements.txt @@ -0,0 +1 @@ +PyYAML diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/run_tests b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/run_tests new file mode 100755 index 0000000000000000000000000000000000000000..56411030fdcd8629ffbfbbebb8a8a0650203a934 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/run_tests @@ -0,0 +1,3 @@ +#!/bin/bash + +python3 -m unittest "$@" diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/setup.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..ceb866815ae1227338a1e2dc67a217a338ae7bbc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/setup.py @@ -0,0 +1,75 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +from importlib.util import spec_from_file_location, module_from_spec +from pathlib import Path +from setuptools import setup, find_packages + + +def _read_me() -> str: + with open("README.md", "rt", encoding="utf8") as fh: + readme = fh.read() + return readme + + +def _get_version() -> str: + """Get the version via ops/version.py, without loading ops/__init__.py""" + spec = spec_from_file_location('ops.version', 'ops/version.py') + module = module_from_spec(spec) + spec.loader.exec_module(module) + + return module.version + + +version = _get_version() +version_path = Path("ops/version.py") +version_backup = Path("ops/version.py~") +version_path.rename(version_backup) +try: + with version_path.open("wt", encoding="utf8") as fh: + fh.write('''\ +# this is a generated file + +version = {!r} +'''.format(version)) + + setup( + name="ops", + version=version, + description="The Python library behind great charms", + long_description=_read_me(), + long_description_content_type="text/markdown", + license="Apache-2.0", + url="https://github.com/canonical/operator", + author="The Charmcraft team at Canonical Ltd.", + author_email="charmcraft@lists.launchpad.net", + packages=find_packages(include=('ops', 'ops.*')), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + "Development Status :: 4 - Beta", + + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + # include Windows once we're running tests there also + # "Operating System :: Microsoft :: Windows", + ], + python_requires='>=3.5', + install_requires=["PyYAML"], + ) + +finally: + version_backup.rename(version_path) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/bin/relation-ids b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/bin/relation-ids new file mode 100755 index 0000000000000000000000000000000000000000..a7e0ead2d3182713bd826696fc403b5a8c54faa6 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/bin/relation-ids @@ -0,0 +1,11 @@ +#!/bin/bash + +case $1 in + db) echo '["db:1"]' ;; + mon) echo '["mon:2"]' ;; + ha) echo '[]' ;; + db0) echo '[]' ;; + db1) echo '["db1:4"]' ;; + db2) echo '["db2:5", "db2:6"]' ;; + *) echo '[]' ;; +esac diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/bin/relation-list b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/bin/relation-list new file mode 100755 index 0000000000000000000000000000000000000000..88490159775624108766a17a35a77599ddea8f03 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/bin/relation-list @@ -0,0 +1,16 @@ +#!/bin/bash + +fail_not_found() { + 1>&2 echo "ERROR invalid value \"$1\" for option -r: relation not found" + exit 2 +} + +case $2 in + 1) echo '["remote/0"]' ;; + 2) echo '["remote/0"]' ;; + 3) fail_not_found $2 ;; + 4) echo '["remoteapp1/0"]' ;; + 5) echo '["remoteapp1/0"]' ;; + 6) echo '["remoteapp2/0"]' ;; + *) fail_not_found $2 ;; +esac diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/actions.yaml 
b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/actions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d16c94204f2cc0064965553e9b911d3d6075ba17 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/actions.yaml @@ -0,0 +1,29 @@ +foo-bar: + description: Foos the bar. + title: foo-bar + params: + foo-name: + type: string + description: A foo name to bar. + silent: + type: boolean + description: "" + default: false + required: + - foo-name +start: + description: Start the unit. +get-model-name: + description: Return the name of the model +get-status: + description: Return the Status of the unit +log-critical: + description: log a message at Critical level +log-error: + description: log a message at Error level +log-warning: + description: log a message at Warning level +log-info: + description: log a message at Info level +log-debug: + description: log a message at Debug level diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/config.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ffc0186002391ca52273d39bebcc9c4261c47535 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/config.yaml @@ -0,0 +1 @@ +"options": {} diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f17b2969db298b21bc47bbe1d3614ccff93e9c6e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The Operator Framework.""" + +from .version import version as __version__ # noqa: F401 (imported but unused) + +# Import here the bare minimum to break the circular import between modules +from . import charm # noqa: F401 (imported but unused) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/charm.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..d898de859fc444814bc19a7f8f0caaaec6f7e5f4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/charm.py @@ -0,0 +1,575 @@ +# Copyright 2019-2020 Canonical Ltd. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +import os +import pathlib +import typing + +import yaml + +from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents +from ops import model + + +def _loadYaml(source): + if yaml.__with_libyaml__: + return yaml.load(source, Loader=yaml.CSafeLoader) + return yaml.load(source, Loader=yaml.SafeLoader) + + +class HookEvent(EventBase): + """A base class for events that trigger because of a Juju hook firing.""" + + +class ActionEvent(EventBase): + """A base class for events that trigger when a user asks for an Action to be run. + + To read the parameters for the action, see the instance variable `params`. + To respond with the result of the action, call `set_results`. To add progress + messages that are visible as the action is progressing, use `log`. + + :ivar params: The parameters passed to the action (read by action-get) + """ + + def defer(self): + """Action events are not deferrable like other events. + + This is because an action runs synchronously and the user is waiting for the result. + """ + raise RuntimeError('cannot defer action events') + + def restore(self, snapshot: dict) -> None: + """Used by the operator framework to record the action. + + Not meant to be called directly by Charm code. + """ + env_action_name = os.environ.get('JUJU_ACTION_NAME') + event_action_name = self.handle.kind[:-len('_action')].replace('_', '-') + if event_action_name != env_action_name: + # This could only happen if the dev manually emits the action, or from a bug. + raise RuntimeError('action event kind does not match current action') + # Params are loaded at restore rather than __init__ because + # the model is not available in __init__. + self.params = self.framework.model._backend.action_get() + + def set_results(self, results: typing.Mapping) -> None: + """Report the result of the action. + + Args: + results: The result of the action as a Dict + """ + self.framework.model._backend.action_set(results) + + def log(self, message: str) -> None: + """Send a message that a user will see while the action is running. + + Args: + message: The message for the user. + """ + self.framework.model._backend.action_log(message) + + def fail(self, message: str = '') -> None: + """Report that this action has failed. + + Args: + message: Optional message to record why it has failed. + """ + self.framework.model._backend.action_fail(message) + + +class InstallEvent(HookEvent): + """Represents the `install` hook from Juju.""" + + +class StartEvent(HookEvent): + """Represents the `start` hook from Juju.""" + + +class StopEvent(HookEvent): + """Represents the `stop` hook from Juju.""" + + +class RemoveEvent(HookEvent): + """Represents the `remove` hook from Juju.
""" + + +class ConfigChangedEvent(HookEvent): + """Represents the `config-changed` hook from Juju.""" + + +class UpdateStatusEvent(HookEvent): + """Represents the `update-status` hook from Juju.""" + + +class UpgradeCharmEvent(HookEvent): + """Represents the `upgrade-charm` hook from Juju. + + This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju + has unpacked the upgraded charm code, and so this event will be handled with new code. + """ + + +class PreSeriesUpgradeEvent(HookEvent): + """Represents the `pre-series-upgrade` hook from Juju. + + This happens when a user has run `juju upgrade-series MACHINE prepare` and + will fire for each unit that is running on the machine, telling them that + the user is preparing to upgrade the Machine's series (eg trusty->bionic). + The charm should take actions to prepare for the upgrade (a database charm + would want to write out a version-independent dump of the database, so that + when a new version of the database is available in a new series, it can be + used.) + Once all units on a machine have run `pre-series-upgrade`, the user will + initiate the steps to actually upgrade the machine (eg `do-release-upgrade`). + When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire. + """ + + +class PostSeriesUpgradeEvent(HookEvent): + """Represents the `post-series-upgrade` hook from Juju. + + This is run after the user has done a distribution upgrade (or rolled back + and kept the same series). It is called in response to + `juju upgrade-series MACHINE complete`. Charms are expected to do whatever + steps are necessary to reconfigure their applications for the new series. + """ + + +class LeaderElectedEvent(HookEvent): + """Represents the `leader-elected` hook from Juju. + + Juju will trigger this when a new lead unit is chosen for a given application. + This represents the leader of the charm information (not necessarily the primary + of a running application). The main utility is that charm authors can know + that only one unit will be a leader at any given time, so they can do + configuration, etc, that would otherwise require coordination between units. + (eg, selecting a password for a new relation) + """ + + +class LeaderSettingsChangedEvent(HookEvent): + """Represents the `leader-settings-changed` hook from Juju. + + Deprecated. This represents when a lead unit would call `leader-set` to inform + the other units of an application that they have new information to handle. + This has been deprecated in favor of using a Peer relation, and having the + leader set a value in the Application data bag for that peer relation. + (see :class:`RelationChangedEvent`). + """ + + +class CollectMetricsEvent(HookEvent): + """Represents the `collect-metrics` hook from Juju. + + Note that events firing during a CollectMetricsEvent are currently + sandboxed in how they can interact with Juju. To report metrics + use :meth:`.add_metrics`. + """ + + def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None: + """Record metrics that have been gathered by the charm for this unit. + + Args: + metrics: A collection of {key: float} pairs that contains the + metrics that have been gathered + labels: {key:value} strings that can be applied to the + metrics that are being gathered + """ + self.framework.model._backend.add_metrics(metrics, labels) + + +class RelationEvent(HookEvent): + """A base class representing the various relation lifecycle events. 
+ + Charmers should not be creating RelationEvents directly. The events will be + generated by the framework from Juju related events. Users can observe them + from the various `CharmBase.on[relation_name].relation_*` events. + + Attributes: + relation: The Relation involved in this event + app: The remote application that has triggered this event + unit: The remote unit that has triggered this event. This may be None + if the relation event was triggered as an Application level event + """ + + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit is not None and unit.app != app: + raise RuntimeError( + 'cannot create RelationEvent with application {} and unit {}'.format(app, unit)) + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self) -> dict: + """Used by the framework to serialize the event to disk. + + Not meant to be called by Charm code. + """ + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot: dict) -> None: + """Used by the framework to deserialize the event from disk. + + Not meant to be called by Charm code. + """ + self.relation = self.framework.model.get_relation( + snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationCreatedEvent(RelationEvent): + """Represents the `relation-created` hook from Juju. + + This is triggered when a new relation to another app is added in Juju. This + can occur before units for those applications have started. All existing + relations should be established before start. + """ + + +class RelationJoinedEvent(RelationEvent): + """Represents the `relation-joined` hook from Juju. + + This is triggered whenever a new unit of a related application joins the relation. + (eg, a unit was added to an existing related app, or a new relation was established + with an application that already had units.) + """ + + +class RelationChangedEvent(RelationEvent): + """Represents the `relation-changed` hook from Juju. + + This is triggered whenever there is a change to the data bucket for a related + application or unit. Look at `event.relation.data[event.unit/app]` to see the + new information. + """ + + +class RelationDepartedEvent(RelationEvent): + """Represents the `relation-departed` hook from Juju. + + This is the inverse of the RelationJoinedEvent, representing when a unit + is leaving the relation (the unit is being removed, the app is being removed, + the relation is being removed). It is fired once for each unit that is + going away. + """ + + +class RelationBrokenEvent(RelationEvent): + """Represents the `relation-broken` hook from Juju. + + If a relation is being removed (`juju remove-relation` or `juju remove-application`), + once all the units have been removed, RelationBrokenEvent will fire to signal + that the relationship has been fully terminated. + """ + + +class StorageEvent(HookEvent): + """Base class representing Storage related events.""" + + +class StorageAttachedEvent(StorageEvent): + """Represents the `storage-attached` hook from Juju. 
+ + Called when new storage is available for the charm to use. + """ + + +class StorageDetachingEvent(StorageEvent): + """Represents the `storage-detaching` hook from Juju. + + Called when storage a charm has been using is going away. + """ + + +class CharmEvents(ObjectEvents): + """The events that are generated by Juju in response to the lifecycle of an application.""" + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + remove = EventSource(RemoveEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + collect_metrics = EventSource(CollectMetricsEvent) + + +class CharmBase(Object): + """Base class that represents the Charm overall. + + Usually this initialization is done by ops.main.main() rather than Charm authors + directly instantiating a Charm. + + Args: + framework: The framework responsible for managing the Model and events for this + Charm. + key: Ignored; will remove after deprecation period of the signature change. + """ + + on = CharmEvents() + + def __init__(self, framework: Framework, key: typing.Optional = None): + super().__init__(framework, None) + + for relation_name in self.framework.meta.relations: + relation_name = relation_name.replace('-', '_') + self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent) + self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent) + self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent) + self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent) + self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent) + self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name = action_name.replace('-', '_') + self.on.define_event(action_name + '_action', ActionEvent) + + @property + def app(self) -> model.Application: + """Application that this unit is part of.""" + return self.framework.model.app + + @property + def unit(self) -> model.Unit: + """Unit that this execution is responsible for.""" + return self.framework.model.unit + + @property + def meta(self) -> 'CharmMeta': + """CharmMeta of this charm. + """ + return self.framework.meta + + @property + def charm_dir(self) -> pathlib.Path: + """Root directory of the Charm as it is running. + """ + return self.framework.charm_dir + + +class CharmMeta: + """Object containing the metadata for the charm. + + This is read from metadata.yaml and/or actions.yaml. Generally charms will + define this information, rather than reading it at runtime. This class is + mostly for the framework to understand what the charm has defined. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. 
+ + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. + + Attributes: + name: The name of this charm + summary: Short description of what this charm does + description: Long description for this charm + maintainers: A list of strings of the email addresses of the maintainers + of this charm. + tags: Charm store tag metadata for categories associated with this charm. + terms: Charm store terms that should be agreed to before this charm can + be deployed. (Used for things like licensing issues.) + series: The list of supported OS series that this charm can support. + The first entry in the list is the default series that will be + used by deploy if no other series is requested by the user. + subordinate: True/False whether this charm is intended to be used as a + subordinate charm. + min_juju_version: If supplied, indicates this charm needs features that + are not available in older versions of Juju. + requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation. + provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation. + peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation. + relations: A dict containing all :class:`RelationMeta` attributes (merged from other + sections) + storages: A dict of {name: :class:`StorageMeta`} for each defined storage. + resources: A dict of {name: :class:`ResourceMeta`} for each defined resource. + payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload. + extra_bindings: A dict of additional named bindings that a charm can use + for network configuration. + actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined. 
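+ + Example (an illustrative sketch; 'test-charm' is a placeholder name):: + + meta = CharmMeta.from_yaml('name: test-charm') + assert meta.name == 'test-charm'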
+ Args: + raw: a mapping containing the contents of metadata.yaml + actions_raw: a mapping containing the contents of actions.yaml + """ + + def __init__(self, raw: dict = {}, actions_raw: dict = {}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta(RelationRole.requires, name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta(RelationRole.provides, name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta(RelationRole.peer, name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', {}) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml( + cls, metadata: typing.Union[str, typing.TextIO], + actions: typing.Optional[typing.Union[str, typing.TextIO]] = None): + """Instantiate a CharmMeta from a YAML description of metadata.yaml. + + Args: + metadata: A YAML description of charm metadata (name, relations, etc.) + This can be a simple string, or a file-like object. (passed to `yaml.safe_load`). + actions: YAML description of Actions for this charm (eg actions.yaml) + """ + meta = _loadYaml(metadata) + raw_actions = {} + if actions is not None: + raw_actions = _loadYaml(actions) + return cls(meta, raw_actions) + + +class RelationRole(enum.Enum): + peer = 'peer' + requires = 'requires' + provides = 'provides' + + def is_peer(self) -> bool: + """Return whether the current role is peer. + + A convenience to avoid having to import charm. + """ + return self is RelationRole.peer + + +class RelationMeta: + """Object containing metadata about a relation definition. + + Should not be constructed directly by Charm code. Is gotten from one of + :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`, + or :attr:`CharmMeta.relations`. + + Attributes: + role: This is one of peer/requires/provides + relation_name: Name of this relation from metadata.yaml + interface_name: Optional definition of the interface protocol. + scope: "global" or "container" scope based on how the relation should be used. 
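+ + Example (an illustrative metadata.yaml snippet; 'db' and 'mysql' are + placeholder names):: + + requires: + db: + interface: mysql + + which yields a RelationMeta with role ``requires``, relation_name ``db``, + and interface_name ``mysql``.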
+ """ + + def __init__(self, role: RelationRole, relation_name: str, raw: dict): + if not isinstance(role, RelationRole): + raise TypeError("role should be a Role, not {!r}".format(role)) + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition.""" + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition.""" + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition.""" + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + """Object containing metadata about an action's definition.""" + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {: } + self.required = raw.get('required', []) # [, ...] diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/framework.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/framework.py new file mode 100755 index 0000000000000000000000000000000000000000..b7c4749ff2b5bfb4f354bf1a8d4cd6ed64cf0da5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/framework.py @@ -0,0 +1,1067 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import collections.abc +import inspect +import keyword +import logging +import marshal +import os +import pathlib +import pdb +import re +import sys +import types +import weakref + +from ops import charm +from ops.storage import ( + NoSnapshotError, + SQLiteStorage, +) + +logger = logging.getLogger(__name__) + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. 
+ + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. + """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = "{}/{}[{}]".format(parent, kind, key) + else: + self._path = "{}/{}".format(parent, kind) + else: + if key: + self._path = "{}[{}]".format(kind, key) + else: + self._path = "{}".format(kind) + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {}".format(path)) + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. + """ + self.deferred = False + + +class EventSource: + """EventSource wraps an event type with a descriptor to facilitate observing and emitting. + + It is generally used as: + + class SomethingHappened(EventBase): + pass + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + With that, instances of that type will offer the someobj.something_happened + attribute which is a BoundEvent and may be used to emit and observe the event. + """ + + def __init__(self, event_type): + if not isinstance(event_type, type) or not issubclass(event_type, EventBase): + raise RuntimeError( + 'Event requires a subclass of EventBase as an argument, got {}'.format(event_type)) + self.event_type = event_type + self.event_kind = None + self.emitter_type = None + + def _set_name(self, emitter_type, event_kind): + if self.event_kind is not None: + raise RuntimeError( + 'EventSource({}) reused as {}.{} and {}.{}'.format( + self.event_type.__name__, + self.emitter_type.__name__, + self.event_kind, + emitter_type.__name__, + event_kind, + )) + self.event_kind = event_kind + self.emitter_type = emitter_type + + def __get__(self, emitter, emitter_type=None): + if emitter is None: + return self + # Framework might not be available if accessed as CharmClass.on.event + # rather than charm_instance.on.event, but in that case it couldn't be + # emitted anyway, so there's no point to registering it. 
+ framework = getattr(emitter, 'framework', None) + if framework is not None: + framework.register_type(self.event_type, emitter, self.event_kind) + return BoundEvent(emitter, self.event_type, self.event_kind) + + +class BoundEvent: + + def __repr__(self): + return '<BoundEvent {} bound to {}.{} at {}>'.format( + self.event_type.__name__, + type(self.emitter).__name__, + self.event_kind, + hex(id(self)), + ) + + def __init__(self, emitter, event_type, event_kind): + self.emitter = emitter + self.event_type = event_type + self.event_kind = event_kind + + def emit(self, *args, **kwargs): + """Emit event to all registered observers. + + The current storage state is committed before and after each observer is notified. + """ + framework = self.emitter.framework + key = framework._next_event_key() + event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs) + framework._emit(event) + + +class HandleKind: + """Helper descriptor to define the Object.handle_kind field. + + The handle_kind for an object defaults to its type name, but it may + be explicitly overridden if desired. + """ + + def __get__(self, obj, obj_type): + kind = obj_type.__dict__.get("handle_kind") + if kind: + return kind + return obj_type.__name__ + + +class _Metaclass(type): + """Helper class to ensure proper instantiation of Object-derived classes. + + This class currently has a single purpose: events derived from EventSource + that are class attributes of Object-derived classes need to be told what + their name is in that class. For example, in + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + the instance of EventSource needs to know it's called 'something_happened'. + + Starting from python 3.6 we could use __set_name__ on EventSource for this, + but until then this (meta)class does the equivalent work. + + TODO: when we drop support for 3.5 drop this class, and rename _set_name in + EventSource to __set_name__; everything should continue to work. + + """ + + def __new__(typ, *a, **kw): + k = super().__new__(typ, *a, **kw) + # k is now the Object-derived class; loop over its class attributes + for n, v in vars(k).items(): + # we could do duck typing here if we want to support + # non-EventSource-derived shenanigans. We don't. + if isinstance(v, EventSource): + # this is what 3.6+ does automatically for us: + v._set_name(k, n) + return k + + +class Object(metaclass=_Metaclass): + + handle_kind = HandleKind() + + def __init__(self, parent, key): + kind = self.handle_kind + if isinstance(parent, Framework): + self.framework = parent + # Avoid Framework instances having a circular reference to themselves. + if self.framework is self: + self.framework = weakref.proxy(self.framework) + self.handle = Handle(None, kind, key) + else: + self.framework = parent.framework + self.handle = Handle(parent, kind, key) + self.framework._track(self) + + # TODO Detect conflicting handles here. + + @property + def model(self): + return self.framework.model + + +class ObjectEvents(Object): + """Convenience type to allow defining .on attributes at class level.""" + + handle_kind = "on" + + def __init__(self, parent=None, key=None): + if parent is not None: + super().__init__(parent, key) + else: + self._cache = weakref.WeakKeyDictionary() + + def __get__(self, emitter, emitter_type): + if emitter is None: + return self + instance = self._cache.get(emitter) + if instance is None: + # Same type, different instance, more data.
Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls: a type to define an event on. + + event_kind: an attribute name that will be used to access the + event. Must be a valid python identifier, not be a keyword + or an existing attribute. + + event_type: a type of the event to define. + + """ + prefix = 'unable to define an event with event_kind that ' + if not event_kind.isidentifier(): + raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind) + elif keyword.iskeyword(event_kind): + raise RuntimeError(prefix + 'is a python keyword: ' + event_kind) + try: + getattr(cls, event_kind) + raise RuntimeError( + prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind)) + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor._set_name(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. + event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(ObjectEvents): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return "cannot restore {} since no class was registered for it".format(self.handle_path) + + +# the message to show to the user when a pdb breakpoint goes active +_BREAKPOINT_WELCOME_MESSAGE = """ +Starting pdb to debug charm operator. +Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort. +Future breakpoints may interrupt execution again. +More details at https://discourse.jujucharms.com/t/debugging-charm-hooks + +""" + + +_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$' + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. 
+ model = None + meta = None + charm_dir = None + + def __init__(self, storage, charm_dir, meta, model): + + super().__init__(self, None) + + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + if isinstance(storage, (str, pathlib.Path)): + logger.warning( + "deprecated: Framework now takes a Storage not a path") + storage = SQLiteStorage(storage) + self._storage = storage + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do + # breakpoint(); if Python < 3.7, this doesn't affect anything + sys.breakpointhook = self.breakpoint + + # Flag to indicate that we already presented the welcome message in a debugger breakpoint + self._breakpoint_welcomed = False + + # Parse once the env var, which may be used multiple times later + debug_at = os.environ.get('JUJU_DEBUG_AT') + self._juju_debug_at = debug_at.split(',') if debug_at else () + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError( + 'two objects claiming to be {} have been created'.format(obj.handle.path)) + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. + self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. 
+ """ + if type(value) not in self._type_known: + raise RuntimeError( + 'cannot save {} values before registering that type'.format(type(value).__name__)) + data = value.snapshot() + + # Use marshal as a validator, enforcing the use of simple types, as we later the + # information is really pickled, which is too error prone for future evolution of the + # stored data (e.g. if the developer stores a custom object and later changes its + # class name; when unpickling the original class will not be there and event + # data loading will fail). + try: + marshal.dumps(data) + except ValueError: + msg = "unable to save the data for {}, it must contain only simple types: {!r}" + raise ValueError(msg.format(value.__class__.__name__, data)) + + self._storage.save_snapshot(value.handle.path, data) + + def load_snapshot(self, handle): + parent_path = None + if handle.parent: + parent_path = handle.parent.path + cls = self._type_registry.get((parent_path, handle.kind)) + if not cls: + raise NoTypeError(handle.path) + data = self._storage.load_snapshot(handle.path) + obj = cls.__new__(cls) + obj.framework = self + obj.handle = handle + obj.restore(data) + self._track(obj) + return obj + + def drop_snapshot(self, handle): + self._storage.drop_snapshot(handle.path) + + def observe(self, bound_event: BoundEvent, observer: types.MethodType): + """Register observer to be called when bound_event is emitted. + + The bound_event is generally provided as an attribute of the object that emits + the event, and is created in this style: + + class SomeObject: + something_happened = Event(SomethingHappened) + + That event may be observed as: + + framework.observe(someobj.something_happened, self._on_something_happened) + + Raises: + RuntimeError: if bound_event or observer are the wrong type. + """ + if not isinstance(bound_event, BoundEvent): + raise RuntimeError( + 'Framework.observe requires a BoundEvent as second parameter, got {}'.format( + bound_event)) + if not isinstance(observer, types.MethodType): + # help users of older versions of the framework + if isinstance(observer, charm.CharmBase): + raise TypeError( + 'observer methods must now be explicitly provided;' + ' please replace observe(self.on.{0}, self)' + ' with e.g. observe(self.on.{0}, self._on_{0})'.format( + bound_event.event_kind)) + raise RuntimeError( + 'Framework.observe requires a method as third parameter, got {}'.format(observer)) + + event_type = bound_event.event_type + event_kind = bound_event.event_kind + emitter = bound_event.emitter + + self.register_type(event_type, emitter, event_kind) + + if hasattr(emitter, "handle"): + emitter_path = emitter.handle.path + else: + raise RuntimeError( + 'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__)) + + # Validate that the method has an acceptable call signature. + sig = inspect.signature(observer) + # Self isn't included in the params list, so the first arg will be the event. + extra_params = list(sig.parameters.values())[1:] + + method_name = observer.__name__ + observer = observer.__self__ + if not sig.parameters: + raise TypeError( + '{}.{} must accept event parameter'.format(type(observer).__name__, method_name)) + elif any(param.default is inspect.Parameter.empty for param in extra_params): + # Allow for additional optional params, since there's no reason to exclude them, but + # required params will break. 
+ raise TypeError( + '{}.{} has extra required parameter'.format(type(observer).__name__, method_name)) + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 + # means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + saved = False + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of + # all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + if not saved: + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + saved = True + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + if saved: + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + event_is_from_juju = isinstance(event, charm.HookEvent) + event_is_action = isinstance(event, charm.ActionEvent) + if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at: + # Present the welcome message and run under PDB. + self._show_debug_code_message() + pdb.runcall(custom_handler, event) + else: + # Regular call to the registered method. + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from + # scratch in the next path. + self.framework._forget(event) + + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + + def _show_debug_code_message(self): + """Present the welcome message (only once!) 
when using debugger functionality.""" + if not self._breakpoint_welcomed: + self._breakpoint_welcomed = True + print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='') + + def breakpoint(self, name=None): + """Add breakpoint, optionally named, at the place where this method is called. + + For the breakpoint to be activated the JUJU_DEBUG_AT environment variable + must be set to "all" or to the specific name parameter provided, if any. In every + other situation calling this method does nothing. + + The framework also provides a standard breakpoint named "hook", that will + stop execution when a hook event is about to be handled. + + For those reasons, the "all" and "hook" breakpoint names are reserved. + """ + # If given, validate the name comply with all the rules + if name is not None: + if not isinstance(name, str): + raise TypeError('breakpoint names must be strings') + if name in ('hook', 'all'): + raise ValueError('breakpoint names "all" and "hook" are reserved') + if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name): + raise ValueError('breakpoint names must look like "foo" or "foo-bar"') + + indicated_breakpoints = self._juju_debug_at + if not indicated_breakpoints: + return + + if 'all' in indicated_breakpoints or name in indicated_breakpoints: + self._show_debug_code_message() + + # If we call set_trace() directly it will open the debugger *here*, so indicating + # it to use our caller's frame + code_frame = inspect.currentframe().f_back + pdb.Pdb().set_trace(code_frame) + else: + logger.warning( + "Breakpoint %r skipped (not found in the requested breakpoints: %s)", + name, indicated_breakpoints) + + def remove_unreferenced_events(self): + """Remove events from storage that are not referenced. + + In older versions of the framework, events that had no observers would get recorded but + never deleted. This makes a best effort to find these events and remove them from the + database. + """ + event_regex = re.compile(_event_regex) + to_remove = [] + for handle_path in self._storage.list_snapshots(): + if event_regex.match(handle_path): + notices = self._storage.notices(handle_path) + if next(notices, None) is None: + # There are no notices for this handle_path, it is valid to remove it + to_remove.append(handle_path) + for handle_path in to_remove: + self._storage.drop_snapshot(handle_path) + + +class StoredStateData(Object): + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + return self._cache + + def restore(self, snapshot): + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. 
+        self.__dict__["_data"] = data
+        self.__dict__["_attr_name"] = attr_name
+
+        parent.framework.observe(parent.framework.on.commit, self._data.on_commit)
+
+    def __getattr__(self, key):
+        # "on" is the only reserved key that can't be used in the data map.
+        if key == "on":
+            return self._data.on
+        if key not in self._data:
+            raise AttributeError("attribute '{}' is not stored".format(key))
+        return _wrap_stored(self._data, self._data[key])
+
+    def __setattr__(self, key, value):
+        if key == "on":
+            raise AttributeError("attribute 'on' is reserved and cannot be set")
+
+        value = _unwrap_stored(self._data, value)
+
+        if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)):
+            raise AttributeError(
+                'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format(
+                    key, type(value).__name__))
+
+        self._data[key] = _unwrap_stored(self._data, value)
+
+    def set_default(self, **kwargs):
+        """Set the value of any given key if it has not already been set"""
+        for k, v in kwargs.items():
+            if k not in self._data:
+                self._data[k] = v
+
+
+class StoredState:
+    """A class used to store data the charm needs persisted across invocations.
+
+    Example::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+    Instances of `MyClass` can transparently save state between invocations by
+    setting attributes on `_stored`. Initial state should be set with
+    `set_default` on the bound object, that is::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self._stored.set_default(seen=set())
+                self.framework.observe(self.on.seen, self._on_seen)
+
+            def _on_seen(self, event):
+                self._stored.seen.add(event.uuid)
+
+    """
+
+    def __init__(self):
+        self.parent_type = None
+        self.attr_name = None
+
+    def __get__(self, parent, parent_type=None):
+        if self.parent_type is not None and self.parent_type not in parent_type.mro():
+            # the StoredState instance is being shared between two unrelated classes
+            # -> unclear what is expected of us -> bail out
+            raise RuntimeError(
+                'StoredState shared by {} and {}'.format(
+                    self.parent_type.__name__, parent_type.__name__))
+
+        if parent is None:
+            # accessing via the class directly (e.g. MyClass.stored)
+            return self
+
+        bound = None
+        if self.attr_name is not None:
+            bound = parent.__dict__.get(self.attr_name)
+            if bound is not None:
+                # we already have the thing from a previous pass, huzzah
+                return bound
+
+        # need to find ourselves amongst the parent's bases
+        for cls in parent_type.mro():
+            for attr_name, attr_value in cls.__dict__.items():
+                if attr_value is not self:
+                    continue
+                # we've found ourselves! is it the first time?
+ if bound is not None: + # the StoredState instance is being stored in two different + # attributes -> unclear what is expected of us -> bail out + raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format( + cls.__name__, self.attr_name, attr_name)) + # we've found ourselves for the first time; save where, and bind the object + self.attr_name = attr_name + self.parent_type = cls + bound = BoundStoredState(parent, attr_name) + + if bound is not None: + # cache the bound object to avoid the expensive lookup the next time + # (don't use setattr, to keep things symmetric with the fast-path lookup above) + parent.__dict__[self.attr_name] = bound + return bound + + raise AttributeError( + 'cannot find {} attribute in type {}'.format( + self.__class__.__name__, parent_type.__name__)) + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def _unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + 
return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredList):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+
+class StoredSet(collections.abc.MutableSet):
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def add(self, key):
+        self._under.add(key)
+        self._stored_data.dirty = True
+
+    def discard(self, key):
+        self._under.discard(key)
+        self._stored_data.dirty = True
+
+    def __contains__(self, key):
+        return key in self._under
+
+    def __iter__(self):
+        return self._under.__iter__()
+
+    def __len__(self):
+        return len(self._under)
+
+    @classmethod
+    def _from_iterable(cls, it):
+        """Construct an instance of the class from any iterable input.
+
+        Per https://docs.python.org/3/library/collections.abc.html
+        if the Set mixin is being used in a class with a different constructor signature,
+        you will need to override _from_iterable() with a classmethod that can construct
+        new instances from an iterable argument.
+        """
+        return set(it)
+
+    def __le__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under <= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under <= other
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+    def __eq__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under == other
+        else:
+            return NotImplemented
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/jujuversion.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/jujuversion.py
new file mode 100755
index 0000000000000000000000000000000000000000..b2b8177dbe396f0d8c46b86e26af6b4e54ea046d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/jujuversion.py
@@ -0,0 +1,98 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+from functools import total_ordering
+
+
+@total_ordering
+class JujuVersion:
+
+    PATTERN = r'''^
+    (?P<major>\d{1,9})\.(?P<minor>\d{1,9})       # <major> and <minor> numbers are always there
+    ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch>
+    (\.(?P<build>\d{1,9}))?$                     # and sometimes with a <build> number.
+ ''' + + def __init__(self, version): + m = re.match(self.PATTERN, version, re.VERBOSE) + if not m: + raise RuntimeError('"{}" is not a valid Juju version string'.format(version)) + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch) + else: + s = '{}.{}.{}'.format(self.major, self.minor, self.patch) + if self.build > 0: + s += '.{}'.format(self.build) + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + return ( + self.major == other.major + and self.minor == other.minor + and self.tag == other.tag + and self.build == other.build + and self.patch == other.patch) + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False + + @classmethod + def from_environ(cls) -> 'JujuVersion': + """Build a JujuVersion from JUJU_VERSION.""" + v = os.environ.get('JUJU_VERSION') + if not v: + raise RuntimeError('environ has no JUJU_VERSION') + return cls(v) + + def has_app_data(self) -> bool: + """Determine whether this juju version knows about app data.""" + return (self.major, self.minor, self.patch) >= (2, 7, 0) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/lib/__init__.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..edb9fcacea6f0173aed9f07ca8a683cfead989cc --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/lib/__init__.py @@ -0,0 +1,194 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
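The parsing and ordering rules of the JujuVersion class just shown are easiest to see with a concrete value. A minimal usage sketch (version strings are illustrative, not from this charm):

    from ops.jujuversion import JujuVersion

    v = JujuVersion('2.8-rc1.1')      # major=2, minor=8, tag='rc', patch=1, build=1
    assert JujuVersion('2.7.6') < v   # comparison falls through major, then minor
    assert v.has_app_data()           # (2, 8, 1) >= (2, 7, 0)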
+ +import sys +import os +import re + +from ast import literal_eval +from importlib.util import module_from_spec +from importlib.machinery import ModuleSpec +from pkgutil import get_importer +from types import ModuleType + + +_libraries = None + +_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''') +_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''') + +# Not perfect, but should do for now. +_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''') + + +def use(name: str, api: int, author: str) -> ModuleType: + """Use a library from the ops libraries. + + Args: + name: the name of the library requested. + api: the API version of the library. + author: the author of the library. If not given, requests the + one in the standard library. + Raises: + ImportError: if the library cannot be found. + TypeError: if the name, api, or author are the wrong type. + ValueError: if the name, api, or author are invalid. + """ + if not isinstance(name, str): + raise TypeError("invalid library name: {!r} (must be a str)".format(name)) + if not isinstance(author, str): + raise TypeError("invalid library author: {!r} (must be a str)".format(author)) + if not isinstance(api, int): + raise TypeError("invalid library API: {!r} (must be an int)".format(api)) + if api < 0: + raise ValueError('invalid library api: {} (must be ≥0)'.format(api)) + if not _libname_re.match(name): + raise ValueError("invalid library name: {!r} (chars and digits only)".format(name)) + if not _libauthor_re.match(author): + raise ValueError("invalid library author email: {!r}".format(author)) + + if _libraries is None: + autoimport() + + versions = _libraries.get((name, author), ()) + for lib in versions: + if lib.api == api: + return lib.import_module() + + others = ', '.join(str(lib.api) for lib in versions) + if others: + msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format( + name, author, api, others) + else: + msg = 'cannot find library "{}" from "{}"'.format(name, author) + + raise ImportError(msg, name=name) + + +def autoimport(): + """Find all libs in the path and enable use of them. + + You only need to call this if you've installed a package or + otherwise changed sys.path in the current run, and need to see the + changes. Otherwise libraries are found on first call of `use`. + """ + global _libraries + _libraries = {} + for spec in _find_all_specs(sys.path): + lib = _parse_lib(spec) + if lib is None: + continue + + versions = _libraries.setdefault((lib.name, lib.author), []) + versions.append(lib) + versions.sort(reverse=True) + + +def _find_all_specs(path): + for sys_dir in path: + if sys_dir == "": + sys_dir = "." 
+ try: + top_dirs = os.listdir(sys_dir) + except OSError: + continue + for top_dir in top_dirs: + opslib = os.path.join(sys_dir, top_dir, 'opslib') + try: + lib_dirs = os.listdir(opslib) + except OSError: + continue + finder = get_importer(opslib) + if finder is None or not hasattr(finder, 'find_spec'): + continue + for lib_dir in lib_dirs: + spec = finder.find_spec(lib_dir) + if spec is None: + continue + if spec.loader is None: + # a namespace package; not supported + continue + yield spec + + +# only the first this many lines of a file are looked at for the LIB* constants +_MAX_LIB_LINES = 99 + + +def _parse_lib(spec): + if spec.origin is None: + return None + + _expected = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int} + + try: + with open(spec.origin, 'rt', encoding='utf-8') as f: + libinfo = {} + for n, line in enumerate(f): + if len(libinfo) == len(_expected): + break + if n > _MAX_LIB_LINES: + return None + m = _libline_re.match(line) + if m is None: + continue + key, value = m.groups() + if key in _expected: + value = literal_eval(value) + if not isinstance(value, _expected[key]): + return None + libinfo[key] = value + else: + if len(libinfo) != len(_expected): + return None + except Exception: + return None + + return _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH']) + + +class _Lib: + + def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int): + self.spec = spec + self.name = name + self.author = author + self.api = api + self.patch = patch + + self._module = None + + def __repr__(self): + return "<_Lib {0.name} by {0.author}, API {0.api}, patch {0.patch}>".format(self) + + def import_module(self) -> ModuleType: + if self._module is None: + module = module_from_spec(self.spec) + self.spec.loader.exec_module(module) + self._module = module + return self._module + + def __eq__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a == b + + def __lt__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a < b diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/log.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/log.py new file mode 100644 index 0000000000000000000000000000000000000000..4aac5543aec4d84dc393e79b772a30284712d6d4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/log.py @@ -0,0 +1,51 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
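The loader defined above (`autoimport` plus `use`) resolves a charm library by name, author and API level, preferring the highest installed patch. A hedged sketch of the intended call pattern, with a made-up library name and author:

    import ops.lib

    # Imports the newest installed 'mylib' whose LIBAPI == 1; raises
    # ImportError if no opslib/ directory on sys.path provides a match.
    mylib = ops.lib.use('mylib', 1, 'alice@example.com')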
+ +import sys +import logging + + +class JujuLogHandler(logging.Handler): + """A handler for sending logs to Juju via juju-log.""" + + def __init__(self, model_backend, level=logging.DEBUG): + super().__init__(level) + self.model_backend = model_backend + + def emit(self, record): + self.model_backend.juju_log(record.levelname, self.format(record)) + + +def setup_root_logging(model_backend, debug=False): + """Setup python logging to forward messages to juju-log. + + By default, logging is set to DEBUG level, and messages will be filtered by Juju. + Charmers can also set their own default log level with:: + + logging.getLogger().setLevel(logging.INFO) + + model_backend -- a ModelBackend to use for juju-log + debug -- if True, write logs to stderr as well as to juju-log. + """ + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.addHandler(JujuLogHandler(model_backend)) + if debug: + handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + + sys.excepthook = lambda etype, value, tb: logger.error( + "Uncaught exception while in charm code:", exc_info=(etype, value, tb)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/main.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/main.py new file mode 100755 index 0000000000000000000000000000000000000000..6dc31c3575044796e8fe1f61b8415395689d6339 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/main.py @@ -0,0 +1,348 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import logging +import os +import subprocess +import sys +import warnings +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model +import ops.storage + +from ops.log import setup_root_logging + +CHARM_STATE_FILE = '.unit-state.db' + + +logger = logging.getLogger() + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/op/main.py structure. + charm_dir = Path('{}/../../..'.format(__file__)).resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _create_event_link(charm, bound_event): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. 
+ """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError( + 'action event name {} needs _action suffix'.format(bound_event.event_kind)) + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. + event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError( + 'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type)) + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + # CPython has different implementations for populating sys.argv[0] for Linux and Windows. + # For Windows it is always an absolute path (any symlinks are resolved) + # while for Linux it can be a relative path. + target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir)) + + # Ignore the non-symlink files or directories + # assuming the charm author knows what they are doing. + logger.debug( + 'Creating a new relative symlink at %s pointing to %s', + event_path, target_path) + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file + which is determined by inspecting symlinks provided by the charm + author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + + """ + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + logger.debug("Event %s not defined for %s.", event_name, charm) + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. 
+ if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + logger.debug('Emitting Juju event %s.', event_name) + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name)) + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +class _Dispatcher: + """Encapsulate how to figure out what event Juju wants us to run. + + Also knows how to run “legacy” hooks when Juju called us via a top-level + ``dispatch`` binary. + + Args: + charm_dir: the toplevel directory of the charm + + Attributes: + event_name: the name of the event to run + is_dispatch_aware: are we running under a Juju that knows about the + dispatch binary? + + """ + + def __init__(self, charm_dir: Path): + self._charm_dir = charm_dir + self._exec_path = Path(sys.argv[0]) + + if 'JUJU_DISPATCH_PATH' in os.environ and (charm_dir / 'dispatch').exists(): + self._init_dispatch() + else: + self._init_legacy() + + def ensure_event_links(self, charm): + """Make sure necessary symlinks are present on disk""" + + if self.is_dispatch_aware: + # links aren't needed + return + + # When a charm is force-upgraded and a unit is in an error state Juju + # does not run upgrade-charm and instead runs the failed hook followed + # by config-changed. Given the nature of force-upgrading the hook setup + # code is not triggered on config-changed. + # + # 'start' event is included as Juju does not fire the install event for + # K8s charms (see LP: #1854635). + if (self.event_name in ('install', 'start', 'upgrade_charm') + or self.event_name.endswith('_storage_attached')): + _setup_event_links(self._charm_dir, charm) + + def run_any_legacy_hook(self): + """Run any extant legacy hook. + + If there is both a dispatch file and a legacy hook for the + current event, run the wanted legacy hook. 
+ """ + + if not self.is_dispatch_aware: + # we *are* the legacy hook + return + + dispatch_path = self._charm_dir / self._dispatch_path + if not dispatch_path.exists(): + logger.debug("Legacy %s does not exist.", self._dispatch_path) + return + + # super strange that there isn't an is_executable + if not os.access(str(dispatch_path), os.X_OK): + logger.warning("Legacy %s exists but is not executable.", self._dispatch_path) + return + + if dispatch_path.resolve() == self._exec_path.resolve(): + logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path) + return + + argv = sys.argv.copy() + argv[0] = str(dispatch_path) + logger.info("Running legacy %s.", self._dispatch_path) + try: + subprocess.run(argv, check=True) + except subprocess.CalledProcessError as e: + logger.warning( + "Legacy %s exited with status %d.", + self._dispatch_path, e.returncode) + sys.exit(e.returncode) + else: + logger.debug("Legacy %s exited with status 0.", self._dispatch_path) + + def _set_name_from_path(self, path: Path): + """Sets the name attribute to that which can be inferred from the given path.""" + name = path.name.replace('-', '_') + if path.parent.name == 'actions': + name = '{}_action'.format(name) + self.event_name = name + + def _init_legacy(self): + """Set up the 'legacy' dispatcher. + + The current Juju doesn't know about 'dispatch' and calls hooks + explicitly. + """ + self.is_dispatch_aware = False + self._set_name_from_path(self._exec_path) + + def _init_dispatch(self): + """Set up the new 'dispatch' dispatcher. + + The current Juju will run 'dispatch' if it exists, and otherwise fall + back to the old behaviour. + + JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install, + in both cases. + """ + self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH']) + + if 'OPERATOR_DISPATCH' in os.environ: + logger.debug("Charm called itself via %s.", self._dispatch_path) + sys.exit(0) + os.environ['OPERATOR_DISPATCH'] = '1' + + self.is_dispatch_aware = True + self._set_name_from_path(self._dispatch_path) + + def is_restricted_context(self): + """"Return True if we are running in a restricted Juju context. + + When in a restricted context, most commands (relation-get, config-get, + state-get) are not available. As such, we change how we interact with + Juju. + """ + return self.event_name in ('collect_metrics',) + + +def main(charm_class, use_juju_for_storage=False): + """Setup the charm and dispatch the observed event. + + The event name is based on the way this executable was called (argv[0]). + """ + charm_dir = _get_charm_dir() + + model_backend = ops.model._ModelBackend() + debug = ('JUJU_DEBUG' in os.environ) + setup_root_logging(model_backend, debug=debug) + logger.debug("Operator Framework %s up and running.", ops.__version__) + + dispatcher = _Dispatcher(charm_dir) + dispatcher.run_any_legacy_hook() + + metadata = (charm_dir / 'metadata.yaml').read_text() + actions_meta = charm_dir / 'actions.yaml' + if actions_meta.exists(): + actions_metadata = actions_meta.read_text() + else: + actions_metadata = None + + if not yaml.__with_libyaml__: + logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader') + meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata) + model = ops.model.Model(meta, model_backend) + + # TODO: If Juju unit agent crashes after exit(0) from the charm code + # the framework will commit the snapshot but Juju will not commit its + # operation. 
+ charm_state_path = charm_dir / CHARM_STATE_FILE + if use_juju_for_storage: + if dispatcher.is_restricted_context(): + # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event + # Though we eventually expect that juju will run collect-metrics in a + # non-restricted context. Once we can determine that we are running collect-metrics + # in a non-restricted context, we should fire the event as normal. + logger.debug('"%s" is not supported when using Juju for storage\n' + 'see: https://github.com/canonical/operator/issues/348', + dispatcher.event_name) + # Note that we don't exit nonzero, because that would cause Juju to rerun the hook + return + store = ops.storage.JujuStorage() + else: + store = ops.storage.SQLiteStorage(charm_state_path) + framework = ops.framework.Framework(store, charm_dir, meta, model) + try: + sig = inspect.signature(charm_class) + try: + sig.bind(framework) + except TypeError: + msg = ( + "the second argument, 'key', has been deprecated and will be " + "removed after the 0.7 release") + warnings.warn(msg, DeprecationWarning) + charm = charm_class(framework, None) + else: + charm = charm_class(framework) + dispatcher.ensure_event_links(charm) + + # TODO: Remove the collect_metrics check below as soon as the relevant + # Juju changes are made. + # + # Skip reemission of deferred events for collect-metrics events because + # they do not have the full access to all hook tools. + if not dispatcher.is_restricted_context(): + framework.reemit() + + _emit_charm_event(charm, dispatcher.event_name) + + framework.commit() + finally: + framework.close() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/model.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b96e89154ea9cec2b62a4fab4649412e115c304e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/model.py @@ -0,0 +1,1237 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import decimal +import ipaddress +import json +import os +import re +import shutil +import tempfile +import time +import typing +import weakref + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + +import ops +from ops.jujuversion import JujuVersion + + +class Model: + """Represents the Juju Model as seen from this unit. + + This should not be instantiated directly by Charmers, but can be accessed as `self.model` + from any class that derives from Object. + + Attributes: + unit: A :class:`Unit` that represents the unit that is running this code (eg yourself) + app: A :class:`Application` that represents the application this unit is a part of. 
+ relations: Mapping of endpoint to list of :class:`Relation` answering the question + "what am I currently related to". See also :meth:`.get_relation` + config: A dict of the config for the current application. + resources: Access to resources for this charm. Use ``model.resources.fetch(resource_name)`` + to get the path on disk where the resource can be found. + storages: Mapping of storage_name to :class:`Storage` for the storage points defined in + metadata.yaml + pod: Used to get access to ``model.pod.set_spec`` to set the container specification + for Kubernetes charms. + """ + + def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'): + self._cache = _ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(self._backend.unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + self._bindings = BindingMapping(self._backend) + + @property + def name(self) -> str: + """Return the name of the Model that this unit is running in. + + This is read from the environment variable ``JUJU_MODEL_NAME``. + """ + return self._backend.model_name + + def get_unit(self, unit_name: str) -> 'Unit': + """Get an arbitrary unit by name. + + Internally this uses a cache, so asking for the same unit two times will + return the same object. + """ + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name: str) -> 'Application': + """Get an application by name. + + Internally this uses a cache, so asking for the same application two times will + return the same object. + """ + return self._cache.get(Application, app_name) + + def get_relation( + self, relation_name: str, + relation_id: typing.Optional[int] = None) -> 'Relation': + """Get a specific Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. + + Args: + relation_name: The name of the endpoint for this charm + relation_id: An identifier for a specific relation. Used to disambiguate when a + given application has more than one relation on a given endpoint. + Raises: + TooManyRelatedAppsError: is raised if there is more than one relation to the + supplied relation_name and no relation_id was supplied + """ + return self.relations._get_unique(relation_name, relation_id) + + def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a network space binding. + + Args: + binding_key: The relation name or instance to obtain bindings for. + Returns: + If ``binding_key`` is a relation name, the method returns the default binding + for that relation. If a relation instance is provided, the method first looks + up a more specific binding for that specific relation ID, and if none is found + falls back to the default binding for the relation name. 
+ """ + return self._bindings.get(binding_key) + + +class _ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + """Represents a named application in the model. + + This might be your application, or might be an application that you are related to. + Charmers should not instantiate Application objects directly, but should use + :meth:`Model.get_app` if they need a reference to a given application. + + Attributes: + name: The name of this application (eg, 'mysql'). This name may differ from the name of + the charm, if the user has deployed it to a different name. + """ + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of the overall application. + + Can only be read and set by the lead unit of the application. + + The status of remote units is always Unknown. + + Raises: + RuntimeError: if you try to set the status of another application, or if you try to + set the status of this application as a unit that is not the leader. + InvalidStatusError: if you try to set the status to something that is not a + :class:`StatusBase` + + Example:: + + self.model.app.status = BlockedStatus('I need a human to come help me') + """ + if not self._is_our_app: + return UnknownStatus() + + if not self._backend.is_leader(): + raise RuntimeError('cannot get application status as a non-leader unit') + + if self._status: + return self._status + + s = self._backend.status_get(is_app=True) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for application {} status: {}'.format(self, value) + ) + + if not self._is_our_app: + raise RuntimeError('cannot to set status for a remote application {}'.format(self)) + + if not self._backend.is_leader(): + raise RuntimeError('cannot set application status as a non-leader unit') + + self._backend.status_set(value.name, value.message, is_app=True) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + +class Unit: + """Represents a named unit in the model. + + This might be your unit, another unit of your application, or a unit of another application + that you are related to. + + Attributes: + name: The name of the unit (eg, 'mysql/0') + app: The Application the unit is a part of. + """ + + def __init__(self, name, backend, cache): + self.name = name + + app_name = name.split('/')[0] + self.app = cache.get(Application, app_name) + + self._backend = backend + self._cache = cache + self._is_our_unit = self.name == self._backend.unit_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of a specific unit. + + The status of any unit other than yourself is always Unknown. 
+ + Raises: + RuntimeError: if you try to set the status of a unit other than yourself. + InvalidStatusError: if you try to set the status to something other than + a :class:`StatusBase` + Example:: + + self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators') + """ + if not self._is_our_unit: + return UnknownStatus() + + if self._status: + return self._status + + s = self._backend.status_get(is_app=False) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for unit {} status: {}'.format(self, value) + ) + + if not self._is_our_unit: + raise RuntimeError('cannot set status for a remote unit {}'.format(self)) + + self._backend.status_set(value.name, value.message, is_app=False) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + def is_leader(self) -> bool: + """Return whether this unit is the leader of its application. + + This can only be called for your own unit. + Returns: + True if you are the leader, False otherwise + Raises: + RuntimeError: if called for a unit that is not yourself + """ + if self._is_our_unit: + # This value is not cached as it is not guaranteed to persist for the whole duration + # of a hook execution. + return self._backend.is_leader() + else: + raise RuntimeError( + 'leadership status of remote units ({}) is not visible to other' + ' applications'.format(self) + ) + + def set_workload_version(self, version: str) -> None: + """Record the version of the software running as the workload. + + This shouldn't be confused with the revision of the charm. This is informative only; + shown in the output of 'juju status'. + """ + if not isinstance(version, str): + raise TypeError("workload version must be a str, not {}: {!r}".format( + type(version).__name__, version)) + self._backend.application_version_set(version) + + +class LazyMapping(Mapping, ABC): + """Represents a dict that isn't populated until it is accessed. + + Charm authors should generally never need to use this directly, but it forms + the basis for many of the dicts that the framework tracks. 
+ """ + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def _invalidate(self): + self._lazy_data = None + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of :class:`Relation` instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role.is_peer(): + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, + self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _invalidate(self, relation_name): + """Used to wipe the cache of a given relation_name. + + Not meant to be used by Charm authors. The content of relation data is + static for the lifetime of a hook, so it is safe to cache in memory once + accessed. + """ + self._data[relation_name] = None + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError('relation id {} must be int or None not {}'.format( + relation_id, + type(relation_id).__name__)) + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, + self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. + raise TooManyRelatedAppsError(relation_name, num_related, 1) + + +class BindingMapping: + """Mapping of endpoints to network bindings. + + Charm authors should not instantiate this directly, but access it via + :meth:`Model.get_binding` + """ + + def __init__(self, backend): + self._backend = backend + self._data = {} + + def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a specific Binding for an endpoint/relation. + + Not used directly by Charm authors. 
See :meth:`Model.get_binding`
+        """
+        if isinstance(binding_key, Relation):
+            binding_name = binding_key.name
+            relation_id = binding_key.id
+        elif isinstance(binding_key, str):
+            binding_name = binding_key
+            relation_id = None
+        else:
+            raise ModelError('binding key must be str or relation instance, not {}'
+                             ''.format(type(binding_key).__name__))
+        binding = self._data.get(binding_key)
+        if binding is None:
+            binding = Binding(binding_name, relation_id, self._backend)
+            self._data[binding_key] = binding
+        return binding
+
+
+class Binding:
+    """Binding to a network space.
+
+    Attributes:
+        name: The name of the endpoint this binding represents (eg, 'db')
+    """
+
+    def __init__(self, name, relation_id, backend):
+        self.name = name
+        self._relation_id = relation_id
+        self._backend = backend
+        self._network = None
+
+    @property
+    def network(self) -> 'Network':
+        """The network information for this binding."""
+        if self._network is None:
+            try:
+                self._network = Network(self._backend.network_get(self.name, self._relation_id))
+            except RelationNotFoundError:
+                if self._relation_id is None:
+                    raise
+                # If a relation is dead, we can still get network info associated with an
+                # endpoint itself
+                self._network = Network(self._backend.network_get(self.name))
+        return self._network
+
+
+class Network:
+    """Network space details.
+
+    Charm authors should not instantiate this directly, but should get access to the Network
+    definition from :meth:`Model.get_binding` and its ``network`` attribute.
+
+    Attributes:
+        interfaces: A list of :class:`NetworkInterface` details. This includes the
+            information about how your application should be configured (eg, what
+            IP addresses should you bind to.)
+            Note that multiple addresses for a single interface are represented as multiple
+            interfaces. (eg, ``[NetworkInterface('ens1', '10.1.1.1/32'),
+            NetworkInterface('ens1', '10.1.2.1/32')]``)
+        ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP
+            addresses that other units should use to get in touch with you.
+        egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that
+            other units will see you connecting from. Due to things like NAT it isn't always
+            possible to narrow it down to a single address, but when it is clear, the CIDRs
+            will be constrained to a single address. (eg, 10.0.0.1/32)
+    Args:
+        network_info: A dict of network information as returned by ``network-get``.
+    """
+
+    def __init__(self, network_info: dict):
+        self.interfaces = []
+        # Treat multiple addresses on an interface as multiple logical
+        # interfaces with the same name.
+        for interface_info in network_info['bind-addresses']:
+            interface_name = interface_info['interface-name']
+            for address_info in interface_info['addresses']:
+                self.interfaces.append(NetworkInterface(interface_name, address_info))
+        self.ingress_addresses = []
+        for address in network_info['ingress-addresses']:
+            self.ingress_addresses.append(ipaddress.ip_address(address))
+        self.egress_subnets = []
+        for subnet in network_info['egress-subnets']:
+            self.egress_subnets.append(ipaddress.ip_network(subnet))
+
+    @property
+    def bind_address(self):
+        """A single address that your application should bind() to.
+
+        For the common case where there is a single answer. This represents a single
+        address from :attr:`.interfaces` that can be used to configure where your
+        application should bind() and listen().
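+
+        A sketch of typical usage from charm code (assuming an endpoint named
+        'db' exists in metadata.yaml)::
+
+            binding = self.model.get_binding('db')
+            address = binding.network.bind_address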
+ """ + return self.interfaces[0].address + + @property + def ingress_address(self): + """The address other applications should use to connect to your unit. + + Due to things like public/private addresses, NAT and tunneling, the address you bind() + to is not always the address other people can use to connect() to you. + This is just the first address from :attr:`.ingress_addresses`. + """ + return self.ingress_addresses[0] + + +class NetworkInterface: + """Represents a single network interface that the charm needs to know about. + + Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding` + to get the network information for a given endpoint. + + Attributes: + name: The name of the interface (eg. 'eth0', or 'ens1') + subnet: An :class:`ipaddress.ip_network` representation of the IP for the network + interface. This may be a single address (eg '10.0.1.2/32') + """ + + def __init__(self, name: str, address_info: dict): + self.name = name + # TODO: expose a hardware address here, see LP: #1864070. + self.address = ipaddress.ip_address(address_info['value']) + cidr = address_info['cidr'] + if not cidr: + # The cidr field may be empty, see LP: #1864102. + # In this case, make it a /32 or /128 IP network. + self.subnet = ipaddress.ip_network(address_info['value']) + else: + self.subnet = ipaddress.ip_network(cidr) + # TODO: expose a hostname/canonical name for the address here, see LP: #1864086. + + +class Relation: + """Represents an established relation between this application and another application. + + This class should not be instantiated directly, instead use :meth:`Model.get_relation` + or :attr:`RelationEvent.relation`. + + Attributes: + name: The name of the local endpoint of the relation (eg 'db') + id: The identifier for a particular relation (integer) + app: An :class:`Application` representing the remote application of this relation. + For peer relations this will be the local application. + units: A set of :class:`Unit` for units that have started and joined this relation. + data: A :class:`RelationData` holding the data buckets for each entity + of a relation. Accessed via eg Relation.data[unit]['foo'] + """ + + def __init__( + self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit, + backend: '_ModelBackend', cache: '_ModelCache'): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. + if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. + pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return '<{}.{} {}:{}>'.format(type(self).__module__, + type(self).__name__, + self.name, + self.id) + + +class RelationData(Mapping): + """Represents the various data buckets of a given relation. + + Each unit and application involved in a relation has their own data bucket. + Eg: ``{entity: RelationDataContent}`` + where entity can be either a :class:`Unit` or a :class:`Application`. + + Units can read and write their own data, and if they are the leader, + they can read and write their application data. They are allowed to read + remote unit and application data. + + This class should not be created directly. 
It should be accessed via + :attr:`Relation.data` + """ + + def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'): + self.relation = weakref.proxy(relation) + self._data = { + our_unit: RelationDataContent(self.relation, our_unit, backend), + our_unit.app: RelationDataContent(self.relation, our_unit.app, backend), + } + self._data.update({ + unit: RelationDataContent(self.relation, unit, backend) + for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app is not None: + self._data.update({ + self.relation.app: RelationDataContent(self.relation, self.relation.app, backend), + }) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on + # whether this unit is a leader or not, but this is not guaranteed + # to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError('cannot set relation data for {}'.format(self._entity.name)) + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an + # empty string will remove the key entirely from the relation data. + del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty + # string will remove the key entirely from the relation data. + self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units. + + To access a status by name, see :meth:`StatusBase.from_name`, most use cases will just + directly use the child class to indicate their status. 
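+
+    For example (illustrative only)::
+
+        self.unit.status = ActiveStatus()
+        # or, resolving a status class dynamically by name:
+        status = StatusBase.from_name('blocked', 'waiting for operator')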
+ """ + + _statuses = {} + name = None + + def __init__(self, message: str): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + return super().__new__(cls) + + def __eq__(self, other): + if not isinstance(self, type(other)): + return False + return self.message == other.message + + def __repr__(self): + return "{.__class__.__name__}({!r})".format(self, self.message) + + @classmethod + def from_name(cls, name: str, message: str): + if name == 'unknown': + # unknown is special + return UnknownStatus() + else: + return cls._statuses[name](message) + + @classmethod + def register(cls, child): + if child.name is None: + raise AttributeError('cannot register a Status which has no name') + cls._statuses[child.name] = child + return child + + +@StatusBase.register +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the + charm has not called status-set yet. + + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + def __repr__(self): + return "UnknownStatus()" + + +@StatusBase.register +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message: str = ''): + super().__init__(message) + + +@StatusBase.register +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +@StatusBase.register +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation + for providing those services. This is a "spinning" state, not an error state. It + reflects activity on the unit itself, not on peers or related units. + + """ + name = 'maintenance' + + +@StatusBase.register +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which + it is related is not running. + + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name: str) -> Path: + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. + """ + if name not in self._paths: + raise RuntimeError('invalid resource name: {}'.format(name)) + if self._paths[name] is None: + self._paths[name] = Path(self._backend.resource_get(name)) + return self._paths[name] + + +class Pod: + """Represents the definition of a pod spec in Kubernetes models. + + Currently only supports simple access to setting the Juju pod spec via :attr:`.set_spec`. + """ + + def __init__(self, backend: '_ModelBackend'): + self._backend = backend + + def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None): + """Set the specification for pods that Juju should start in kubernetes. + + See `juju help-tool pod-spec-set` for details of what should be passed. 
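+
+        A minimal sketch (the container name and image below are invented)::
+
+            self.model.pod.set_spec({
+                'version': 3,
+                'containers': [{
+                    'name': 'example',
+                    'image': 'example/image:latest',
+                }],
+            })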
+
+        Args:
+            spec: The mapping defining the pod specification
+            k8s_resources: Additional kubernetes specific specification.
+        """
+        if not self._backend.is_leader():
+            raise ModelError('cannot set a pod spec as this unit is not a leader')
+        self._backend.pod_spec_set(spec, k8s_resources)
+
+
+class StorageMapping(Mapping):
+    """Map of storage names to lists of Storage instances."""
+
+    def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'):
+        self._backend = backend
+        self._storage_map = {storage_name: None for storage_name in storage_names}
+
+    def __contains__(self, key: str):
+        return key in self._storage_map
+
+    def __len__(self):
+        return len(self._storage_map)
+
+    def __iter__(self):
+        return iter(self._storage_map)
+
+    def __getitem__(self, storage_name: str) -> typing.List['Storage']:
+        storage_list = self._storage_map[storage_name]
+        if storage_list is None:
+            storage_list = self._storage_map[storage_name] = []
+            for storage_id in self._backend.storage_list(storage_name):
+                storage_list.append(Storage(storage_name, storage_id, self._backend))
+        return storage_list
+
+    def request(self, storage_name: str, count: int = 1):
+        """Requests new storage instances of a given name.
+
+        Uses the storage-add tool to request additional storage. Juju will notify the unit
+        via ``<storage-name>``-storage-attached events when it becomes available.
+        """
+        if storage_name not in self._storage_map:
+            raise ModelError(('cannot add storage {!r}:'
+                              ' it is not present in the charm metadata').format(storage_name))
+        self._backend.storage_add(storage_name, count)
+
+
+class Storage:
+    """Represents a storage as defined in metadata.yaml.
+
+    Attributes:
+        name: Simple string name of the storage
+        id: The provider id for storage
+    """
+
+    def __init__(self, storage_name, storage_id, backend):
+        self.name = storage_name
+        self.id = storage_id
+        self._backend = backend
+        self._location = None
+
+    @property
+    def location(self):
+        if self._location is None:
+            raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location")
+            self._location = Path(raw)
+        return self._location
+
+
+class ModelError(Exception):
+    """Base class for exceptions raised when interacting with the Model."""
+    pass
+
+
+class TooManyRelatedAppsError(ModelError):
+    """Raised by :meth:`Model.get_relation` if there is more than one related application."""
+
+    def __init__(self, relation_name, num_related, max_supported):
+        super().__init__('Too many remote applications on {} ({} > {})'.format(
+            relation_name, num_related, max_supported))
+        self.relation_name = relation_name
+        self.num_related = num_related
+        self.max_supported = max_supported
+
+
+class RelationDataError(ModelError):
+    """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid.
+
+    This is raised if you're either trying to set a value to something that isn't a string,
+    or if you are trying to set a value in a bucket that you don't have access to. (eg,
+    another application/unit or setting your application data but you aren't the leader.)
+    """
+
+
+class RelationNotFoundError(ModelError):
+    """Backend error when querying juju for a given relation and that relation doesn't exist."""
+
+
+class InvalidStatusError(ModelError):
+    """Raised if trying to set an Application or Unit status to something invalid."""
+
+
+class _ModelBackend:
+    """Represents the connection between the Model representation and talking to Juju.
+ + Charm authors should not directly interact with the ModelBackend, it is a private + implementation of Model. + """ + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self, unit_name=None, model_name=None): + if unit_name is None: + self.unit_name = os.environ['JUJU_UNIT_NAME'] + else: + self.unit_name = unit_name + if model_name is None: + model_name = os.environ.get('JUJU_MODEL_NAME') + self.model_name = model_name + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = None + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', '-r', str(relation_id), + return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'getting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-get', '-r', str(relation_id), '-', member_name] + if is_app: + args.append('--app') + + try: + return self._run(*args, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'setting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)] + if is_app: + args.append('--app') + + try: + return self._run(*args) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm code is executing on. + + The value is cached for the duration of a lease which is 30s in Juju. + """ + now = time.monotonic() + if self._leader_check_time is None: + check = True + else: + time_since_check = datetime.timedelta(seconds=now - self._leader_check_time) + check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None) + if check: + # Current time MUST be saved before running is-leader to ensure the cache + # is only used inside the window that is-leader itself asserts. 
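+            # (Juju guarantees leadership for at least the 30s lease window
+            # after is-leader reports true, which is what makes this cache safe.)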
+            self._leader_check_time = now
+            self._is_leader = self._run('is-leader', return_output=True, use_json=True)
+
+        return self._is_leader
+
+    def resource_get(self, resource_name):
+        return self._run('resource-get', resource_name, return_output=True).strip()
+
+    def pod_spec_set(self, spec, k8s_resources):
+        tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
+        try:
+            spec_path = tmpdir / 'spec.json'
+            spec_path.write_text(json.dumps(spec))
+            args = ['--file', str(spec_path)]
+            if k8s_resources:
+                k8s_res_path = tmpdir / 'k8s-resources.json'
+                k8s_res_path.write_text(json.dumps(k8s_resources))
+                args.extend(['--k8s-resources', str(k8s_res_path)])
+            self._run('pod-spec-set', *args)
+        finally:
+            shutil.rmtree(str(tmpdir))
+
+    def status_get(self, *, is_app=False):
+        """Get a status of a unit or an application.
+
+        Args:
+            is_app: A boolean indicating whether the status should be retrieved for a unit
+                or an application.
+        """
+        content = self._run(
+            'status-get', '--include-data', '--application={}'.format(is_app),
+            use_json=True,
+            return_output=True)
+        # Unit status looks like (in YAML):
+        # message: 'load: 0.28 0.26 0.26'
+        # status: active
+        # status-data: {}
+        # Application status looks like (in YAML):
+        # application-status:
+        #   message: 'load: 0.28 0.26 0.26'
+        #   status: active
+        #   status-data: {}
+        #   units:
+        #     uo/0:
+        #       message: 'load: 0.28 0.26 0.26'
+        #       status: active
+        #       status-data: {}
+
+        if is_app:
+            return {'status': content['application-status']['status'],
+                    'message': content['application-status']['message']}
+        else:
+            return content
+
+    def status_set(self, status, message='', *, is_app=False):
+        """Set a status of a unit or an application.
+
+        Args:
+            is_app: A boolean indicating whether the status should be set for a unit or an
+                application.
+        """
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter must be boolean')
+        return self._run('status-set', '--application={}'.format(is_app), status, message)
+
+    def storage_list(self, name):
+        return [int(s.split('/')[1]) for s in self._run('storage-list', name,
+                                                        return_output=True, use_json=True)]
+
+    def storage_get(self, storage_name_id, attribute):
+        return self._run('storage-get', '-s', storage_name_id, attribute,
+                         return_output=True, use_json=True)
+
+    def storage_add(self, name, count=1):
+        if not isinstance(count, int) or isinstance(count, bool):
+            raise TypeError('storage count must be integer, got: {} ({})'.format(count,
+                                                                                 type(count)))
+        self._run('storage-add', '{}={}'.format(name, count))
+
+    def action_get(self):
+        return self._run('action-get', return_output=True, use_json=True)
+
+    def action_set(self, results):
+        self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()])
+
+    def action_log(self, message):
+        self._run('action-log', message)
+
+    def action_fail(self, message=''):
+        self._run('action-fail', message)
+
+    def application_version_set(self, version):
+        self._run('application-version-set', '--', version)
+
+    def juju_log(self, level, message):
+        self._run('juju-log', '--log-level', level, message)
+
+    def network_get(self, binding_name, relation_id=None):
+        """Return network info provided by network-get for a given binding.
+
+        Args:
+            binding_name: A name of a binding (relation name or extra-binding name).
+            relation_id: An optional relation id to get network info for.
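+
+        The returned dict mirrors what :class:`Network` consumes, roughly (the
+        addresses here are invented for illustration)::
+
+            {'bind-addresses': [{'interface-name': 'ens1',
+                                 'addresses': [{'value': '10.0.0.2',
+                                                'cidr': '10.0.0.0/24'}]}],
+             'ingress-addresses': ['10.0.0.2'],
+             'egress-subnets': ['10.0.0.0/24']}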
+ """ + cmd = ['network-get', binding_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def add_metrics(self, metrics, labels=None): + cmd = ['add-metric'] + + if labels: + label_args = [] + for k, v in labels.items(): + _ModelBackendValidator.validate_metric_label(k) + _ModelBackendValidator.validate_label_value(k, v) + label_args.append('{}={}'.format(k, v)) + cmd.extend(['--labels', ','.join(label_args)]) + + metric_args = [] + for k, v in metrics.items(): + _ModelBackendValidator.validate_metric_key(k) + metric_value = _ModelBackendValidator.format_metric_value(v) + metric_args.append('{}={}'.format(k, metric_value)) + cmd.extend(metric_args) + self._run(*cmd) + + +class _ModelBackendValidator: + """Provides facilities for validating inputs and formatting them for model backends.""" + + METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$') + + @classmethod + def validate_metric_key(cls, key): + if cls.METRIC_KEY_REGEX.match(key) is None: + raise ModelError( + 'invalid metric key {!r}: must match {}'.format( + key, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def validate_metric_label(cls, label_name): + if cls.METRIC_KEY_REGEX.match(label_name) is None: + raise ModelError( + 'invalid metric label name {!r}: must match {}'.format( + label_name, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def format_metric_value(cls, value): + try: + decimal_value = decimal.Decimal.from_float(value) + except TypeError as e: + e2 = ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + raise e2 from e + if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0: + raise ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + return str(decimal_value) + + @classmethod + def validate_label_value(cls, label, value): + # Label values cannot be empty, contain commas or equal signs as those are + # used by add-metric as separators. + if not value: + raise ModelError( + 'metric label {} has an empty value, which is not allowed'.format(label)) + v = str(value) + if re.search('[,=]', v) is not None: + raise ModelError( + 'metric label values must not contain "," or "=": {}={!r}'.format(label, value)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/storage.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/storage.py new file mode 100755 index 0000000000000000000000000000000000000000..d4310ce1cfbb707c6278b70f84a9751da3ce07af --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/storage.py @@ -0,0 +1,318 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from datetime import timedelta +import pickle +import shutil +import subprocess +import sqlite3 +import typing + +import yaml + + +class SQLiteStorage: + + DB_LOCK_TIMEOUT = timedelta(hours=1) + + def __init__(self, filename): + # The isolation_level argument is set to None such that the implicit + # transaction management behavior of the sqlite3 module is disabled. + self._db = sqlite3.connect(str(filename), + isolation_level=None, + timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._setup() + + def _setup(self): + # Make sure that the database is locked until the connection is closed, + # not until the transaction ends. + self._db.execute("PRAGMA locking_mode=EXCLUSIVE") + c = self._db.execute("BEGIN") + c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") + if c.fetchone()[0] == 0: + # Keep in mind what might happen if the process dies somewhere below. + # The system must not be rendered permanently broken by that. + self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") + self._db.execute(''' + CREATE TABLE notice ( + sequence INTEGER PRIMARY KEY AUTOINCREMENT, + event_path TEXT, + observer_path TEXT, + method_name TEXT) + ''') + self._db.commit() + + def close(self): + self._db.close() + + def commit(self): + self._db.commit() + + # There's commit but no rollback. For abort to be supported, we'll need logic that + # can rollback decisions made by third-party code in terms of the internal state + # of objects that have been snapshotted, and hooks to let them know about it and + # take the needed actions to undo their logic until the last snapshot. + # This is doable but will increase significantly the chances for mistakes. + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + """Part of the Storage API, persist a snapshot data under the given handle. + + Args: + handle_path: The string identifying the snapshot. + snapshot_data: The data to be persisted. (as returned by Object.snapshot()). This + might be a dict/tuple/int, but must only contain 'simple' python types. + """ + # Use pickle for serialization, so the value remains portable. + raw_data = pickle.dumps(snapshot_data) + self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data)) + + def load_snapshot(self, handle_path: str) -> typing.Any: + """Part of the Storage API, retrieve a snapshot that was previously saved. + + Args: + handle_path: The string identifying the snapshot. + Raises: + NoSnapshotError: if there is no snapshot for the given handle_path. + """ + c = self._db.cursor() + c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,)) + row = c.fetchone() + if row: + return pickle.loads(row[0]) + raise NoSnapshotError(handle_path) + + def drop_snapshot(self, handle_path: str): + """Part of the Storage API, remove a snapshot that was previously saved. + + Dropping a snapshot that doesn't exist is treated as a no-op. 
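+
+        A short sketch of the snapshot API as a whole (the handle path is
+        invented)::
+
+            store = SQLiteStorage(':memory:')
+            store.save_snapshot('MyCharm/StoredStateData[_stored]', {'seen': 1})
+            assert store.load_snapshot('MyCharm/StoredStateData[_stored]') == {'seen': 1}
+            store.drop_snapshot('MyCharm/StoredStateData[_stored]')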
+        """
+        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))
+
+    def list_snapshots(self) -> typing.Generator[str, None, None]:
+        """Return the name of all snapshots that are currently saved."""
+        c = self._db.cursor()
+        c.execute("SELECT handle FROM snapshot")
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield row[0]
+
+    def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, record a notice (event and observer)."""
+        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
+                         (event_path, observer_path, method_name))
+
+    def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, remove a notice that was previously recorded."""
+        self._db.execute('''
+            DELETE FROM notice
+            WHERE event_path=?
+            AND observer_path=?
+            AND method_name=?
+            ''', (event_path, observer_path, method_name))
+
+    def notices(self, event_path: typing.Optional[str]) ->\
+            typing.Generator[typing.Tuple[str, str, str], None, None]:
+        """Part of the Storage API, return all notices that begin with event_path.
+
+        Args:
+            event_path: If supplied, will only yield events that match event_path. If not
+                supplied (or None/'') will return all events.
+        Returns:
+            Iterable of (event_path, observer_path, method_name) tuples
+        """
+        if event_path:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                WHERE event_path=?
+                ORDER BY sequence
+                ''', (event_path,))
+        else:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                ORDER BY sequence
+                ''')
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield tuple(row)
+
+
+class JujuStorage:
+    """Storing the content tracked by the Framework in Juju.
+
+    This uses :class:`_JujuStorageBackend` to interact with state-get/state-set
+    as the way to store state for the framework and for components.
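+
+    It mirrors the :class:`SQLiteStorage` interface, e.g. (handle path invented,
+    and the unit must be running under a Juju that provides state-get)::
+
+        store = JujuStorage()
+        store.save_snapshot('MyCharm/StoredStateData[_stored]', {'seen': 1})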
+ """ + + NOTICE_KEY = "#notices#" + + def __init__(self, backend: '_JujuStorageBackend' = None): + self._backend = backend + if backend is None: + self._backend = _JujuStorageBackend() + + def close(self): + return + + def commit(self): + return + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + self._backend.set(handle_path, snapshot_data) + + def load_snapshot(self, handle_path): + try: + content = self._backend.get(handle_path) + except KeyError: + raise NoSnapshotError(handle_path) + return content + + def drop_snapshot(self, handle_path): + self._backend.delete(handle_path) + + def save_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.append([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.remove([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def notices(self, event_path: str): + notice_list = self._load_notice_list() + for row in notice_list: + if row[0] != event_path: + continue + yield tuple(row) + + def _load_notice_list(self) -> typing.List[typing.Tuple[str]]: + try: + notice_list = self._backend.get(self.NOTICE_KEY) + except KeyError: + return [] + if notice_list is None: + return [] + return notice_list + + def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None: + self._backend.set(self.NOTICE_KEY, notices) + + +class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)): + """Handle a couple basic python types. + + yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one + that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just + subclass SafeLoader and add tuples back in. + """ + # Taken from the example at: + # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml + + construct_python_tuple = yaml.Loader.construct_python_tuple + + +_SimpleLoader.add_constructor( + u'tag:yaml.org,2002:python/tuple', + _SimpleLoader.construct_python_tuple) + + +class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)): + """Add types supported by 'marshal' + + YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So + we want to only support dumping out types that are safe to load. + """ + + +_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple +_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple) + + +class _JujuStorageBackend: + """Implements the interface from the Operator framework to Juju's state-get/set/etc.""" + + @staticmethod + def is_available() -> bool: + """Check if Juju state storage is available. + + This checks if there is a 'state-get' executable in PATH. + """ + p = shutil.which('state-get') + return p is not None + + def set(self, key: str, value: typing.Any) -> None: + """Set a key to a given value. + + Args: + key: The string key that will be used to find the value later + value: Arbitrary content that will be returned by get(). + Raises: + CalledProcessError: if 'state-set' returns an error code. + """ + # default_flow_style=None means that it can use Block for + # complex types (types that have nested types) but use flow + # for simple types (like an array). Not all versions of PyYAML + # have the same default style. 
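+        # For instance (values invented): value={'a': 1} is first dumped to the
+        # text 'a: 1\n', and that text is then stored under `key` as a literal
+        # block scalar, i.e. YAML-in-YAML.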
+
+        encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None)
+        content = yaml.dump(
+            {key: encoded_value}, encoding='utf-8', default_style='|',
+            default_flow_style=False,
+            Dumper=_SimpleDumper)
+        subprocess.run(["state-set", "--file", "-"], input=content, check=True)
+
+    def get(self, key: str) -> typing.Any:
+        """Get the bytes value associated with a given key.
+
+        Args:
+            key: The string key that will be used to find the value
+        Raises:
+            CalledProcessError: if 'state-get' returns an error code.
+        """
+        # We don't capture stderr here so it can end up in debug logs.
+        p = subprocess.run(
+            ["state-get", key],
+            stdout=subprocess.PIPE,
+            check=True,
+        )
+        if p.stdout == b'' or p.stdout == b'\n':
+            raise KeyError(key)
+        return yaml.load(p.stdout, Loader=_SimpleLoader)
+
+    def delete(self, key: str) -> None:
+        """Remove a key from being tracked.
+
+        Args:
+            key: The key to stop storing
+        Raises:
+            CalledProcessError: if 'state-delete' returns an error code.
+        """
+        subprocess.run(["state-delete", key], check=True)
+
+
+class NoSnapshotError(Exception):
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return 'no snapshot data found for {} object'.format(self.handle_path)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/testing.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/testing.py
new file mode 100755
index 0000000000000000000000000000000000000000..b4b3fe071216238007c9f3847ca9556be626bf6b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/testing.py
@@ -0,0 +1,586 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import pathlib
+from textwrap import dedent
+import tempfile
+import typing
+import yaml
+import weakref
+
+from ops import (
+    charm,
+    framework,
+    model,
+    storage,
+)
+
+
+# OptionalYAML is something like metadata.yaml or actions.yaml. You can
+# pass in a file-like object or the string directly.
+OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+    """This class represents a way to build up the model that will drive a test suite.
+
+    The model that is created is from the viewpoint of the charm that you are testing.
+
+    Example::
+
+        harness = Harness(MyCharm)
+        # Do initial setup here
+        relation_id = harness.add_relation('db', 'postgresql')
+        # Now instantiate the charm to see events as the model changes
+        harness.begin()
+        harness.add_relation_unit(relation_id, 'postgresql/0')
+        harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+        # Check that charm has properly handled the relation_joined event for postgresql/0
+        self.assertEqual(harness.charm. ...)
+
+    Args:
+        charm_cls: The Charm class that you'll be testing.
+        meta: A string or file-like object containing the contents of
+            metadata.yaml.
If not supplied, we will look for a 'metadata.yaml' file in the + parent directory of the Charm, and if not found fall back to a trivial + 'name: test-charm' metadata. + actions: A string or file-like object containing the contents of + actions.yaml. If not supplied, we will look for a 'actions.yaml' file in the + parent directory of the Charm. + """ + + def __init__( + self, + charm_cls: typing.Type[charm.CharmBase], + *, + meta: OptionalYAML = None, + actions: OptionalYAML = None): + # TODO: jam 2020-03-05 We probably want to take config as a parameter as well, since + # it would define the default values of config that the charm would see. + self._charm_cls = charm_cls + self._charm = None + self._charm_dir = 'no-disk-path' # this may be updated by _create_meta + self._lazy_resource_dir = None + self._meta = self._create_meta(meta, actions) + self._unit_name = self._meta.name + '/0' + self._framework = None + self._hooks_enabled = True + self._relation_id_counter = 0 + self._backend = _TestingModelBackend(self._unit_name, self._meta) + self._model = model.Model(self._meta, self._backend) + self._storage = storage.SQLiteStorage(':memory:') + self._framework = framework.Framework( + self._storage, self._charm_dir, self._meta, self._model) + + @property + def charm(self) -> charm.CharmBase: + """Return the instance of the charm class that was passed to __init__. + + Note that the Charm is not instantiated until you have called + :meth:`.begin()`. + """ + return self._charm + + @property + def model(self) -> model.Model: + """Return the :class:`~ops.model.Model` that is being driven by this Harness.""" + return self._model + + @property + def framework(self) -> framework.Framework: + """Return the Framework that is being driven by this Harness.""" + return self._framework + + @property + def _resource_dir(self) -> pathlib.Path: + if self._lazy_resource_dir is not None: + return self._lazy_resource_dir + + self.__resource_dir = tempfile.TemporaryDirectory() + self._lazy_resource_dir = pathlib.Path(self.__resource_dir.name) + self._finalizer = weakref.finalize(self, self.__resource_dir.cleanup) + return self._lazy_resource_dir + + def begin(self) -> None: + """Instantiate the Charm and start handling events. + + Before calling begin(), there is no Charm instance, so changes to the Model won't emit + events. You must call begin before :attr:`.charm` is valid. + """ + if self._charm is not None: + raise RuntimeError('cannot call the begin method on the harness more than once') + + # The Framework adds attributes to class objects for events, etc. As such, we can't re-use + # the original class against multiple Frameworks. So create a locally defined class + # and register it. + # TODO: jam 2020-03-16 We are looking to changes this to Instance attributes instead of + # Class attributes which should clean up this ugliness. The API can stay the same + class TestEvents(self._charm_cls.on.__class__): + pass + + TestEvents.__name__ = self._charm_cls.on.__class__.__name__ + + class TestCharm(self._charm_cls): + on = TestEvents() + + # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo, + # rather than TestCharm has no attribute foo. + TestCharm.__name__ = self._charm_cls.__name__ + self._charm = TestCharm(self._framework) + + def _create_meta(self, charm_metadata, action_metadata): + """Create a CharmMeta object. + + Handle the cases where a user doesn't supply explicit metadata snippets. 
+
+        """
+        filename = inspect.getfile(self._charm_cls)
+        charm_dir = pathlib.Path(filename).parents[1]
+
+        if charm_metadata is None:
+            metadata_path = charm_dir / 'metadata.yaml'
+            if metadata_path.is_file():
+                charm_metadata = metadata_path.read_text()
+                self._charm_dir = charm_dir
+            else:
+                # The simplest of metadata that the framework can support
+                charm_metadata = 'name: test-charm'
+        elif isinstance(charm_metadata, str):
+            charm_metadata = dedent(charm_metadata)
+
+        if action_metadata is None:
+            actions_path = charm_dir / 'actions.yaml'
+            if actions_path.is_file():
+                action_metadata = actions_path.read_text()
+                self._charm_dir = charm_dir
+        elif isinstance(action_metadata, str):
+            action_metadata = dedent(action_metadata)
+
+        return charm.CharmMeta.from_yaml(charm_metadata, action_metadata)
+
+    def add_oci_resource(self, resource_name: str,
+                         contents: typing.Mapping[str, str] = None) -> None:
+        """Add oci resources to the backend.
+
+        This will register an oci resource and create a temporary file for processing metadata
+        about the resource. A default set of values will be used for all the file contents
+        unless a specific contents dict is provided.
+
+        Args:
+            resource_name: Name of the resource to add custom contents to.
+            contents: Optional custom dict to write for the named resource.
+        """
+        if not contents:
+            contents = {'registrypath': 'registrypath',
+                        'username': 'username',
+                        'password': 'password',
+                        }
+        if resource_name not in self._meta.resources.keys():
+            raise RuntimeError('Resource {} is not a defined resource'.format(resource_name))
+        if self._meta.resources[resource_name].type != "oci-image":
+            raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name))
+        resource_dir = self._resource_dir / resource_name
+        resource_dir.mkdir(exist_ok=True)
+        resource_file = resource_dir / "contents.yaml"
+        with resource_file.open('wt', encoding='utf8') as resource_yaml:
+            yaml.dump(contents, resource_yaml)
+        self._backend._resources_map[resource_name] = resource_file
+
+    def populate_oci_resources(self) -> None:
+        """Populate all OCI resources."""
+        for name, data in self._meta.resources.items():
+            if data.type == "oci-image":
+                self.add_oci_resource(name)
+
+    def disable_hooks(self) -> None:
+        """Stop emitting hook events when the model changes.
+
+        This can be used by developers to stop changes to the model from emitting events that
+        the charm will react to. Call :meth:`.enable_hooks`
+        to re-enable them.
+        """
+        self._hooks_enabled = False
+
+    def enable_hooks(self) -> None:
+        """Re-enable hook events from charm.on when the model is changed.
+
+        By default hook events are enabled once you call :meth:`.begin`,
+        but if you have used :meth:`.disable_hooks`, this can be used to
+        enable them again.
+        """
+        self._hooks_enabled = True
+
+    def _next_relation_id(self):
+        rel_id = self._relation_id_counter
+        self._relation_id_counter += 1
+        return rel_id
+
+    def add_relation(self, relation_name: str, remote_app: str) -> int:
+        """Declare that there is a new relation between this app and `remote_app`.
+
+        Args:
+            relation_name: The relation on Charm that is being related to
+            remote_app: The name of the application that is being related to
+
+        Return:
+            The relation_id created by this add_relation.
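+
+        Example (the application name is illustrative)::
+
+            rel_id = harness.add_relation('db', 'postgresql')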
+ """ + rel_id = self._next_relation_id() + self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id) + self._backend._relation_names[rel_id] = relation_name + self._backend._relation_list_map[rel_id] = [] + self._backend._relation_data[rel_id] = { + remote_app: {}, + self._backend.unit_name: {}, + self._backend.app_name: {}, + } + # Reload the relation_ids list + if self._model is not None: + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return rel_id + relation = self._model.get_relation(relation_name, rel_id) + app = self._model.get_app(remote_app) + self._charm.on[relation_name].relation_created.emit( + relation, app) + return rel_id + + def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: + """Add a new unit to a relation. + + Example:: + + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + + This will trigger a `relation_joined` event and a `relation_changed` event. + + Args: + relation_id: The integer relation identifier (as returned by add_relation). + remote_unit_name: A string representing the remote unit that is being added. + Return: + None + """ + self._backend._relation_list_map[relation_id].append(remote_unit_name) + self._backend._relation_data[relation_id][remote_unit_name] = {} + relation_name = self._backend._relation_names[relation_id] + # Make sure that the Model reloads the relation_list for this relation_id, as well as + # reloading the relation data for this unit. + if self._model is not None: + remote_unit = self._model.get_unit(remote_unit_name) + relation = self._model.get_relation(relation_name, relation_id) + unit_cache = relation.data.get(remote_unit, None) + if unit_cache is not None: + unit_cache._invalidate() + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return + self._charm.on[relation_name].relation_joined.emit( + relation, remote_unit.app, remote_unit) + + def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping: + """Get the relation data bucket for a single app or unit in a given relation. + + This ignores all of the safety checks of who can and can't see data in relations (eg, + non-leaders can't read their own application's relation data because there are no events + that keep that data up-to-date for the unit). + + Args: + relation_id: The relation whose content we want to look at. + app_or_unit: The name of the application or unit whose data we want to read + Return: + a dict containing the relation data for `app_or_unit` or None. + Raises: + KeyError: if relation_id doesn't exist + """ + return self._backend._relation_data[relation_id].get(app_or_unit, None) + + def get_workload_version(self) -> str: + """Read the workload version that was set by the unit.""" + return self._backend._workload_version + + def set_model_name(self, name: str) -> None: + """Set the name of the Model that this is representing. + + This cannot be called once begin() has been called. But it lets you set the value that + will be returned by Model.name. + """ + if self._charm is not None: + raise RuntimeError('cannot set the Model name after begin()') + self._backend.model_name = name + + def update_relation_data( + self, + relation_id: int, + app_or_unit: str, + key_values: typing.Mapping, + ) -> None: + """Update the relation data for a given unit or application in a given relation. 
+ + This also triggers the `relation_changed` event for this relation_id. + + Args: + relation_id: The integer relation_id representing this relation. + app_or_unit: The unit or application name that is being updated. + This can be the local or remote application. + key_values: Each key/value will be updated in the relation data. + """ + relation_name = self._backend._relation_names[relation_id] + relation = self._model.get_relation(relation_name, relation_id) + if '/' in app_or_unit: + entity = self._model.get_unit(app_or_unit) + else: + entity = self._model.get_app(app_or_unit) + rel_data = relation.data.get(entity, None) + if rel_data is not None: + # rel_data may have cached now-stale data, so _invalidate() it. + # Note, this won't cause the data to be loaded if it wasn't already. + rel_data._invalidate() + + new_values = self._backend._relation_data[relation_id][app_or_unit].copy() + for k, v in key_values.items(): + if v == '': + new_values.pop(k, None) + else: + new_values[k] = v + self._backend._relation_data[relation_id][app_or_unit] = new_values + + if app_or_unit == self._model.unit.name: + # No events for our own unit + return + if app_or_unit == self._model.app.name: + # updating our own app only generates an event if it is a peer relation and we + # aren't the leader + is_peer = self._meta.relations[relation_name].role.is_peer() + if not is_peer: + return + if self._model.unit.is_leader(): + return + self._emit_relation_changed(relation_id, app_or_unit) + + def _emit_relation_changed(self, relation_id, app_or_unit): + if self._charm is None or not self._hooks_enabled: + return + rel_name = self._backend._relation_names[relation_id] + relation = self.model.get_relation(rel_name, relation_id) + if '/' in app_or_unit: + app_name = app_or_unit.split('/')[0] + unit_name = app_or_unit + app = self.model.get_app(app_name) + unit = self.model.get_unit(unit_name) + args = (relation, app, unit) + else: + app_name = app_or_unit + app = self.model.get_app(app_name) + args = (relation, app) + self._charm.on[rel_name].relation_changed.emit(*args) + + def update_config( + self, + key_values: typing.Mapping[str, str] = None, + unset: typing.Iterable[str] = (), + ) -> None: + """Update the config as seen by the charm. + + This will trigger a `config_changed` event. + + Args: + key_values: A Mapping of key:value pairs to update in config. + unset: An iterable of keys to remove from Config. (Note that this does + not currently reset the config values to the default defined in config.yaml.) + """ + config = self._backend._config + if key_values is not None: + for key, value in key_values.items(): + config[key] = value + for key in unset: + config.pop(key, None) + # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config + # is a LazyMapping, but its _load returns a dict and this method mutates + # the dict that Config is caching. Arguably we should be doing some sort + # of charm.framework.model.config._invalidate() + if self._charm is None or not self._hooks_enabled: + return + self._charm.on.config_changed.emit() + + def set_leader(self, is_leader: bool = True) -> None: + """Set whether this unit is the leader or not. + + If this charm becomes a leader then `leader_elected` will be triggered. + + Args: + is_leader: True/False as to whether this unit is the leader. 
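+
+        Example (in a test, before exercising leader-only code paths)::
+
+            harness.set_leader(True)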
+ """ + was_leader = self._backend._is_leader + self._backend._is_leader = is_leader + # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in + # the Model objects, so this automatically gets noticed. + if is_leader and not was_leader and self._charm is not None and self._hooks_enabled: + self._charm.on.leader_elected.emit() + + def _get_backend_calls(self, reset: bool = True) -> list: + """Return the calls that we have made to the TestingModelBackend. + + This is useful mostly for testing the framework itself, so that we can assert that we + do/don't trigger extra calls. + + Args: + reset: If True, reset the calls list back to empty, if false, the call list is + preserved. + Return: + ``[(call1, args...), (call2, args...)]`` + """ + calls = self._backend._calls.copy() + if reset: + self._backend._calls.clear() + return calls + + +def _record_calls(cls): + """Replace methods on cls with methods that record that they have been called. + + Iterate all attributes of cls, and for public methods, replace them with a wrapped method + that records the method called along with the arguments and keyword arguments. + """ + for meth_name, orig_method in cls.__dict__.items(): + if meth_name.startswith('_'): + continue + + def decorator(orig_method): + def wrapped(self, *args, **kwargs): + full_args = (orig_method.__name__,) + args + if kwargs: + full_args = full_args + (kwargs,) + self._calls.append(full_args) + return orig_method(self, *args, **kwargs) + return wrapped + + setattr(cls, meth_name, decorator(orig_method)) + return cls + + +@_record_calls +class _TestingModelBackend: + """This conforms to the interface for ModelBackend but provides canned data. + + DO NOT use this class directly, it is used by `Harness`_ to drive the model. + `Harness`_ is responsible for maintaining the internal consistency of the values here, + as the only public methods of this type are for implementing ModelBackend. + """ + + def __init__(self, unit_name, meta): + self.unit_name = unit_name + self.app_name = self.unit_name.split('/')[0] + self.model_name = None + self._calls = [] + self._meta = meta + self._is_leader = None + self._relation_ids_map = {} # relation name to [relation_ids,...] + self._relation_names = {} # reverse map from relation_id to relation_name + self._relation_list_map = {} # relation_id: [unit_name,...] 
+ self._relation_data = {} # {relation_id: {name: data}} + self._config = {} + self._is_leader = False + self._resources_map = {} + self._pod_spec = None + self._app_status = {'status': 'unknown', 'message': ''} + self._unit_status = {'status': 'maintenance', 'message': ''} + self._workload_version = None + + def relation_ids(self, relation_name): + try: + return self._relation_ids_map[relation_name] + except KeyError as e: + if relation_name not in self._meta.relations: + raise model.ModelError('{} is not a known relation'.format(relation_name)) from e + return [] + + def relation_list(self, relation_id): + try: + return self._relation_list_map[relation_id] + except KeyError as e: + raise model.RelationNotFoundError from e + + def relation_get(self, relation_id, member_name, is_app): + if is_app and '/' in member_name: + member_name = member_name.split('/')[0] + if relation_id not in self._relation_data: + raise model.RelationNotFoundError() + return self._relation_data[relation_id][member_name].copy() + + def relation_set(self, relation_id, key, value, is_app): + relation = self._relation_data[relation_id] + if is_app: + bucket_key = self.app_name + else: + bucket_key = self.unit_name + if bucket_key not in relation: + relation[bucket_key] = {} + bucket = relation[bucket_key] + if value == '': + bucket.pop(key, None) + else: + bucket[key] = value + + def config_get(self): + return self._config + + def is_leader(self): + return self._is_leader + + def application_version_set(self, version): + self._workload_version = version + + def resource_get(self, resource_name): + return self._resources_map[resource_name] + + def pod_spec_set(self, spec, k8s_resources): + self._pod_spec = (spec, k8s_resources) + + def status_get(self, *, is_app=False): + if is_app: + return self._app_status + else: + return self._unit_status + + def status_set(self, status, message='', *, is_app=False): + if is_app: + self._app_status = {'status': status, 'message': message} + else: + self._unit_status = {'status': status, 'message': message} + + def storage_list(self, name): + raise NotImplementedError(self.storage_list) + + def storage_get(self, storage_name_id, attribute): + raise NotImplementedError(self.storage_get) + + def storage_add(self, name, count=1): + raise NotImplementedError(self.storage_add) + + def action_get(self): + raise NotImplementedError(self.action_get) + + def action_set(self, results): + raise NotImplementedError(self.action_set) + + def action_log(self, message): + raise NotImplementedError(self.action_log) + + def action_fail(self, message=''): + raise NotImplementedError(self.action_fail) + + def network_get(self, endpoint_name, relation_id=None): + raise NotImplementedError(self.network_get) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/version.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/version.py new file mode 100644 index 0000000000000000000000000000000000000000..15e5478555ee0fa948bfb0ad57cc79ba7cef3721 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/lib/ops/version.py @@ -0,0 +1,50 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+from pathlib import Path
+
+__all__ = ('version',)
+
+_FALLBACK = '0.8'  # this gets bumped after release
+
+
+def _get_version():
+    version = _FALLBACK + ".dev0+unknown"
+
+    p = Path(__file__).parent
+    if (p.parent / '.git').exists():
+        try:
+            proc = subprocess.run(
+                ['git', 'describe', '--tags', '--dirty'],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.DEVNULL,
+                cwd=p,
+                check=True)
+        except Exception:
+            pass
+        else:
+            version = proc.stdout.strip().decode('utf8')
+            if '-' in version:
+                # version will look like <tag>-<#commits>-g<sha>[-dirty]
+                # in terms of PEP 440, we'll make sure the tag is a 'public version identifier';
+                # everything after the first - needs to be a 'local version'
+                public, local = version.split('-', 1)
+                version = public + '+' + local.replace('-', '.')
+                # version now <tag>+<#commits>.g<sha>[.dirty]
+                # which is PEP440-compliant (as long as <tag> is :-)
+    return version
+
+
+version = _get_version()
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/metadata.yaml b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b3aed87e96121224c63916b04009daf40fcab35
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/metadata.yaml
@@ -0,0 +1,26 @@
+name: main
+summary: A charm used for testing the basic operation of the entrypoint code.
+maintainer: Dmitrii Shcherbakov <dmitrii.shcherbakov@canonical.com>
+description: A charm used for testing the basic operation of the entrypoint code.
+tags:
+  - misc
+series:
+  - bionic
+  - cosmic
+  - disco
+min-juju-version: 2.7.1
+provides:
+  db:
+    interface: db
+requires:
+  mon:
+    interface: monitoring
+peers:
+  ha:
+    interface: cluster
+subordinate: false
+storage:
+  disks:
+    type: block
+    multiple:
+      range: 0-
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/src/charm.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..154b2376b6ce466a94c0d6745b8b48e48dea00dc
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/charms/test_main/src/charm.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
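As a quick sanity check of the git-describe to PEP 440 conversion in version.py above, the string surgery can be reproduced in isolation (the sample describe output below is invented):

```python
# Mirrors the transformation in _get_version() above.
describe = '0.8-23-gabc1234-dirty'               # <tag>-<#commits>-g<sha>[-dirty]
public, local = describe.split('-', 1)           # the tag becomes the public identifier
pep440 = public + '+' + local.replace('-', '.')  # the rest becomes a local version
assert pep440 == '0.8+23.gabc1234.dirty'         # <tag>+<#commits>.g<sha>[.dirty]
```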
+ +import os +import base64 +import pickle +import sys +import logging + +sys.path.append('lib') + +from ops.charm import CharmBase # noqa: E402 (module-level import after non-import code) +from ops.main import main # noqa: E402 (ditto) + +logger = logging.getLogger() + + +class Charm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + + # This environment variable controls the test charm behavior. + charm_config = os.environ.get('CHARM_CONFIG') + if charm_config is not None: + self._charm_config = pickle.loads(base64.b64decode(charm_config)) + else: + self._charm_config = {} + + # TODO: refactor to use StoredState + # (this implies refactoring most of test_main.py) + self._state_file = self._charm_config.get('STATE_FILE') + try: + with open(str(self._state_file), 'rb') as f: + self._state = pickle.load(f) + except (FileNotFoundError, EOFError): + self._state = { + 'on_install': [], + 'on_start': [], + 'on_config_changed': [], + 'on_update_status': [], + 'on_leader_settings_changed': [], + 'on_db_relation_joined': [], + 'on_mon_relation_changed': [], + 'on_mon_relation_departed': [], + 'on_ha_relation_broken': [], + 'on_foo_bar_action': [], + 'on_start_action': [], + '_on_get_model_name_action': [], + 'on_collect_metrics': [], + + 'on_log_critical_action': [], + 'on_log_error_action': [], + 'on_log_warning_action': [], + 'on_log_info_action': [], + 'on_log_debug_action': [], + + # Observed event types per invocation. A list is used to preserve the + # order in which charm handlers have observed the events. + 'observed_event_types': [], + } + + self.framework.observe(self.on.install, self._on_install) + self.framework.observe(self.on.start, self._on_start) + self.framework.observe(self.on.config_changed, self._on_config_changed) + self.framework.observe(self.on.update_status, self._on_update_status) + self.framework.observe(self.on.leader_settings_changed, self._on_leader_settings_changed) + # Test relation events with endpoints from different + # sections (provides, requires, peers) as well. + self.framework.observe(self.on.db_relation_joined, self._on_db_relation_joined) + self.framework.observe(self.on.mon_relation_changed, self._on_mon_relation_changed) + self.framework.observe(self.on.mon_relation_departed, self._on_mon_relation_departed) + self.framework.observe(self.on.ha_relation_broken, self._on_ha_relation_broken) + + if self._charm_config.get('USE_ACTIONS'): + self.framework.observe(self.on.start_action, self._on_start_action) + self.framework.observe(self.on.foo_bar_action, self._on_foo_bar_action) + self.framework.observe(self.on.get_model_name_action, self._on_get_model_name_action) + self.framework.observe(self.on.get_status_action, self._on_get_status_action) + + self.framework.observe(self.on.collect_metrics, self._on_collect_metrics) + + if self._charm_config.get('USE_LOG_ACTIONS'): + self.framework.observe(self.on.log_critical_action, self._on_log_critical_action) + self.framework.observe(self.on.log_error_action, self._on_log_error_action) + self.framework.observe(self.on.log_warning_action, self._on_log_warning_action) + self.framework.observe(self.on.log_info_action, self._on_log_info_action) + self.framework.observe(self.on.log_debug_action, self._on_log_debug_action) + + if self._charm_config.get('TRY_EXCEPTHOOK'): + raise RuntimeError("failing as requested") + + def _write_state(self): + """Write state variables so that the parent process can read them. + + Each invocation will override the previous state which is intentional. 
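+        (The file holds a pickled dict; __init__ above reads it back with pickle.load.)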
+ """ + if self._state_file is not None: + with self._state_file.open('wb') as f: + pickle.dump(self._state, f) + + def _on_install(self, event): + self._state['on_install'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_start(self, event): + self._state['on_start'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_config_changed(self, event): + self._state['on_config_changed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + event.defer() + self._write_state() + + def _on_update_status(self, event): + self._state['on_update_status'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_leader_settings_changed(self, event): + self._state['on_leader_settings_changed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_db_relation_joined(self, event): + assert event.app is not None, 'application name cannot be None for a relation-joined event' + self._state['on_db_relation_joined'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['db_relation_joined_data'] = event.snapshot() + self._write_state() + + def _on_mon_relation_changed(self, event): + assert event.app is not None, ( + 'application name cannot be None for a relation-changed event') + if os.environ.get('JUJU_REMOTE_UNIT'): + assert event.unit is not None, ( + 'a unit name cannot be None for a relation-changed event' + ' associated with a remote unit') + self._state['on_mon_relation_changed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['mon_relation_changed_data'] = event.snapshot() + self._write_state() + + def _on_mon_relation_departed(self, event): + assert event.app is not None, ( + 'application name cannot be None for a relation-departed event') + self._state['on_mon_relation_departed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['mon_relation_departed_data'] = event.snapshot() + self._write_state() + + def _on_ha_relation_broken(self, event): + assert event.app is None, ( + 'relation-broken events cannot have a reference to a remote application') + assert event.unit is None, ( + 'relation broken events cannot have a reference to a remote unit') + self._state['on_ha_relation_broken'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['ha_relation_broken_data'] = event.snapshot() + self._write_state() + + def _on_start_action(self, event): + assert event.handle.kind == 'start_action', ( + 'event action name cannot be different from the one being handled') + self._state['on_start_action'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_foo_bar_action(self, event): + assert event.handle.kind == 'foo_bar_action', ( + 'event action name cannot be different from the one being handled') + self._state['on_foo_bar_action'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_get_status_action(self, event): + self._state['status_name'] = self.unit.status.name + self._state['status_message'] = self.unit.status.message + self._write_state() + + def _on_collect_metrics(self, event): + self._state['on_collect_metrics'].append(type(event)) + 
self._state['observed_event_types'].append(type(event)) + event.add_metrics({'foo': 42}, {'bar': 4.2}) + self._write_state() + + def _on_log_critical_action(self, event): + logger.critical('super critical') + + def _on_log_error_action(self, event): + logger.error('grave error') + + def _on_log_warning_action(self, event): + logger.warning('wise warning') + + def _on_log_info_action(self, event): + logger.info('useful info') + + def _on_log_debug_action(self, event): + logger.debug('insightful debug') + + def _on_get_model_name_action(self, event): + self._state['_on_get_model_name_action'].append(self.model.name) + self._write_state() + + +if __name__ == '__main__': + main(Charm) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_charm.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_charm.py new file mode 100755 index 0000000000000000000000000000000000000000..f698aec3b71a837134ecd6d2d83164763e9a1957 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_charm.py @@ -0,0 +1,322 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest +import tempfile +import shutil + +from pathlib import Path + +from ops.charm import ( + CharmBase, + CharmMeta, + CharmEvents, +) +from ops.framework import Framework, EventSource, EventBase +from ops.model import Model, _ModelBackend +from ops.storage import SQLiteStorage + +from .test_helpers import fake_script, fake_script_calls + + +class TestCharm(unittest.TestCase): + + def setUp(self): + def restore_env(env): + os.environ.clear() + os.environ.update(env) + self.addCleanup(restore_env, os.environ.copy()) + + os.environ['PATH'] = "{}:{}".format(Path(__file__).parent / 'bin', os.environ['PATH']) + os.environ['JUJU_UNIT_NAME'] = 'local/0' + + self.tmpdir = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(self.tmpdir)) + self.meta = CharmMeta() + + class CustomEvent(EventBase): + pass + + class TestCharmEvents(CharmEvents): + custom = EventSource(CustomEvent) + + # Relations events are defined dynamically and modify the class attributes. + # We use a subclass temporarily to prevent these side effects from leaking. 
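+        # (EventSource attributes and dynamically defined events live on the events
+        # class itself, so reusing the real CharmEvents would leak them across tests.)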
+ CharmBase.on = TestCharmEvents() + + def cleanup(): + CharmBase.on = CharmEvents() + self.addCleanup(cleanup) + + def create_framework(self): + model = Model(self.meta, _ModelBackend('local/0')) + framework = Framework(SQLiteStorage(':memory:'), self.tmpdir, self.meta, model) + self.addCleanup(framework.close) + return framework + + def test_basic(self): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + + self.started = False + framework.observe(self.on.start, self._on_start) + + def _on_start(self, event): + self.started = True + + events = list(MyCharm.on.events()) + self.assertIn('install', events) + self.assertIn('custom', events) + + framework = self.create_framework() + charm = MyCharm(framework) + charm.on.start.emit() + + self.assertEqual(charm.started, True) + + with self.assertRaisesRegex(TypeError, "observer methods must now be explicitly provided"): + framework.observe(charm.on.start, charm) + + def test_helper_properties(self): + framework = self.create_framework() + + class MyCharm(CharmBase): + pass + + charm = MyCharm(framework) + self.assertEqual(charm.app, framework.model.app) + self.assertEqual(charm.unit, framework.model.unit) + self.assertEqual(charm.meta, framework.meta) + self.assertEqual(charm.charm_dir, framework.charm_dir) + + def test_relation_events(self): + + class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.seen = [] + for rel in ('req1', 'req-2', 'pro1', 'pro-2', 'peer1', 'peer-2'): + # Hook up relation events to generic handler. + self.framework.observe(self.on[rel].relation_joined, self.on_any_relation) + self.framework.observe(self.on[rel].relation_changed, self.on_any_relation) + self.framework.observe(self.on[rel].relation_departed, self.on_any_relation) + self.framework.observe(self.on[rel].relation_broken, self.on_any_relation) + + def on_any_relation(self, event): + assert event.relation.name == 'req1' + assert event.relation.app.name == 'remote' + self.seen.append(type(event).__name__) + + # language=YAML + self.meta = CharmMeta.from_yaml(metadata=''' +name: my-charm +requires: + req1: + interface: req1 + req-2: + interface: req2 +provides: + pro1: + interface: pro1 + pro-2: + interface: pro2 +peers: + peer1: + interface: peer1 + peer-2: + interface: peer2 +''') + + charm = MyCharm(self.create_framework()) + + rel = charm.framework.model.get_relation('req1', 1) + unit = charm.framework.model.get_unit('remote/0') + charm.on['req1'].relation_joined.emit(rel, unit) + charm.on['req1'].relation_changed.emit(rel, unit) + charm.on['req-2'].relation_changed.emit(rel, unit) + charm.on['pro1'].relation_departed.emit(rel, unit) + charm.on['pro-2'].relation_departed.emit(rel, unit) + charm.on['peer1'].relation_broken.emit(rel) + charm.on['peer-2'].relation_broken.emit(rel) + + self.assertEqual(charm.seen, [ + 'RelationJoinedEvent', + 'RelationChangedEvent', + 'RelationChangedEvent', + 'RelationDepartedEvent', + 'RelationDepartedEvent', + 'RelationBrokenEvent', + 'RelationBrokenEvent', + ]) + + def test_storage_events(self): + + class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.seen = [] + self.framework.observe(self.on['stor1'].storage_attached, self._on_stor1_attach) + self.framework.observe(self.on['stor2'].storage_detaching, self._on_stor2_detach) + self.framework.observe(self.on['stor3'].storage_attached, self._on_stor3_attach) + self.framework.observe(self.on['stor-4'].storage_attached, self._on_stor4_attach) + + def _on_stor1_attach(self, 
event): + self.seen.append(type(event).__name__) + + def _on_stor2_detach(self, event): + self.seen.append(type(event).__name__) + + def _on_stor3_attach(self, event): + self.seen.append(type(event).__name__) + + def _on_stor4_attach(self, event): + self.seen.append(type(event).__name__) + + # language=YAML + self.meta = CharmMeta.from_yaml(''' +name: my-charm +storage: + stor-4: + multiple: + range: 2-4 + type: filesystem + stor1: + type: filesystem + stor2: + multiple: + range: "2" + type: filesystem + stor3: + multiple: + range: 2- + type: filesystem +''') + + self.assertIsNone(self.meta.storages['stor1'].multiple_range) + self.assertEqual(self.meta.storages['stor2'].multiple_range, (2, 2)) + self.assertEqual(self.meta.storages['stor3'].multiple_range, (2, None)) + self.assertEqual(self.meta.storages['stor-4'].multiple_range, (2, 4)) + + charm = MyCharm(self.create_framework()) + + charm.on['stor1'].storage_attached.emit() + charm.on['stor2'].storage_detaching.emit() + charm.on['stor3'].storage_attached.emit() + charm.on['stor-4'].storage_attached.emit() + + self.assertEqual(charm.seen, [ + 'StorageAttachedEvent', + 'StorageDetachingEvent', + 'StorageAttachedEvent', + 'StorageAttachedEvent', + ]) + + @classmethod + def _get_action_test_meta(cls): + # language=YAML + return CharmMeta.from_yaml(metadata=''' +name: my-charm +''', actions=''' +foo-bar: + description: "Foos the bar." + params: + foo-name: + description: "A foo name to bar" + type: string + silent: + default: false + description: "" + type: boolean + required: foo-bar + title: foo-bar +start: + description: "Start the unit." +''') + + def _test_action_events(self, cmd_type): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + framework.observe(self.on.foo_bar_action, self._on_foo_bar_action) + framework.observe(self.on.start_action, self._on_start_action) + + def _on_foo_bar_action(self, event): + self.seen_action_params = event.params + event.log('test-log') + event.set_results({'res': 'val with spaces'}) + event.fail('test-fail') + + def _on_start_action(self, event): + pass + + fake_script(self, cmd_type + '-get', """echo '{"foo-name": "name", "silent": true}'""") + fake_script(self, cmd_type + '-set', "") + fake_script(self, cmd_type + '-log', "") + fake_script(self, cmd_type + '-fail', "") + self.meta = self._get_action_test_meta() + + os.environ['JUJU_{}_NAME'.format(cmd_type.upper())] = 'foo-bar' + framework = self.create_framework() + charm = MyCharm(framework) + + events = list(MyCharm.on.events()) + self.assertIn('foo_bar_action', events) + self.assertIn('start_action', events) + + charm.on.foo_bar_action.emit() + self.assertEqual(charm.seen_action_params, {"foo-name": "name", "silent": True}) + self.assertEqual(fake_script_calls(self), [ + [cmd_type + '-get', '--format=json'], + [cmd_type + '-log', "test-log"], + [cmd_type + '-set', "res=val with spaces"], + [cmd_type + '-fail', "test-fail"], + ]) + + # Make sure that action events that do not match the current context are + # not possible to emit by hand. 
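+        # (JUJU_ACTION_NAME was set to 'foo-bar' above, so emitting start_action must raise.)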
+ with self.assertRaises(RuntimeError): + charm.on.start_action.emit() + + def test_action_events(self): + self._test_action_events('action') + + def _test_action_event_defer_fails(self, cmd_type): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + framework.observe(self.on.start_action, self._on_start_action) + + def _on_start_action(self, event): + event.defer() + + fake_script(self, cmd_type + '-get', """echo '{"foo-name": "name", "silent": true}'""") + self.meta = self._get_action_test_meta() + + os.environ['JUJU_{}_NAME'.format(cmd_type.upper())] = 'start' + framework = self.create_framework() + charm = MyCharm(framework) + + with self.assertRaises(RuntimeError): + charm.on.start_action.emit() + + def test_action_event_defer_fails(self): + self._test_action_event_defer_fails('action') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_framework.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_framework.py new file mode 100755 index 0000000000000000000000000000000000000000..2c4a7bf0de34396572d73918518f9617b4f7dc55 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_framework.py @@ -0,0 +1,1798 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import gc +import inspect +import io +import os +import re +import shutil +import sys +import tempfile +from unittest.mock import patch +from pathlib import Path + +import logassert + +from ops import charm +from ops.framework import ( + _BREAKPOINT_WELCOME_MESSAGE, + BoundStoredState, + CommitEvent, + EventBase, + _event_regex, + ObjectEvents, + EventSource, + Framework, + Handle, + Object, + PreCommitEvent, + StoredList, + StoredState, + StoredStateData, +) +from ops.storage import NoSnapshotError, SQLiteStorage +from test.test_helpers import fake_script, BaseTestCase + + +class TestFramework(BaseTestCase): + + def setUp(self): + self.tmpdir = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(self.tmpdir)) + + patcher = patch('ops.storage.SQLiteStorage.DB_LOCK_TIMEOUT', datetime.timedelta(0)) + patcher.start() + self.addCleanup(patcher.stop) + logassert.setup(self, 'ops') + + def test_deprecated_init(self): + # For 0.7, this still works, but it is deprecated. 
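+        # (Given a path instead of a Storage, Framework builds an SQLiteStorage itself
+        # and logs the deprecation warning asserted below.)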
+ framework = Framework(':memory:', None, None, None) + self.assertLoggedWarning( + "deprecated: Framework now takes a Storage not a path") + self.assertIsInstance(framework._storage, SQLiteStorage) + + def test_handle_path(self): + cases = [ + (Handle(None, "root", None), "root"), + (Handle(None, "root", "1"), "root[1]"), + (Handle(Handle(None, "root", None), "child", None), "root/child"), + (Handle(Handle(None, "root", "1"), "child", "2"), "root[1]/child[2]"), + ] + for handle, path in cases: + self.assertEqual(str(handle), path) + self.assertEqual(Handle.from_path(path), handle) + + def test_handle_attrs_readonly(self): + handle = Handle(None, 'kind', 'key') + with self.assertRaises(AttributeError): + handle.parent = 'foo' + with self.assertRaises(AttributeError): + handle.kind = 'foo' + with self.assertRaises(AttributeError): + handle.key = 'foo' + with self.assertRaises(AttributeError): + handle.path = 'foo' + + def test_restore_unknown(self): + framework = self.create_framework() + + class Foo(Object): + pass + + handle = Handle(None, "a_foo", "some_key") + + framework.register_type(Foo, None, handle.kind) + + try: + framework.load_snapshot(handle) + except NoSnapshotError as e: + self.assertEqual(e.handle_path, str(handle)) + self.assertEqual(str(e), "no snapshot data found for a_foo[some_key] object") + else: + self.fail("exception NoSnapshotError not raised") + + def test_snapshot_roundtrip(self): + class Foo: + def __init__(self, handle, n): + self.handle = handle + self.my_n = n + + def snapshot(self): + return {"My N!": self.my_n} + + def restore(self, snapshot): + self.my_n = snapshot["My N!"] + 1 + + handle = Handle(None, "a_foo", "some_key") + event = Foo(handle, 1) + + framework1 = self.create_framework(tmpdir=self.tmpdir) + framework1.register_type(Foo, None, handle.kind) + framework1.save_snapshot(event) + framework1.commit() + framework1.close() + + framework2 = self.create_framework(tmpdir=self.tmpdir) + framework2.register_type(Foo, None, handle.kind) + event2 = framework2.load_snapshot(handle) + self.assertEqual(event2.my_n, 2) + + framework2.save_snapshot(event2) + del event2 + gc.collect() + event3 = framework2.load_snapshot(handle) + self.assertEqual(event3.my_n, 3) + + framework2.drop_snapshot(event.handle) + framework2.commit() + framework2.close() + + framework3 = self.create_framework(tmpdir=self.tmpdir) + framework3.register_type(Foo, None, handle.kind) + + self.assertRaises(NoSnapshotError, framework3.load_snapshot, handle) + + def test_simple_event_observer(self): + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyNotifier(Object): + foo = EventSource(MyEvent) + bar = EventSource(MyEvent) + baz = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def on_any(self, event): + self.seen.append("on_any:" + event.handle.kind) + + def on_foo(self, event): + self.seen.append("on_foo:" + event.handle.kind) + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + framework.observe(pub.foo, obs.on_any) + framework.observe(pub.bar, obs.on_any) + + with self.assertRaisesRegex(RuntimeError, "^Framework.observe requires a method"): + framework.observe(pub.baz, obs) + + pub.foo.emit() + pub.bar.emit() + + self.assertEqual(obs.seen, ["on_any:foo", "on_any:bar"]) + + def test_bad_sig_observer(self): + + class MyEvent(EventBase): + pass + + class MyNotifier(Object): + foo = EventSource(MyEvent) + bar = EventSource(MyEvent) + baz = 
EventSource(MyEvent) + qux = EventSource(MyEvent) + + class MyObserver(Object): + def _on_foo(self): + assert False, 'should not be reached' + + def _on_bar(self, event, extra): + assert False, 'should not be reached' + + def _on_baz(self, event, extra=None, *, k): + assert False, 'should not be reached' + + def _on_qux(self, event, extra=None): + assert False, 'should not be reached' + + framework = self.create_framework() + pub = MyNotifier(framework, "pub") + obs = MyObserver(framework, "obs") + + with self.assertRaisesRegex(TypeError, "must accept event parameter"): + framework.observe(pub.foo, obs._on_foo) + with self.assertRaisesRegex(TypeError, "has extra required parameter"): + framework.observe(pub.bar, obs._on_bar) + with self.assertRaisesRegex(TypeError, "has extra required parameter"): + framework.observe(pub.baz, obs._on_baz) + framework.observe(pub.qux, obs._on_qux) + + def test_on_pre_commit_emitted(self): + framework = self.create_framework(tmpdir=self.tmpdir) + + class PreCommitObserver(Object): + + _stored = StoredState() + + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + self._stored.myinitdata = 40 + + def on_pre_commit(self, event): + self._stored.myinitdata = 41 + self._stored.mydata = 42 + self.seen.append(type(event)) + + def on_commit(self, event): + # Modifications made here will not be persisted. + self._stored.myinitdata = 42 + self._stored.mydata = 43 + self._stored.myotherdata = 43 + self.seen.append(type(event)) + + obs = PreCommitObserver(framework, None) + + framework.observe(framework.on.pre_commit, obs.on_pre_commit) + + framework.commit() + + self.assertEqual(obs._stored.myinitdata, 41) + self.assertEqual(obs._stored.mydata, 42) + self.assertTrue(obs.seen, [PreCommitEvent, CommitEvent]) + framework.close() + + other_framework = self.create_framework(tmpdir=self.tmpdir) + + new_obs = PreCommitObserver(other_framework, None) + + self.assertEqual(obs._stored.myinitdata, 41) + self.assertEqual(new_obs._stored.mydata, 42) + + with self.assertRaises(AttributeError): + new_obs._stored.myotherdata + + def test_defer_and_reemit(self): + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyNotifier1(Object): + a = EventSource(MyEvent) + b = EventSource(MyEvent) + + class MyNotifier2(Object): + c = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + self.done = {} + + def on_any(self, event): + self.seen.append(event.handle.kind) + if not self.done.get(event.handle.kind): + event.defer() + + pub1 = MyNotifier1(framework, "1") + pub2 = MyNotifier2(framework, "1") + obs1 = MyObserver(framework, "1") + obs2 = MyObserver(framework, "2") + + framework.observe(pub1.a, obs1.on_any) + framework.observe(pub1.b, obs1.on_any) + framework.observe(pub1.a, obs2.on_any) + framework.observe(pub1.b, obs2.on_any) + framework.observe(pub2.c, obs2.on_any) + + pub1.a.emit() + pub1.b.emit() + pub2.c.emit() + + # Events remain stored because they were deferred. 
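+        # (load_snapshot succeeding is the proof; a consumed event would raise NoSnapshotError.)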
+ ev_a_handle = Handle(pub1, "a", "1") + framework.load_snapshot(ev_a_handle) + ev_b_handle = Handle(pub1, "b", "2") + framework.load_snapshot(ev_b_handle) + ev_c_handle = Handle(pub2, "c", "3") + framework.load_snapshot(ev_c_handle) + # make sure the objects are gone before we reemit them + gc.collect() + + framework.reemit() + obs1.done["a"] = True + obs2.done["b"] = True + framework.reemit() + framework.reemit() + obs1.done["b"] = True + obs2.done["a"] = True + framework.reemit() + obs2.done["c"] = True + framework.reemit() + framework.reemit() + framework.reemit() + + self.assertEqual(" ".join(obs1.seen), "a b a b a b b b") + self.assertEqual(" ".join(obs2.seen), "a b c a b c a b c a c a c c") + + # Now the event objects must all be gone from storage. + self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_a_handle) + self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_b_handle) + self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_c_handle) + + def test_custom_event_data(self): + framework = self.create_framework() + + class MyEvent(EventBase): + def __init__(self, handle, n): + super().__init__(handle) + self.my_n = n + + def snapshot(self): + return {"My N!": self.my_n} + + def restore(self, snapshot): + super().restore(snapshot) + self.my_n = snapshot["My N!"] + 1 + + class MyNotifier(Object): + foo = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append("on_foo:{}={}".format(event.handle.kind, event.my_n)) + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + framework.observe(pub.foo, obs._on_foo) + + pub.foo.emit(1) + + framework.reemit() + + # Two things being checked here: + # + # 1. There's a restore roundtrip before the event is first observed. + # That means the data is safe before it's ever seen, and the + # roundtrip logic is tested under normal circumstances. + # + # 2. The renotification restores from the pristine event, not + # from the one modified during the first restore (otherwise + # we'd get a foo=3). 
+ # + self.assertEqual(obs.seen, ["on_foo:foo=2", "on_foo:foo=2"]) + + def test_weak_observer(self): + framework = self.create_framework() + + observed_events = [] + + class MyEvent(EventBase): + pass + + class MyEvents(ObjectEvents): + foo = EventSource(MyEvent) + + class MyNotifier(Object): + on = MyEvents() + + class MyObserver(Object): + def _on_foo(self, event): + observed_events.append("foo") + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "2") + + framework.observe(pub.on.foo, obs._on_foo) + pub.on.foo.emit() + self.assertEqual(observed_events, ["foo"]) + # Now delete the observer, and note that when we emit the event, it + # doesn't update the local slice again + del obs + gc.collect() + pub.on.foo.emit() + self.assertEqual(observed_events, ["foo"]) + + def test_forget_and_multiple_objects(self): + framework = self.create_framework() + + class MyObject(Object): + pass + + o1 = MyObject(framework, "path") + # Creating a second object at the same path should fail with RuntimeError + with self.assertRaises(RuntimeError): + o2 = MyObject(framework, "path") + # Unless we _forget the object first + framework._forget(o1) + o2 = MyObject(framework, "path") + self.assertEqual(o1.handle.path, o2.handle.path) + # Deleting the tracked object should also work + del o2 + gc.collect() + o3 = MyObject(framework, "path") + self.assertEqual(o1.handle.path, o3.handle.path) + framework.close() + # Or using a second framework + framework_copy = self.create_framework() + o_copy = MyObject(framework_copy, "path") + self.assertEqual(o1.handle.path, o_copy.handle.path) + + def test_forget_and_multiple_objects_with_load_snapshot(self): + framework = self.create_framework(tmpdir=self.tmpdir) + + class MyObject(Object): + def __init__(self, parent, name): + super().__init__(parent, name) + self.value = name + + def snapshot(self): + return self.value + + def restore(self, value): + self.value = value + + framework.register_type(MyObject, None, MyObject.handle_kind) + o1 = MyObject(framework, "path") + framework.save_snapshot(o1) + framework.commit() + o_handle = o1.handle + del o1 + gc.collect() + o2 = framework.load_snapshot(o_handle) + # Trying to load_snapshot a second object at the same path should fail with RuntimeError + with self.assertRaises(RuntimeError): + framework.load_snapshot(o_handle) + # Unless we _forget the object first + framework._forget(o2) + o3 = framework.load_snapshot(o_handle) + self.assertEqual(o2.value, o3.value) + # A loaded object also prevents direct creation of an object + with self.assertRaises(RuntimeError): + MyObject(framework, "path") + framework.close() + # But we can create an object, or load a snapshot in a copy of the framework + framework_copy1 = self.create_framework(tmpdir=self.tmpdir) + o_copy1 = MyObject(framework_copy1, "path") + self.assertEqual(o_copy1.value, "path") + framework_copy1.close() + framework_copy2 = self.create_framework(tmpdir=self.tmpdir) + framework_copy2.register_type(MyObject, None, MyObject.handle_kind) + o_copy2 = framework_copy2.load_snapshot(o_handle) + self.assertEqual(o_copy2.value, "path") + + def test_events_base(self): + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyEvents(ObjectEvents): + foo = EventSource(MyEvent) + bar = EventSource(MyEvent) + + class MyNotifier(Object): + on = MyEvents() + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + 
self.seen.append("on_foo:{}".format(event.handle.kind)) + event.defer() + + def _on_bar(self, event): + self.seen.append("on_bar:{}".format(event.handle.kind)) + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + # Confirm that temporary persistence of BoundEvents doesn't cause errors, + # and that events can be observed. + for bound_event, handler in [(pub.on.foo, obs._on_foo), (pub.on.bar, obs._on_bar)]: + framework.observe(bound_event, handler) + + # Confirm that events can be emitted and seen. + pub.on.foo.emit() + + self.assertEqual(obs.seen, ["on_foo:foo"]) + + def test_conflicting_event_attributes(self): + class MyEvent(EventBase): + pass + + event = EventSource(MyEvent) + + class MyEvents(ObjectEvents): + foo = event + + with self.assertRaises(RuntimeError) as cm: + class OtherEvents(ObjectEvents): + foo = event + self.assertEqual( + str(cm.exception), + "EventSource(MyEvent) reused as MyEvents.foo and OtherEvents.foo") + + with self.assertRaises(RuntimeError) as cm: + class MyNotifier(Object): + on = MyEvents() + bar = event + self.assertEqual( + str(cm.exception), + "EventSource(MyEvent) reused as MyEvents.foo and MyNotifier.bar") + + def test_reemit_ignores_unknown_event_type(self): + # The event type may have been gone for good, and nobody cares, + # so this shouldn't be an error scenario. + + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyNotifier(Object): + foo = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append(event.handle) + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + framework.observe(pub.foo, obs._on_foo) + pub.foo.emit() + + event_handle = obs.seen[0] + self.assertEqual(event_handle.kind, "foo") + + framework.commit() + framework.close() + + framework_copy = self.create_framework() + + # No errors on missing event types here. + framework_copy.reemit() + + # Register the type and check that the event is gone from storage. 
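+        # (reemit() above already discarded the stale event, so loading its snapshot
+        # must fail even once the type is known.)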
+ framework_copy.register_type(MyEvent, event_handle.parent, event_handle.kind) + self.assertRaises(NoSnapshotError, framework_copy.load_snapshot, event_handle) + + def test_auto_register_event_types(self): + framework = self.create_framework() + + class MyFoo(EventBase): + pass + + class MyBar(EventBase): + pass + + class MyEvents(ObjectEvents): + foo = EventSource(MyFoo) + + class MyNotifier(Object): + on = MyEvents() + bar = EventSource(MyBar) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append("on_foo:{}:{}".format(type(event).__name__, event.handle.kind)) + event.defer() + + def _on_bar(self, event): + self.seen.append("on_bar:{}:{}".format(type(event).__name__, event.handle.kind)) + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + pub.on.foo.emit() + pub.bar.emit() + + framework.observe(pub.on.foo, obs._on_foo) + framework.observe(pub.bar, obs._on_bar) + + pub.on.foo.emit() + pub.bar.emit() + + self.assertEqual(obs.seen, ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"]) + + def test_dynamic_event_types(self): + framework = self.create_framework() + + class MyEventsA(ObjectEvents): + handle_kind = 'on_a' + + class MyEventsB(ObjectEvents): + handle_kind = 'on_b' + + class MyNotifier(Object): + on_a = MyEventsA() + on_b = MyEventsB() + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append("on_foo:{}:{}".format(type(event).__name__, event.handle.kind)) + event.defer() + + def _on_bar(self, event): + self.seen.append("on_bar:{}:{}".format(type(event).__name__, event.handle.kind)) + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + class MyFoo(EventBase): + pass + + class MyBar(EventBase): + pass + + class DeadBeefEvent(EventBase): + pass + + class NoneEvent(EventBase): + pass + + pub.on_a.define_event("foo", MyFoo) + pub.on_b.define_event("bar", MyBar) + + framework.observe(pub.on_a.foo, obs._on_foo) + framework.observe(pub.on_b.bar, obs._on_bar) + + pub.on_a.foo.emit() + pub.on_b.bar.emit() + + self.assertEqual(obs.seen, ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"]) + + # Definitions remained local to the specific type. + self.assertRaises(AttributeError, lambda: pub.on_a.bar) + self.assertRaises(AttributeError, lambda: pub.on_b.foo) + + # Try to use an event name which is not a valid python identifier. + with self.assertRaises(RuntimeError): + pub.on_a.define_event("dead-beef", DeadBeefEvent) + + # Try to use a python keyword for an event name. + with self.assertRaises(RuntimeError): + pub.on_a.define_event("None", NoneEvent) + + # Try to override an existing attribute. + with self.assertRaises(RuntimeError): + pub.on_a.define_event("foo", MyFoo) + + def test_event_key_roundtrip(self): + class MyEvent(EventBase): + def __init__(self, handle, value): + super().__init__(handle) + self.value = value + + def snapshot(self): + return self.value + + def restore(self, value): + self.value = value + + class MyNotifier(Object): + foo = EventSource(MyEvent) + + class MyObserver(Object): + has_deferred = False + + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append((event.handle.key, event.value)) + # Only defer the first event and once. 
+                if not MyObserver.has_deferred:
+                    event.defer()
+                    MyObserver.has_deferred = True
+
+        framework1 = self.create_framework(tmpdir=self.tmpdir)
+        pub1 = MyNotifier(framework1, "pub")
+        obs1 = MyObserver(framework1, "obs")
+        framework1.observe(pub1.foo, obs1._on_foo)
+        pub1.foo.emit('first')
+        self.assertEqual(obs1.seen, [('1', 'first')])
+
+        framework1.commit()
+        framework1.close()
+        del framework1
+
+        framework2 = self.create_framework(tmpdir=self.tmpdir)
+        pub2 = MyNotifier(framework2, "pub")
+        obs2 = MyObserver(framework2, "obs")
+        framework2.observe(pub2.foo, obs2._on_foo)
+        pub2.foo.emit('second')
+        framework2.reemit()
+
+        # The first observer didn't get updated, since the framework it was bound to is gone.
+        self.assertEqual(obs1.seen, [('1', 'first')])
+        # The second observer saw the new event plus the reemit of the first event.
+        # (The event key goes up by 2 due to the pre-commit and commit events.)
+        self.assertEqual(obs2.seen, [('4', 'second'), ('1', 'first')])
+
+    def test_helper_properties(self):
+        framework = self.create_framework()
+        framework.model = 'test-model'
+        framework.meta = 'test-meta'
+
+        my_obj = Object(framework, 'my_obj')
+        self.assertEqual(my_obj.model, framework.model)
+
+    def test_ban_concurrent_frameworks(self):
+        f = self.create_framework(tmpdir=self.tmpdir)
+        with self.assertRaises(Exception) as cm:
+            self.create_framework(tmpdir=self.tmpdir)
+        self.assertIn('database is locked', str(cm.exception))
+        f.close()
+
+    def test_snapshot_saving_restricted_to_simple_types(self):
+        # This cannot be saved, as it does not contain only simple types!
+        to_be_saved = {"bar": TestFramework}
+
+        class FooEvent(EventBase):
+            def snapshot(self):
+                return to_be_saved
+
+        handle = Handle(None, "a_foo", "some_key")
+        event = FooEvent(handle)
+
+        framework = self.create_framework()
+        framework.register_type(FooEvent, None, handle.kind)
+        with self.assertRaises(ValueError) as cm:
+            framework.save_snapshot(event)
+        expected = (
+            "unable to save the data for FooEvent, it must contain only simple types: "
+            "{'bar': <class 'test.test_framework.TestFramework'>}")
+        self.assertEqual(str(cm.exception), expected)
+
+    def test_unobserved_events_dont_leave_cruft(self):
+        class FooEvent(EventBase):
+            def snapshot(self):
+                return {'content': 1}
+
+        class Events(ObjectEvents):
+            foo = EventSource(FooEvent)
+
+        class Emitter(Object):
+            on = Events()
+
+        framework = self.create_framework()
+        e = Emitter(framework, 'key')
+        e.on.foo.emit()
+        ev_1_handle = Handle(e.on, "foo", "1")
+        with self.assertRaises(NoSnapshotError):
+            framework.load_snapshot(ev_1_handle)
+        # Committing will save the framework's state, but no other snapshots should be saved.
+        framework.commit()
+        events = framework._storage.list_snapshots()
+        self.assertEqual(list(events), [framework._stored.handle.path])
+
+    def test_event_regex(self):
+        examples = [
+            'Ubuntu/on/config_changed[7]',
+            'on/commit[9]',
+            'on/pre_commit[8]',
+        ]
+        non_examples = [
+            'StoredStateData[_stored]',
+            'ObjectWithSTorage[obj]StoredStateData[_stored]',
+        ]
+        regex = re.compile(_event_regex)
+        for e in examples:
+            self.assertIsNotNone(regex.match(e))
+        for e in non_examples:
+            self.assertIsNone(regex.match(e))
+
+    def test_remove_unreferenced_events(self):
+        framework = self.create_framework()
+
+        class Evt(EventBase):
+            pass
+
+        class Events(ObjectEvents):
+            event = EventSource(Evt)
+
+        class ObjectWithStorage(Object):
+            _stored = StoredState()
+            on = Events()
+
+            def __init__(self, framework, key):
+                super().__init__(framework, key)
+                self._stored.set_default(foo=2)
+                self.framework.observe(self.on.event, 
self._on_event) + + def _on_event(self, event): + event.defer() + + # This is an event that 'happened in the past' that doesn't have an associated notice. + o = ObjectWithStorage(framework, 'obj') + handle = Handle(o.on, 'event', '100') + event = Evt(handle) + framework.save_snapshot(event) + self.assertEqual(list(framework._storage.list_snapshots()), [handle.path]) + o.on.event.emit() + self.assertEqual( + list(framework._storage.notices('')), + [('ObjectWithStorage[obj]/on/event[1]', 'ObjectWithStorage[obj]', '_on_event')]) + framework.commit() + self.assertEqual( + sorted(framework._storage.list_snapshots()), + sorted(['ObjectWithStorage[obj]/on/event[100]', + 'StoredStateData[_stored]', + 'ObjectWithStorage[obj]/StoredStateData[_stored]', + 'ObjectWithStorage[obj]/on/event[1]'])) + framework.remove_unreferenced_events() + self.assertEqual( + sorted(framework._storage.list_snapshots()), + sorted([ + 'StoredStateData[_stored]', + 'ObjectWithStorage[obj]/StoredStateData[_stored]', + 'ObjectWithStorage[obj]/on/event[1]'])) + + +class TestStoredState(BaseTestCase): + + def setUp(self): + self.tmpdir = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(self.tmpdir)) + + def test_basic_state_storage(self): + class SomeObject(Object): + _stored = StoredState() + + self._stored_state_tests(SomeObject) + + def test_straight_subclass(self): + class SomeObject(Object): + _stored = StoredState() + + class Sub(SomeObject): + pass + + self._stored_state_tests(Sub) + + def test_straight_sub_subclass(self): + class SomeObject(Object): + _stored = StoredState() + + class Sub(SomeObject): + pass + + class SubSub(SomeObject): + pass + + self._stored_state_tests(SubSub) + + def test_two_subclasses(self): + class SomeObject(Object): + _stored = StoredState() + + class SubA(SomeObject): + pass + + class SubB(SomeObject): + pass + + self._stored_state_tests(SubA) + self._stored_state_tests(SubB) + + def test_the_crazy_thing(self): + class NoState(Object): + pass + + class StatedObject(NoState): + _stored = StoredState() + + class Sibling(NoState): + pass + + class FinalChild(StatedObject, Sibling): + pass + + self._stored_state_tests(FinalChild) + + def _stored_state_tests(self, cls): + framework = self.create_framework(tmpdir=self.tmpdir) + obj = cls(framework, "1") + + try: + obj._stored.foo + except AttributeError as e: + self.assertEqual(str(e), "attribute 'foo' is not stored") + else: + self.fail("AttributeError not raised") + + try: + obj._stored.on = "nonono" + except AttributeError as e: + self.assertEqual(str(e), "attribute 'on' is reserved and cannot be set") + else: + self.fail("AttributeError not raised") + + obj._stored.foo = 41 + obj._stored.foo = 42 + obj._stored.bar = "s" + obj._stored.baz = 4.2 + obj._stored.bing = True + + self.assertEqual(obj._stored.foo, 42) + + framework.commit() + + # This won't be committed, and should not be seen. + obj._stored.foo = 43 + + framework.close() + + # Since this has the same absolute object handle, it will get its state back. 
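+        # (StoredStateData is snapshotted under the object's handle path, which is
+        # the same in the new framework, so the state is found again.)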
+ framework_copy = self.create_framework(tmpdir=self.tmpdir) + obj_copy = cls(framework_copy, "1") + self.assertEqual(obj_copy._stored.foo, 42) + self.assertEqual(obj_copy._stored.bar, "s") + self.assertEqual(obj_copy._stored.baz, 4.2) + self.assertEqual(obj_copy._stored.bing, True) + + framework_copy.close() + + def test_two_subclasses_no_conflicts(self): + class Base(Object): + _stored = StoredState() + + class SubA(Base): + pass + + class SubB(Base): + pass + + framework = self.create_framework(tmpdir=self.tmpdir) + a = SubA(framework, None) + b = SubB(framework, None) + z = Base(framework, None) + + a._stored.foo = 42 + b._stored.foo = "hello" + z._stored.foo = {1} + + framework.commit() + framework.close() + + framework2 = self.create_framework(tmpdir=self.tmpdir) + a2 = SubA(framework2, None) + b2 = SubB(framework2, None) + z2 = Base(framework2, None) + + self.assertEqual(a2._stored.foo, 42) + self.assertEqual(b2._stored.foo, "hello") + self.assertEqual(z2._stored.foo, {1}) + + def test_two_names_one_state(self): + class Mine(Object): + _stored = StoredState() + _stored2 = _stored + + framework = self.create_framework() + obj = Mine(framework, None) + + with self.assertRaises(RuntimeError): + obj._stored.foo = 42 + + with self.assertRaises(RuntimeError): + obj._stored2.foo = 42 + + framework.close() + + # make sure we're not changing the object on failure + self.assertNotIn("_stored", obj.__dict__) + self.assertNotIn("_stored2", obj.__dict__) + + def test_same_name_two_classes(self): + class Base(Object): + pass + + class A(Base): + _stored = StoredState() + + class B(Base): + _stored = A._stored + + framework = self.create_framework() + a = A(framework, None) + b = B(framework, None) + + # NOTE it's the second one that actually triggers the + # exception, but that's an implementation detail + a._stored.foo = 42 + + with self.assertRaises(RuntimeError): + b._stored.foo = "xyzzy" + + framework.close() + + # make sure we're not changing the object on failure + self.assertNotIn("_stored", b.__dict__) + + def test_mutable_types_invalid(self): + framework = self.create_framework() + + class SomeObject(Object): + _stored = StoredState() + + obj = SomeObject(framework, '1') + try: + class CustomObject: + pass + obj._stored.foo = CustomObject() + except AttributeError as e: + self.assertEqual( + str(e), + "attribute 'foo' cannot be a CustomObject: must be int/float/dict/list/etc") + else: + self.fail('AttributeError not raised') + + framework.commit() + + def test_mutable_types(self): + # Test and validation functions in a list of 2-tuples. + # Assignment and keywords like del are not supported in lambdas + # so functions are used instead. + test_operations = [( + lambda: {}, # Operand A. + None, # Operand B. + {}, # Expected result. + lambda a, b: None, # Operation to perform. + lambda res, expected_res: self.assertEqual(res, expected_res) # Validation to perform. 
+ ), ( + lambda: {}, + {'a': {}}, + {'a': {}}, + lambda a, b: a.update(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'a': {}}, + {'b': 'c'}, + {'a': {'b': 'c'}}, + lambda a, b: a['a'].update(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'a': {'b': 'c'}}, + {'d': 'e'}, + {'a': {'b': 'c', 'd': 'e'}}, + lambda a, b: a['a'].update(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'a': {'b': 'c', 'd': 'e'}}, + 'd', + {'a': {'b': 'c'}}, + lambda a, b: a['a'].pop(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'s': set()}, + 'a', + {'s': {'a'}}, + lambda a, b: a['s'].add(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'s': {'a'}}, + 'a', + {'s': set()}, + lambda a, b: a['s'].discard(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: [], + None, + [], + lambda a, b: None, + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: [], + 'a', + ['a'], + lambda a, b: a.append(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a'], + ['c'], + ['a', ['c']], + lambda a, b: a.append(b), + lambda res, expected_res: ( + self.assertEqual(res, expected_res), + self.assertIsInstance(res[1], StoredList), + ) + ), ( + lambda: ['a', ['c']], + 'b', + ['b', 'a', ['c']], + lambda a, b: a.insert(0, b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['b', 'a', ['c']], + ['d'], + ['b', ['d'], 'a', ['c']], + lambda a, b: a.insert(1, b), + lambda res, expected_res: ( + self.assertEqual(res, expected_res), + self.assertIsInstance(res[1], StoredList) + ), + ), ( + lambda: ['b', 'a', ['c']], + ['d'], + ['b', ['d'], ['c']], + # a[1] = b + lambda a, b: a.__setitem__(1, b), + lambda res, expected_res: ( + self.assertEqual(res, expected_res), + self.assertIsInstance(res[1], StoredList) + ), + ), ( + lambda: ['b', ['d'], 'a', ['c']], + 0, + [['d'], 'a', ['c']], + lambda a, b: a.pop(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: [['d'], 'a', ['c']], + ['d'], + ['a', ['c']], + lambda a, b: a.remove(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c']], + 'd', + ['a', ['c', 'd']], + lambda a, b: a[1].append(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c', 'd']], + 1, + ['a', ['c']], + lambda a, b: a[1].pop(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c']], + 'd', + ['a', ['c', 'd']], + lambda a, b: a[1].insert(1, b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c', 'd']], + 'd', + ['a', ['c']], + lambda a, b: a[1].remove(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(), + None, + set(), + lambda a, b: None, + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(), + 'a', + set(['a']), + lambda a, b: a.add(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(['a']), + 'a', + set(), + lambda a, b: a.discard(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(), + {'a'}, + set(), + # Nested sets are not allowed as sets themselves are not hashable. 
+ lambda a, b: self.assertRaises(TypeError, a.add, b), + lambda res, expected_res: self.assertEqual(res, expected_res) + )] + + class SomeObject(Object): + _stored = StoredState() + + class WrappedFramework(Framework): + def __init__(self, store, charm_dir, meta, model): + super().__init__(store, charm_dir, meta, model) + self.snapshots = [] + + def save_snapshot(self, value): + if value.handle.path == 'SomeObject[1]/StoredStateData[_stored]': + self.snapshots.append((type(value), value.snapshot())) + return super().save_snapshot(value) + + # Validate correctness of modification operations. + for get_a, b, expected_res, op, validate_op in test_operations: + storage = SQLiteStorage(self.tmpdir / "framework.data") + framework = WrappedFramework(storage, self.tmpdir, None, None) + obj = SomeObject(framework, '1') + + obj._stored.a = get_a() + self.assertTrue(isinstance(obj._stored, BoundStoredState)) + + op(obj._stored.a, b) + validate_op(obj._stored.a, expected_res) + + obj._stored.a = get_a() + framework.commit() + # We should see an update for initializing a + self.assertEqual(framework.snapshots, [ + (StoredStateData, {'a': get_a()}), + ]) + del obj + gc.collect() + obj_copy1 = SomeObject(framework, '1') + self.assertEqual(obj_copy1._stored.a, get_a()) + + op(obj_copy1._stored.a, b) + validate_op(obj_copy1._stored.a, expected_res) + framework.commit() + framework.close() + + storage_copy = SQLiteStorage(self.tmpdir / "framework.data") + framework_copy = WrappedFramework(storage_copy, self.tmpdir, None, None) + + obj_copy2 = SomeObject(framework_copy, '1') + + validate_op(obj_copy2._stored.a, expected_res) + + # Commit saves the pre-commit and commit events, and the framework + # event counter, but shouldn't update the stored state of my object + framework.snapshots.clear() + framework_copy.commit() + self.assertEqual(framework_copy.snapshots, []) + framework_copy.close() + + def test_comparison_operations(self): + test_operations = [( + {"1"}, # Operand A. + {"1", "2"}, # Operand B. + lambda a, b: a < b, # Operation to test. + True, # Result of op(A, B). + False, # Result of op(B, A). + ), ( + {"1"}, + {"1", "2"}, + lambda a, b: a > b, + False, + True + ), ( + # Empty set comparison. + set(), + set(), + lambda a, b: a == b, + True, + True + ), ( + {"a", "c"}, + {"c", "a"}, + lambda a, b: a == b, + True, + True + ), ( + dict(), + dict(), + lambda a, b: a == b, + True, + True + ), ( + {"1": "2"}, + {"1": "2"}, + lambda a, b: a == b, + True, + True + ), ( + {"1": "2"}, + {"1": "3"}, + lambda a, b: a == b, + False, + False + ), ( + [], + [], + lambda a, b: a == b, + True, + True + ), ( + [1, 2], + [1, 2], + lambda a, b: a == b, + True, + True + ), ( + [1, 2, 5, 6], + [1, 2, 5, 8, 10], + lambda a, b: a <= b, + True, + False + ), ( + [1, 2, 5, 6], + [1, 2, 5, 8, 10], + lambda a, b: a < b, + True, + False + ), ( + [1, 2, 5, 8], + [1, 2, 5, 6, 10], + lambda a, b: a > b, + True, + False + ), ( + [1, 2, 5, 8], + [1, 2, 5, 6, 10], + lambda a, b: a >= b, + True, + False + )] + + class SomeObject(Object): + _stored = StoredState() + + framework = self.create_framework() + + for i, (a, b, op, op_ab, op_ba) in enumerate(test_operations): + obj = SomeObject(framework, str(i)) + obj._stored.a = a + self.assertEqual(op(obj._stored.a, b), op_ab) + self.assertEqual(op(b, obj._stored.a), op_ba) + + def test_set_operations(self): + test_operations = [( + {"1"}, # A set to test an operation against (other_set). + lambda a, b: a | b, # An operation to test. 
+ {"1", "a", "b"}, # The expected result of operation(obj._stored.set, other_set). + {"1", "a", "b"} # The expected result of operation(other_set, obj._stored.set). + ), ( + {"a", "c"}, + lambda a, b: a - b, + {"b"}, + {"c"} + ), ( + {"a", "c"}, + lambda a, b: a & b, + {"a"}, + {"a"} + ), ( + {"a", "c", "d"}, + lambda a, b: a ^ b, + {"b", "c", "d"}, + {"b", "c", "d"} + ), ( + set(), + lambda a, b: set(a), + {"a", "b"}, + set() + )] + + class SomeObject(Object): + _stored = StoredState() + + framework = self.create_framework() + + # Validate that operations between StoredSet and built-in sets + # only result in built-in sets being returned. + # Make sure that commutativity is preserved and that the + # original sets are not changed or used as a result. + for i, (variable_operand, operation, ab_res, ba_res) in enumerate(test_operations): + obj = SomeObject(framework, str(i)) + obj._stored.set = {"a", "b"} + + for a, b, expected in [ + (obj._stored.set, variable_operand, ab_res), + (variable_operand, obj._stored.set, ba_res)]: + old_a = set(a) + old_b = set(b) + + result = operation(a, b) + self.assertEqual(result, expected) + + # Common sanity checks + self.assertIsNot(obj._stored.set._under, result) + self.assertIsNot(result, a) + self.assertIsNot(result, b) + self.assertEqual(a, old_a) + self.assertEqual(b, old_b) + + def test_set_default(self): + framework = self.create_framework() + + class StatefulObject(Object): + _stored = StoredState() + parent = StatefulObject(framework, 'key') + parent._stored.set_default(foo=1) + self.assertEqual(parent._stored.foo, 1) + parent._stored.set_default(foo=2) + # foo was already set, so it doesn't get replaced + self.assertEqual(parent._stored.foo, 1) + parent._stored.set_default(foo=3, bar=4) + self.assertEqual(parent._stored.foo, 1) + self.assertEqual(parent._stored.bar, 4) + # reloading the state still leaves things at the default values + framework.commit() + del parent + parent = StatefulObject(framework, 'key') + parent._stored.set_default(foo=5, bar=6) + self.assertEqual(parent._stored.foo, 1) + self.assertEqual(parent._stored.bar, 4) + # TODO: jam 2020-01-30 is there a clean way to tell that + # parent._stored._data.dirty is False? + + +class GenericObserver(Object): + """Generic observer for the tests.""" + + def __init__(self, parent, key): + super().__init__(parent, key) + self.called = False + + def callback_method(self, event): + """Set the instance .called to True.""" + self.called = True + + +@patch('sys.stderr', new_callable=io.StringIO) +class BreakpointTests(BaseTestCase): + + def setUp(self): + super().setUp() + logassert.setup(self, 'ops') + + def test_ignored(self, fake_stderr): + # It doesn't do anything really unless proper environment is there. + with patch.dict(os.environ): + os.environ.pop('JUJU_DEBUG_AT', None) + framework = self.create_framework() + + with patch('pdb.Pdb.set_trace') as mock: + framework.breakpoint() + self.assertEqual(mock.call_count, 0) + self.assertEqual(fake_stderr.getvalue(), "") + self.assertNotLoggedWarning("Breakpoint", "skipped") + + def test_pdb_properly_called(self, fake_stderr): + # The debugger needs to leave the user in the frame where the breakpoint is executed, + # which for the test is the frame we're calling it here in the test :). 
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}):
+            framework = self.create_framework()
+
+        with patch('pdb.Pdb.set_trace') as mock:
+            this_frame = inspect.currentframe()
+            framework.breakpoint()
+
+        self.assertEqual(mock.call_count, 1)
+        self.assertEqual(mock.call_args, ((this_frame,), {}))
+
+    def test_welcome_message(self, fake_stderr):
+        # Check that an initial message is shown to the user when code is interrupted.
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}):
+            framework = self.create_framework()
+        with patch('pdb.Pdb.set_trace'):
+            framework.breakpoint()
+        self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE)
+
+    def test_welcome_message_not_multiple(self, fake_stderr):
+        # Check that an initial message is NOT shown twice if the breakpoint is exercised
+        # twice in the same run.
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}):
+            framework = self.create_framework()
+        with patch('pdb.Pdb.set_trace'):
+            framework.breakpoint()
+            self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE)
+            framework.breakpoint()
+            self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE)
+
+    def test_builtin_breakpoint_hooked(self, fake_stderr):
+        # Verify that the proper hook is set.
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}):
+            self.create_framework()  # creating the framework sets up the hook
+        with patch('pdb.Pdb.set_trace') as mock:
+            # Calling through sys, not breakpoint() directly, so we can run the
+            # tests with Py < 3.7.
+            sys.breakpointhook()
+        self.assertEqual(mock.call_count, 1)
+
+    def test_breakpoint_names(self, fake_stderr):
+        framework = self.create_framework()
+
+        # Name rules:
+        # - must start and end with lowercase alphanumeric characters
+        # - only contain lowercase alphanumeric characters, or the hyphen "-"
+        good_names = [
+            'foobar',
+            'foo-bar-baz',
+            'foo-------bar',
+            'foo123',
+            '778',
+            '77-xx',
+            'a-b',
+            'ab',
+            'x',
+        ]
+        for name in good_names:
+            with self.subTest(name=name):
+                framework.breakpoint(name)
+
+        bad_names = [
+            '',
+            '.',
+            '-',
+            '...foo',
+            'foo.bar',
+            'bar--',
+            'FOO',
+            'FooBar',
+            'foo bar',
+            'foo_bar',
+            '/foobar',
+            'break-here-☚',
+        ]
+        msg = 'breakpoint names must look like "foo" or "foo-bar"'
+        for name in bad_names:
+            with self.subTest(name=name):
+                with self.assertRaises(ValueError) as cm:
+                    framework.breakpoint(name)
+                self.assertEqual(str(cm.exception), msg)
+
+        reserved_names = [
+            'all',
+            'hook',
+        ]
+        msg = 'breakpoint names "all" and "hook" are reserved'
+        for name in reserved_names:
+            with self.subTest(name=name):
+                with self.assertRaises(ValueError) as cm:
+                    framework.breakpoint(name)
+                self.assertEqual(str(cm.exception), msg)
+
+        not_really_names = [
+            123,
+            1.1,
+            False,
+        ]
+        for name in not_really_names:
+            with self.subTest(name=name):
+                with self.assertRaises(TypeError) as cm:
+                    framework.breakpoint(name)
+                self.assertEqual(str(cm.exception), 'breakpoint names must be strings')
+
+    def check_trace_set(self, envvar_value, breakpoint_name, call_count):
+        """Helper to check the various combinations of situations."""
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': envvar_value}):
+            framework = self.create_framework()
+        with patch('pdb.Pdb.set_trace') as mock:
+            framework.breakpoint(breakpoint_name)
+        self.assertEqual(mock.call_count, call_count)
+
+    def test_unnamed_indicated_all(self, fake_stderr):
+        # If 'all' is indicated, unnamed breakpoints will always activate.
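+        # (check_trace_set arguments, per the helper above: JUJU_DEBUG_AT value,
+        # name passed to framework.breakpoint(), expected set_trace call count.)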
+ self.check_trace_set('all', None, 1) + + def test_unnamed_indicated_hook(self, fake_stderr): + # Special value 'hook' was indicated, nothing to do with any call. + self.check_trace_set('hook', None, 0) + + def test_named_indicated_specifically(self, fake_stderr): + # Some breakpoint was indicated, and the framework call used exactly that name. + self.check_trace_set('mybreak', 'mybreak', 1) + + def test_named_indicated_unnamed(self, fake_stderr): + # Some breakpoint was indicated, but the framework call was unnamed + self.check_trace_set('some-breakpoint', None, 0) + self.assertLoggedWarning( + "Breakpoint None skipped", + "not found in the requested breakpoints: ['some-breakpoint']") + + def test_named_indicated_somethingelse(self, fake_stderr): + # Some breakpoint was indicated, but the framework call was with a different name + self.check_trace_set('some-breakpoint', 'other-name', 0) + self.assertLoggedWarning( + "Breakpoint 'other-name' skipped", + "not found in the requested breakpoints: ['some-breakpoint']") + + def test_named_indicated_ingroup(self, fake_stderr): + # A multiple breakpoint was indicated, and the framework call used a name among those. + self.check_trace_set('some,mybreak,foobar', 'mybreak', 1) + + def test_named_indicated_all(self, fake_stderr): + # The framework indicated 'all', which includes any named breakpoint set. + self.check_trace_set('all', 'mybreak', 1) + + def test_named_indicated_hook(self, fake_stderr): + # The framework indicated the special value 'hook', nothing to do with any named call. + self.check_trace_set('hook', 'mybreak', 0) + + +class DebugHookTests(BaseTestCase): + + def test_envvar_parsing_missing(self): + with patch.dict(os.environ): + os.environ.pop('JUJU_DEBUG_AT', None) + framework = self.create_framework() + self.assertEqual(framework._juju_debug_at, ()) + + def test_envvar_parsing_empty(self): + with patch.dict(os.environ, {'JUJU_DEBUG_AT': ''}): + framework = self.create_framework() + self.assertEqual(framework._juju_debug_at, ()) + + def test_envvar_parsing_simple(self): + with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'hook'}): + framework = self.create_framework() + self.assertEqual(framework._juju_debug_at, ['hook']) + + def test_envvar_parsing_multiple(self): + with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'foo,bar,all'}): + framework = self.create_framework() + self.assertEqual(framework._juju_debug_at, ['foo', 'bar', 'all']) + + def test_basic_interruption_enabled(self): + framework = self.create_framework() + framework._juju_debug_at = ['hook'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch('sys.stderr', new_callable=io.StringIO) as fake_stderr: + with patch('pdb.runcall') as mock: + publisher.install.emit() + + # Check that the pdb module was used correctly and that the callback method was NOT + # called (as we intercepted the normal pdb behaviour! this is to check that the + # framework didn't call the callback directly) + self.assertEqual(mock.call_count, 1) + expected_callback, expected_event = mock.call_args[0] + self.assertEqual(expected_callback, observer.callback_method) + self.assertIsInstance(expected_event, EventBase) + self.assertFalse(observer.called) + + # Verify proper message was given to the user. 
+ self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE) + + def test_actions_are_interrupted(self): + test_model = self.create_model() + framework = self.create_framework(model=test_model) + framework._juju_debug_at = ['hook'] + + class CustomEvents(ObjectEvents): + foobar_action = EventSource(charm.ActionEvent) + + publisher = CustomEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.foobar_action, observer.callback_method) + fake_script(self, 'action-get', "echo {}") + + with patch('sys.stderr', new_callable=io.StringIO): + with patch('pdb.runcall') as mock: + with patch.dict(os.environ, {'JUJU_ACTION_NAME': 'foobar'}): + publisher.foobar_action.emit() + + self.assertEqual(mock.call_count, 1) + self.assertFalse(observer.called) + + def test_internal_events_not_interrupted(self): + class MyNotifier(Object): + """Generic notifier for the tests.""" + bar = EventSource(EventBase) + + framework = self.create_framework() + framework._juju_debug_at = ['hook'] + + publisher = MyNotifier(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.bar, observer.callback_method) + + with patch('pdb.runcall') as mock: + publisher.bar.emit() + + self.assertEqual(mock.call_count, 0) + self.assertTrue(observer.called) + + def test_envvar_mixed(self): + framework = self.create_framework() + framework._juju_debug_at = ['foo', 'hook', 'all', 'whatever'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch('sys.stderr', new_callable=io.StringIO): + with patch('pdb.runcall') as mock: + publisher.install.emit() + + self.assertEqual(mock.call_count, 1) + self.assertFalse(observer.called) + + def test_no_registered_method(self): + framework = self.create_framework() + framework._juju_debug_at = ['hook'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + + with patch('pdb.runcall') as mock: + publisher.install.emit() + + self.assertEqual(mock.call_count, 0) + self.assertFalse(observer.called) + + def test_envvar_nohook(self): + framework = self.create_framework() + framework._juju_debug_at = ['something-else'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'something-else'}): + with patch('pdb.runcall') as mock: + publisher.install.emit() + + self.assertEqual(mock.call_count, 0) + self.assertTrue(observer.called) + + def test_envvar_missing(self): + framework = self.create_framework() + framework._juju_debug_at = () + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch('pdb.runcall') as mock: + publisher.install.emit() + + self.assertEqual(mock.call_count, 0) + self.assertTrue(observer.called) + + def test_welcome_message_not_multiple(self): + framework = self.create_framework() + framework._juju_debug_at = ['hook'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch('sys.stderr', new_callable=io.StringIO) as fake_stderr: + with patch('pdb.runcall') as mock: + publisher.install.emit() + self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE) 
+ publisher.install.emit() + self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE) + self.assertEqual(mock.call_count, 2) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_helpers.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_helpers.py new file mode 100755 index 0000000000000000000000000000000000000000..f7c6fc29a60362940d4e0259351d1d53eb9dbe93 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_helpers.py @@ -0,0 +1,121 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import pathlib +import subprocess +import shutil +import tempfile +import unittest + +from ops.framework import Framework +from ops.model import Model, _ModelBackend +from ops.charm import CharmMeta +from ops.storage import SQLiteStorage + + +def fake_script(test_case, name, content): + if not hasattr(test_case, 'fake_script_path'): + fake_script_path = tempfile.mkdtemp('-fake_script') + os.environ['PATH'] = '{}:{}'.format(fake_script_path, os.environ["PATH"]) + + def cleanup(): + shutil.rmtree(fake_script_path) + os.environ['PATH'] = os.environ['PATH'].replace(fake_script_path + ':', '') + + test_case.addCleanup(cleanup) + test_case.fake_script_path = pathlib.Path(fake_script_path) + + template_args = { + 'name': name, + 'path': test_case.fake_script_path, + 'content': content, + } + + with (test_case.fake_script_path / name).open('wt') as f: + # Before executing the provided script, dump the provided arguments in calls.txt. + # ASCII 1E is RS 'record separator', and 1C is FS 'file separator', which seem appropriate. 
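+        # (For example, running `foo a "b c"` through the generated script
+        # appends "foo\x1ea\x1eb c\x1c" to calls.txt, which fake_script_calls()
+        # below parses back into ['foo', 'a', 'b c'].)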
+        f.write('''#!/bin/sh
+{{ printf {name}; printf "\\036%s" "$@"; printf "\\034"; }} >> {path}/calls.txt
+{content}'''.format_map(template_args))
+    os.chmod(str(test_case.fake_script_path / name), 0o755)
+
+
+def fake_script_calls(test_case, clear=False):
+    try:
+        with (test_case.fake_script_path / 'calls.txt').open('r+t') as f:
+            calls = [line.split('\x1e') for line in f.read().split('\x1c')[:-1]]
+            if clear:
+                f.truncate(0)
+            return calls
+    except FileNotFoundError:
+        return []
+
+
+class FakeScriptTest(unittest.TestCase):
+
+    def test_fake_script_works(self):
+        fake_script(self, 'foo', 'echo foo runs')
+        fake_script(self, 'bar', 'echo bar runs')
+        output = subprocess.getoutput('foo a "b c "; bar "d e" f')
+        self.assertEqual(output, 'foo runs\nbar runs')
+        self.assertEqual(fake_script_calls(self), [
+            ['foo', 'a', 'b c '],
+            ['bar', 'd e', 'f'],
+        ])
+
+    def test_fake_script_clear(self):
+        fake_script(self, 'foo', 'echo foo runs')
+
+        output = subprocess.getoutput('foo a "b c"')
+        self.assertEqual(output, 'foo runs')
+
+        self.assertEqual(fake_script_calls(self, clear=True), [['foo', 'a', 'b c']])
+
+        fake_script(self, 'bar', 'echo bar runs')
+
+        output = subprocess.getoutput('bar "d e" f')
+        self.assertEqual(output, 'bar runs')
+
+        self.assertEqual(fake_script_calls(self, clear=True), [['bar', 'd e', 'f']])
+
+        self.assertEqual(fake_script_calls(self, clear=True), [])
+
+
+class BaseTestCase(unittest.TestCase):
+
+    def create_framework(self, *, model=None, tmpdir=None):
+        """Create a Framework object.
+
+        By default, operate in-memory; pass a temporary directory via the 'tmpdir'
+        parameter if you wish to instantiate several frameworks sharing the
+        same dir (e.g. for storing state).
+        """
+        if tmpdir is None:
+            data_fpath = ":memory:"
+            charm_dir = 'non-existent'
+        else:
+            data_fpath = tmpdir / "framework.data"
+            charm_dir = tmpdir
+
+        framework = Framework(SQLiteStorage(data_fpath), charm_dir, meta=None, model=model)
+        self.addCleanup(framework.close)
+        return framework
+
+    def create_model(self):
+        """Create a Model object."""
+        backend = _ModelBackend(unit_name='myapp/0')
+        meta = CharmMeta()
+        model = Model(meta, backend)
+        return model
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_infra.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_infra.py
new file mode 100644
index 0000000000000000000000000000000000000000..5133b35d11be89a702558d6f961facb8884553ec
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_infra.py
@@ -0,0 +1,155 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
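+#
+# (Overview: these infrastructure tests lint the tree with flake8 and
+# autopep8, reject needless backslash escapes, require copyright headers,
+# and sanity-check setup.py metadata.)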
+
+import io
+import itertools
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import unittest
+from unittest.mock import patch
+
+import autopep8
+from flake8.api.legacy import get_style_guide
+
+import ops
+
+
+def get_python_filepaths():
+    """Helper to retrieve paths of Python files."""
+    python_paths = ['setup.py']
+    for root in ['ops', 'test']:
+        for dirpath, dirnames, filenames in os.walk(root):
+            for filename in filenames:
+                if filename.endswith(".py"):
+                    python_paths.append(os.path.join(dirpath, filename))
+    return python_paths
+
+
+class InfrastructureTests(unittest.TestCase):
+
+    def test_pep8(self):
+        # verify all files are nicely styled
+        python_filepaths = get_python_filepaths()
+        style_guide = get_style_guide()
+        fake_stdout = io.StringIO()
+        with patch('sys.stdout', fake_stdout):
+            report = style_guide.check_files(python_filepaths)
+
+        # if flake8 didn't report anything, we're done
+        if report.total_errors == 0:
+            return
+
+        # identify which files have issues
+        flake8_issues = fake_stdout.getvalue().split('\n')
+        broken_filepaths = {item.split(':')[0] for item in flake8_issues if item}
+
+        # give hints to the developer on how files' style could be improved
+        options = autopep8.parse_args([''])
+        options.aggressive = 1
+        options.diff = True
+        options.max_line_length = 99
+
+        issues = []
+        for filepath in broken_filepaths:
+            diff = autopep8.fix_file(filepath, options=options)
+            if diff:
+                issues.append(diff)
+
+        report = ["Please fix files as suggested by autopep8:"] + issues
+        report += ["\n-- Original flake8 reports:"] + flake8_issues
+        self.fail("\n".join(report))
+
+    def test_quote_backslashes(self):
+        # ensure we're not using unneeded backslash to escape strings
+        issues = []
+        for filepath in get_python_filepaths():
+            with open(filepath, "rt", encoding="utf8") as fh:
+                for idx, line in enumerate(fh, 1):
+                    if (r'\"' in line or r"\'" in line) and "NOQA" not in line:
+                        issues.append((filepath, idx, line.rstrip()))
+        if issues:
+            msgs = ["{}:{:d}:{}".format(*issue) for issue in issues]
+            self.fail("Spurious backslashes found, please fix these quotings:\n" + "\n".join(msgs))
+
+    def test_ensure_copyright(self):
+        # all non-empty Python files must have a proper copyright somewhere in the first 5 lines
+        issues = []
+        regex = re.compile(r"# Copyright \d\d\d\d(-\d\d\d\d)? Canonical Ltd.\n")
+        for filepath in get_python_filepaths():
+            if os.stat(filepath).st_size == 0:
+                continue
+
+            with open(filepath, "rt", encoding="utf8") as fh:
+                for line in itertools.islice(fh, 5):
+                    if regex.match(line):
+                        break
+                else:
+                    issues.append(filepath)
+        if issues:
+            self.fail("Please add copyright headers to the following files:\n" + "\n".join(issues))
+
+    def _run_setup(self, *args):
+        proc = subprocess.run(
+            (sys.executable, 'setup.py') + args,
+            stdout=subprocess.PIPE,
+            check=True)
+        return proc.stdout.strip().decode("utf8")
+
+    def test_setup_version(self):
+        setup_version = self._run_setup('--version')
+
+        self.assertEqual(setup_version, ops.__version__)
+
+    def test_setup_description(self):
+        with open("README.md", "rt", encoding="utf8") as fh:
+            disk_readme = fh.read().strip()
+
+        setup_readme = self._run_setup('--long-description')
+
+        self.assertEqual(setup_readme, disk_readme)
+
+    def test_check(self):
+        self._run_setup('check', '--strict')
+
+
+class ImportersTestCase(unittest.TestCase):
+
+    template = "from ops import {module_name}"
+
+    def test_imports(self):
+        mod_names = [
+            'charm',
+            'framework',
+            'main',
+            'model',
+            'testing',
+        ]
+
+        for name in mod_names:
+            with self.subTest(name=name):
+                self.check(name)
+
+    def check(self, name):
+        """Helper function to run the test."""
+        _, testfile = tempfile.mkstemp()
+        self.addCleanup(os.unlink, testfile)
+
+        with open(testfile, 'wt', encoding='utf8') as fh:
+            fh.write(self.template.format(module_name=name))
+
+        proc = subprocess.run([sys.executable, testfile], env={'PYTHONPATH': os.getcwd()})
+        self.assertEqual(proc.returncode, 0)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_jujuversion.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_jujuversion.py
new file mode 100755
index 0000000000000000000000000000000000000000..1e3821017c22b1fb07c74a089b8f678d3b055fe6
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_jujuversion.py
@@ -0,0 +1,145 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
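+#
+# (Overview: these tests exercise JujuVersion parsing and validation, the
+# JUJU_VERSION environment lookup, and version equality/ordering.)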
+ +import os +import unittest +import unittest.mock # in this file, importing just 'patch' would be confusing + +from ops.jujuversion import JujuVersion + + +class TestJujuVersion(unittest.TestCase): + + def test_parsing(self): + test_cases = [ + ("0.0.0", 0, 0, '', 0, 0), + ("0.0.2", 0, 0, '', 2, 0), + ("0.1.0", 0, 1, '', 0, 0), + ("0.2.3", 0, 2, '', 3, 0), + ("10.234.3456", 10, 234, '', 3456, 0), + ("10.234.3456.1", 10, 234, '', 3456, 1), + ("1.21-alpha12", 1, 21, 'alpha', 12, 0), + ("1.21-alpha1.34", 1, 21, 'alpha', 1, 34), + ("2.7", 2, 7, '', 0, 0) + ] + + for vs, major, minor, tag, patch, build in test_cases: + v = JujuVersion(vs) + self.assertEqual(v.major, major) + self.assertEqual(v.minor, minor) + self.assertEqual(v.tag, tag) + self.assertEqual(v.patch, patch) + self.assertEqual(v.build, build) + + @unittest.mock.patch('os.environ', new={}) + def test_from_environ(self): + with self.assertRaisesRegex(RuntimeError, 'environ has no JUJU_VERSION'): + JujuVersion.from_environ() + + os.environ['JUJU_VERSION'] = 'no' + with self.assertRaisesRegex(RuntimeError, 'not a valid Juju version'): + JujuVersion.from_environ() + + os.environ['JUJU_VERSION'] = '2.8.0' + v = JujuVersion.from_environ() + self.assertEqual(v, JujuVersion('2.8.0')) + + def test_has_app_data(self): + self.assertTrue(JujuVersion('2.8.0').has_app_data()) + self.assertTrue(JujuVersion('2.7.0').has_app_data()) + self.assertFalse(JujuVersion('2.6.9').has_app_data()) + + def test_parsing_errors(self): + invalid_versions = [ + "xyz", + "foo.bar", + "foo.bar.baz", + "dead.beef.ca.fe", + "1234567890.2.1", # The major version is too long. + "0.2..1", # Two periods next to each other. + "1.21.alpha1", # Tag comes after period. + "1.21-alpha", # No patch number but a tag is present. + "1.21-alpha1beta", # Non-numeric string after the patch number. + "1.21-alpha-dev", # Tag duplication. + "1.21-alpha_dev3", # Underscore in a tag. + "1.21-alpha123dev3", # Non-numeric string after the patch number. 
+ ] + for v in invalid_versions: + with self.assertRaises(RuntimeError): + JujuVersion(v) + + def test_equality(self): + test_cases = [ + ("1.0.0", "1.0.0", True), + ("01.0.0", "1.0.0", True), + ("10.0.0", "9.0.0", False), + ("1.0.0", "1.0.1", False), + ("1.0.1", "1.0.0", False), + ("1.0.0", "1.1.0", False), + ("1.1.0", "1.0.0", False), + ("1.0.0", "2.0.0", False), + ("1.2-alpha1", "1.2.0", False), + ("1.2-alpha2", "1.2-alpha1", False), + ("1.2-alpha2.1", "1.2-alpha2", False), + ("1.2-alpha2.2", "1.2-alpha2.1", False), + ("1.2-beta1", "1.2-alpha1", False), + ("1.2-beta1", "1.2-alpha2.1", False), + ("1.2-beta1", "1.2.0", False), + ("1.2.1", "1.2.0", False), + ("2.0.0", "1.0.0", False), + ("2.0.0.0", "2.0.0", True), + ("2.0.0.0", "2.0.0.0", True), + ("2.0.0.1", "2.0.0.0", False), + ("2.0.1.10", "2.0.0.0", False), + ] + + for a, b, expected in test_cases: + self.assertEqual(JujuVersion(a) == JujuVersion(b), expected) + self.assertEqual(JujuVersion(a) == b, expected) + + def test_comparison(self): + test_cases = [ + ("1.0.0", "1.0.0", False, True), + ("01.0.0", "1.0.0", False, True), + ("10.0.0", "9.0.0", False, False), + ("1.0.0", "1.0.1", True, True), + ("1.0.1", "1.0.0", False, False), + ("1.0.0", "1.1.0", True, True), + ("1.1.0", "1.0.0", False, False), + ("1.0.0", "2.0.0", True, True), + ("1.2-alpha1", "1.2.0", True, True), + ("1.2-alpha2", "1.2-alpha1", False, False), + ("1.2-alpha2.1", "1.2-alpha2", False, False), + ("1.2-alpha2.2", "1.2-alpha2.1", False, False), + ("1.2-beta1", "1.2-alpha1", False, False), + ("1.2-beta1", "1.2-alpha2.1", False, False), + ("1.2-beta1", "1.2.0", True, True), + ("1.2.1", "1.2.0", False, False), + ("2.0.0", "1.0.0", False, False), + ("2.0.0.0", "2.0.0", False, True), + ("2.0.0.0", "2.0.0.0", False, True), + ("2.0.0.1", "2.0.0.0", False, False), + ("2.0.1.10", "2.0.0.0", False, False), + ] + + for a, b, expected_strict, expected_weak in test_cases: + self.assertEqual(JujuVersion(a) < JujuVersion(b), expected_strict) + self.assertEqual(JujuVersion(a) <= JujuVersion(b), expected_weak) + self.assertEqual(JujuVersion(b) > JujuVersion(a), expected_strict) + self.assertEqual(JujuVersion(b) >= JujuVersion(a), expected_weak) + # Implicit conversion. + self.assertEqual(JujuVersion(a) < b, expected_strict) + self.assertEqual(JujuVersion(a) <= b, expected_weak) + self.assertEqual(b > JujuVersion(a), expected_strict) + self.assertEqual(b >= JujuVersion(a), expected_weak) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_lib.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..7e9af8565449b2ff1ef13c905d8e2e201a7cefad --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_lib.py @@ -0,0 +1,545 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
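+#
+# (Overview: these tests cover ops.lib's spec discovery, parsing of the
+# LIBNAME/LIBAPI/LIBPATCH/LIBAUTHOR headers, _Lib ordering, and the
+# ops.lib.use()/autoimport() flow.)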
+ +import os +import sys + +from importlib.machinery import ModuleSpec +from pathlib import Path +from tempfile import mkdtemp, mkstemp +from unittest import TestCase +from unittest.mock import patch +from random import shuffle +from shutil import rmtree +from textwrap import dedent + +import ops.lib + + +def _mklib(topdir: str, pkgname: str, libname: str) -> Path: + """Make a for-testing library. + + Args: + topdir: the toplevel directory in which the package will be created. + This directory must already exist. + pkgname: the name of the package to create in the toplevel directory. + this package will have an empty __init__.py. + libname: the name of the library directory to create under the package. + + Returns: + a :class:`Path` to the ``__init__.py`` of the created library. + This file will not have been created yet. + """ + pkg = Path(topdir) / pkgname + try: + pkg.mkdir() + except FileExistsError: + pass + else: + (pkg / '__init__.py').write_text('') + + lib = pkg / 'opslib' / libname + lib.mkdir(parents=True) + + return lib / '__init__.py' + + +def _flatten(specgen): + return sorted([os.path.dirname(spec.origin) for spec in specgen]) + + +class TestLibFinder(TestCase): + def _mkdtemp(self) -> str: + tmpdir = mkdtemp() + self.addCleanup(rmtree, tmpdir) + return tmpdir + + def test_single(self): + tmpdir = self._mkdtemp() + + self.assertEqual(list(ops.lib._find_all_specs([tmpdir])), []) + + _mklib(tmpdir, "foo", "bar").write_text("") + + self.assertEqual( + _flatten(ops.lib._find_all_specs([tmpdir])), + [tmpdir + '/foo/opslib/bar']) + + def test_multi(self): + tmpdirA = self._mkdtemp() + tmpdirB = self._mkdtemp() + + if tmpdirA > tmpdirB: + # keep sorting happy + tmpdirA, tmpdirB = tmpdirB, tmpdirA + + dirs = [tmpdirA, tmpdirB] + + for top in [tmpdirA, tmpdirB]: + for pkg in ["bar", "baz"]: + for lib in ["meep", "quux"]: + _mklib(top, pkg, lib).write_text("") + + expected = [ + os.path.join(tmpdirA, "bar", "opslib", "meep"), + os.path.join(tmpdirA, "bar", "opslib", "quux"), + os.path.join(tmpdirA, "baz", "opslib", "meep"), + os.path.join(tmpdirA, "baz", "opslib", "quux"), + os.path.join(tmpdirB, "bar", "opslib", "meep"), + os.path.join(tmpdirB, "bar", "opslib", "quux"), + os.path.join(tmpdirB, "baz", "opslib", "meep"), + os.path.join(tmpdirB, "baz", "opslib", "quux"), + ] + + self.assertEqual(_flatten(ops.lib._find_all_specs(dirs)), expected) + + def test_cwd(self): + tmpcwd = self._mkdtemp() + cwd = os.getcwd() + os.chdir(tmpcwd) + self.addCleanup(os.chdir, cwd) + + dirs = [""] + + self.assertEqual(list(ops.lib._find_all_specs(dirs)), []) + + _mklib(tmpcwd, "foo", "bar").write_text("") + + self.assertEqual( + _flatten(ops.lib._find_all_specs(dirs)), + ['./foo/opslib/bar']) + + def test_bogus_topdir(self): + """Check that having one bogus dir in sys.path doesn't cause the finder to abort.""" + tmpdir = self._mkdtemp() + + dirs = [tmpdir, "/bogus"] + + self.assertEqual(list(ops.lib._find_all_specs(dirs)), []) + + _mklib(tmpdir, "foo", "bar").write_text("") + + self.assertEqual( + _flatten(ops.lib._find_all_specs(dirs)), + [tmpdir + '/foo/opslib/bar']) + + def test_bogus_opsdir(self): + """Check that having one bogus opslib doesn't cause the finder to abort.""" + + tmpdir = self._mkdtemp() + + self.assertEqual(list(ops.lib._find_all_specs([tmpdir])), []) + + _mklib(tmpdir, "foo", "bar").write_text('') + + path = Path(tmpdir) / 'baz' + path.mkdir() + (path / 'opslib').write_text('') + + self.assertEqual( + _flatten(ops.lib._find_all_specs([tmpdir])), + [tmpdir + '/foo/opslib/bar']) + + 
def test_namespace(self): + """Check that namespace packages are ignored.""" + tmpdir = self._mkdtemp() + + self.assertEqual(list(ops.lib._find_all_specs([tmpdir])), []) + + _mklib(tmpdir, "foo", "bar") # no __init__.py => a namespace package + + self.assertEqual(list(ops.lib._find_all_specs([tmpdir])), []) + + +class TestLibParser(TestCase): + def _mkmod(self, name: str, content: str = None) -> ModuleSpec: + fd, fname = mkstemp(text=True) + self.addCleanup(os.unlink, fname) + if content is not None: + with os.fdopen(fd, mode='wt', closefd=False) as f: + f.write(dedent(content)) + os.close(fd) + return ModuleSpec(name=name, loader=None, origin=fname) + + def test_simple(self): + """Check that we can load a reasonably straightforward lib""" + m = self._mkmod('foo', ''' + LIBNAME = "foo" + LIBEACH = float('-inf') + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + LIBANANA = True + ''') + lib = ops.lib._parse_lib(m) + self.assertEqual(lib, ops.lib._Lib(None, "foo", "alice@example.com", 2, 42)) + # also check the repr while we're at it + self.assertEqual(repr(lib), '<_Lib foo by alice@example.com, API 2, patch 42>') + + def test_libauthor_has_dashes(self): + m = self._mkmod('foo', ''' + LIBNAME = "foo" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice-someone@example.com" + LIBANANA = True + ''') + lib = ops.lib._parse_lib(m) + self.assertEqual(lib, ops.lib._Lib(None, "foo", "alice-someone@example.com", 2, 42)) + # also check the repr while we're at it + self.assertEqual(repr(lib), '<_Lib foo by alice-someone@example.com, API 2, patch 42>') + + def test_lib_definitions_without_spaces(self): + m = self._mkmod('foo', ''' + LIBNAME="foo" + LIBAPI=2 + LIBPATCH=42 + LIBAUTHOR="alice@example.com" + LIBANANA=True + ''') + lib = ops.lib._parse_lib(m) + self.assertEqual(lib, ops.lib._Lib(None, "foo", "alice@example.com", 2, 42)) + # also check the repr while we're at it + self.assertEqual(repr(lib), '<_Lib foo by alice@example.com, API 2, patch 42>') + + def test_lib_definitions_trailing_comments(self): + m = self._mkmod('foo', ''' + LIBNAME = "foo" # comment style 1 + LIBAPI = 2 = comment style 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com"anything after the quote is a comment + LIBANANA = True + ''') + lib = ops.lib._parse_lib(m) + self.assertEqual(lib, ops.lib._Lib(None, "foo", "alice@example.com", 2, 42)) + # also check the repr while we're at it + self.assertEqual(repr(lib), '<_Lib foo by alice@example.com, API 2, patch 42>') + + def test_incomplete(self): + """Check that if anything is missing, nothing is returned""" + m = self._mkmod('foo', ''' + LIBNAME = "foo" + LIBAPI = 2 + LIBPATCH = 42 + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_too_long(self): + """Check that if the file is too long, nothing is returned""" + m = self._mkmod('foo', '\n' * ops.lib._MAX_LIB_LINES + ''' + LIBNAME = "foo" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_no_origin(self): + """Check that _parse_lib doesn't choke when given a spec with no origin""" + # 'just don't crash' + lib = ops.lib._parse_lib(ModuleSpec(name='hi', loader=None, origin=None)) + self.assertIsNone(lib) + + def test_bogus_origin(self): + """Check that if the origin is messed up, we don't crash""" + # 'just don't crash' + lib = ops.lib._parse_lib(ModuleSpec(name='hi', loader=None, origin='/')) + self.assertIsNone(lib) + + def test_bogus_lib(self): + """Check our behaviour when the lib is messed up""" + # note the syntax error (that is 
carefully chosen to pass the initial regexp) + m = self._mkmod('foo', ''' + LIBNAME = "1' + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_name_is_number(self): + """Check our behaviour when the name in the lib is a number""" + m = self._mkmod('foo', ''' + LIBNAME = 1 + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_api_is_string(self): + """Check our behaviour when the api in the lib is a string""" + m = self._mkmod('foo', ''' + LIBNAME = 'foo' + LIBAPI = '2' + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_patch_is_string(self): + """Check our behaviour when the patch in the lib is a string""" + m = self._mkmod('foo', ''' + LIBNAME = 'foo' + LIBAPI = 2 + LIBPATCH = '42' + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_author_is_number(self): + """Check our behaviour when the author in the lib is a number""" + m = self._mkmod('foo', ''' + LIBNAME = 'foo' + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = 43 + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_other_encoding(self): + """Check that we don't crash when a library is not UTF-8""" + m = self._mkmod('foo') + with open(m.origin, 'wt', encoding='latin-1') as f: + f.write(dedent(''' + LIBNAME = "foo" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + LIBANANA = "Ñoño" + ''')) + self.assertIsNone(ops.lib._parse_lib(m)) + + +class TestLib(TestCase): + + def test_lib_comparison(self): + self.assertNotEqual( + ops.lib._Lib(None, "foo", "alice@example.com", 1, 0), + ops.lib._Lib(None, "bar", "bob@example.com", 0, 1)) + self.assertEqual( + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1), + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1)) + + self.assertLess( + ops.lib._Lib(None, "foo", "alice@example.com", 1, 0), + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1)) + self.assertLess( + ops.lib._Lib(None, "foo", "alice@example.com", 0, 1), + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1)) + self.assertLess( + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1), + ops.lib._Lib(None, "foo", "bob@example.com", 1, 1)) + self.assertLess( + ops.lib._Lib(None, "bar", "alice@example.com", 1, 1), + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1)) + + with self.assertRaises(TypeError): + 42 < ops.lib._Lib(None, "bar", "alice@example.com", 1, 1) + with self.assertRaises(TypeError): + ops.lib._Lib(None, "bar", "alice@example.com", 1, 1) < 42 + + # these two might be surprising in that they don't raise an exception, + # but they are correct: our __eq__ bailing means Python falls back to + # its default of checking object identity. 
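+        # (Presumably __eq__ bails by returning NotImplemented for non-_Lib
+        # operands, so `lib == 42` falls back to identity comparison and is False.)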
+ self.assertNotEqual(ops.lib._Lib(None, "bar", "alice@example.com", 1, 1), 42) + self.assertNotEqual(42, ops.lib._Lib(None, "bar", "alice@example.com", 1, 1)) + + def test_lib_order(self): + a = ops.lib._Lib(None, "bar", "alice@example.com", 1, 0) + b = ops.lib._Lib(None, "bar", "alice@example.com", 1, 1) + c = ops.lib._Lib(None, "foo", "alice@example.com", 1, 0) + d = ops.lib._Lib(None, "foo", "alice@example.com", 1, 1) + e = ops.lib._Lib(None, "foo", "bob@example.com", 1, 1) + + for i in range(20): + with self.subTest(i): + libs = [a, b, c, d, e] + shuffle(libs) + self.assertEqual(sorted(libs), [a, b, c, d, e]) + + def test_use_bad_args_types(self): + with self.assertRaises(TypeError): + ops.lib.use(1, 2, 'bob@example.com') + with self.assertRaises(TypeError): + ops.lib.use('foo', '2', 'bob@example.com') + with self.assertRaises(TypeError): + ops.lib.use('foo', 2, ops.lib.use) + + def test_use_bad_args_values(self): + with self.assertRaises(ValueError): + ops.lib.use('--help', 2, 'alice@example.com') + with self.assertRaises(ValueError): + ops.lib.use('foo', -2, 'alice@example.com') + with self.assertRaises(ValueError): + ops.lib.use('foo', 1, 'example.com') + + +@patch('sys.path', new=()) +class TestLibFunctional(TestCase): + + def _mkdtemp(self) -> str: + tmpdir = mkdtemp() + self.addCleanup(rmtree, tmpdir) + return tmpdir + + def test_use_finds_subs(self): + """Test that ops.lib.use("baz") works when baz is inside a package in the python path.""" + tmpdir = self._mkdtemp() + sys.path = [tmpdir] + + _mklib(tmpdir, "foo", "bar").write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + """)) + + # autoimport to reset things + ops.lib.autoimport() + + # ops.lib.use done by charm author + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBNAME, 'baz') + self.assertEqual(baz.LIBAPI, 2) + self.assertEqual(baz.LIBPATCH, 42) + self.assertEqual(baz.LIBAUTHOR, 'alice@example.com') + + def test_use_finds_best_same_toplevel(self): + """Test that ops.lib.use("baz") works when there are two baz in the same toplevel.""" + + pkg_b = "foo" + lib_b = "bar" + patch_b = 40 + for pkg_a in ["foo", "fooA"]: + for lib_a in ["bar", "barA"]: + if (pkg_a, lib_a) == (pkg_b, lib_b): + # everything-is-the-same :-) + continue + for patch_a in [38, 42]: + desc = "A: {}/{}/{}; B: {}/{}/{}".format( + pkg_a, lib_a, patch_a, pkg_b, lib_b, patch_b) + with self.subTest(desc): + tmpdir = self._mkdtemp() + sys.path = [tmpdir] + + _mklib(tmpdir, pkg_a, lib_a).write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = {} + LIBAUTHOR = "alice@example.com" + """).format(patch_a)) + + _mklib(tmpdir, pkg_b, lib_b).write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = {} + LIBAUTHOR = "alice@example.com" + """).format(patch_b)) + + # autoimport to reset things + ops.lib.autoimport() + + # ops.lib.use done by charm author + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBNAME, 'baz') + self.assertEqual(baz.LIBAPI, 2) + self.assertEqual(baz.LIBPATCH, max(patch_a, patch_b)) + self.assertEqual(baz.LIBAUTHOR, 'alice@example.com') + + def test_use_finds_best_diff_toplevel(self): + """Test that ops.lib.use("baz") works when there are two baz in the different toplevels.""" + + pkg_b = "foo" + lib_b = "bar" + patch_b = 40 + for pkg_a in ["foo", "fooA"]: + for lib_a in ["bar", "barA"]: + for patch_a in [38, 42]: + desc = "A: {}/{}/{}; B: {}/{}/{}".format( + pkg_a, lib_a, patch_a, pkg_b, lib_b, patch_b) + with 
self.subTest(desc): + tmpdirA = self._mkdtemp() + tmpdirB = self._mkdtemp() + sys.path = [tmpdirA, tmpdirB] + + _mklib(tmpdirA, pkg_a, lib_a).write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = {} + LIBAUTHOR = "alice@example.com" + """).format(patch_a)) + + _mklib(tmpdirB, pkg_b, lib_b).write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = {} + LIBAUTHOR = "alice@example.com" + """).format(patch_b)) + + # autoimport to reset things + ops.lib.autoimport() + + # ops.lib.use done by charm author + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBNAME, 'baz') + self.assertEqual(baz.LIBAPI, 2) + self.assertEqual(baz.LIBPATCH, max(patch_a, patch_b)) + self.assertEqual(baz.LIBAUTHOR, 'alice@example.com') + + def test_none_found(self): + with self.assertRaises(ImportError): + ops.lib.use('foo', 1, 'alice@example.com') + + def test_from_scratch(self): + tmpdir = self._mkdtemp() + sys.path = [tmpdir] + + _mklib(tmpdir, "foo", "bar").write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + """)) + + # hard reset + ops.lib._libraries = None + + # sanity check that ops.lib.use works + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBAPI, 2) + + def test_others_found(self): + tmpdir = self._mkdtemp() + sys.path = [tmpdir] + + _mklib(tmpdir, "foo", "bar").write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + """)) + + # reload + ops.lib.autoimport() + + # sanity check that ops.lib.use works + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBAPI, 2) + + with self.assertRaises(ImportError): + ops.lib.use('baz', 1, 'alice@example.com') + + with self.assertRaises(ImportError): + ops.lib.use('baz', 2, 'bob@example.com') diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_log.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_log.py new file mode 100644 index 0000000000000000000000000000000000000000..b7f74d5c901ffb7833c1e5523f9f69a1959999e1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_log.py @@ -0,0 +1,140 @@ +#!/usr/bin/python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
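+#
+# (Overview: these tests drive ops.log.setup_root_logging against a fake
+# model backend, checking juju-log forwarding, level filtering, and debug
+# output to stderr.)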
+
+import io
+import unittest
+from unittest.mock import patch
+import importlib
+
+import logging
+import ops.log
+
+
+class FakeModelBackend:
+
+    def __init__(self):
+        self._calls = []
+
+    def calls(self, clear=False):
+        calls = self._calls
+        if clear:
+            self._calls = []
+        return calls
+
+    def juju_log(self, level, message):
+        self._calls.append((level, message))
+
+
+def reset_logging():
+    logging.shutdown()
+    importlib.reload(logging)
+
+
+class TestLogging(unittest.TestCase):
+
+    def setUp(self):
+        self.backend = FakeModelBackend()
+
+        reset_logging()
+        self.addCleanup(reset_logging)
+
+    def test_default_logging(self):
+        ops.log.setup_root_logging(self.backend)
+
+        logger = logging.getLogger()
+        self.assertEqual(logger.level, logging.DEBUG)
+        self.assertIsInstance(logger.handlers[0], ops.log.JujuLogHandler)
+
+        test_cases = [(
+            lambda: logger.critical('critical'), [('CRITICAL', 'critical')]
+        ), (
+            lambda: logger.error('error'), [('ERROR', 'error')]
+        ), (
+            lambda: logger.warning('warning'), [('WARNING', 'warning')]
+        ), (
+            lambda: logger.info('info'), [('INFO', 'info')]
+        ), (
+            lambda: logger.debug('debug'), [('DEBUG', 'debug')]
+        )]
+
+        for do, res in test_cases:
+            do()
+            calls = self.backend.calls(clear=True)
+            self.assertEqual(calls, res)
+
+    def test_handler_filtering(self):
+        logger = logging.getLogger()
+        logger.setLevel(logging.INFO)
+        logger.addHandler(ops.log.JujuLogHandler(self.backend, logging.WARNING))
+        logger.info('foo')
+        self.assertEqual(self.backend.calls(), [])
+        logger.warning('bar')
+        self.assertEqual(self.backend.calls(), [('WARNING', 'bar')])
+
+    def test_no_stderr_without_debug(self):
+        buffer = io.StringIO()
+        with patch('sys.stderr', buffer):
+            ops.log.setup_root_logging(self.backend, debug=False)
+            logger = logging.getLogger()
+            logger.debug('debug message')
+            logger.info('info message')
+            logger.warning('warning message')
+            logger.critical('critical message')
+        self.assertEqual(
+            self.backend.calls(),
+            [('DEBUG', 'debug message'),
+             ('INFO', 'info message'),
+             ('WARNING', 'warning message'),
+             ('CRITICAL', 'critical message'),
+             ])
+        self.assertEqual(buffer.getvalue(), "")
+
+    def test_debug_logging(self):
+        buffer = io.StringIO()
+        with patch('sys.stderr', buffer):
+            ops.log.setup_root_logging(self.backend, debug=True)
+            logger = logging.getLogger()
+            logger.debug('debug message')
+            logger.info('info message')
+            logger.warning('warning message')
+            logger.critical('critical message')
+        self.assertEqual(
+            self.backend.calls(),
+            [('DEBUG', 'debug message'),
+             ('INFO', 'info message'),
+             ('WARNING', 'warning message'),
+             ('CRITICAL', 'critical message'),
+             ])
+        self.assertRegex(
+            buffer.getvalue(),
+            r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d DEBUG debug message\n"
+            r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d INFO info message\n"
+            r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d WARNING warning message\n"
+            r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d CRITICAL critical message\n"
+        )
+
+    def test_reduced_logging(self):
+        ops.log.setup_root_logging(self.backend)
+        logger = logging.getLogger()
+        logger.setLevel(logging.WARNING)
+        logger.debug('debug')
+        logger.info('info')
+        logger.warning('warning')
+        self.assertEqual(self.backend.calls(), [('WARNING', 'warning')])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_main.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_main.py
new file mode 100755
index 0000000000000000000000000000000000000000..f62c859332d672fb4140488f2c9f1ad3841b30eb
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_main.py
@@ -0,0 +1,878 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import base64
+import logging
+import os
+import pickle
+import shutil
+import subprocess
+import sys
+import tempfile
+import unittest
+import importlib.util
+import warnings
+from pathlib import Path
+from unittest.mock import patch
+
+from ops.charm import (
+    CharmBase,
+    CharmEvents,
+    HookEvent,
+    InstallEvent,
+    StartEvent,
+    ConfigChangedEvent,
+    UpgradeCharmEvent,
+    UpdateStatusEvent,
+    LeaderSettingsChangedEvent,
+    RelationJoinedEvent,
+    RelationChangedEvent,
+    RelationDepartedEvent,
+    RelationBrokenEvent,
+    RelationEvent,
+    StorageAttachedEvent,
+    ActionEvent,
+    CollectMetricsEvent,
+)
+from ops.main import main
+from ops.version import version
+
+from .test_helpers import fake_script, fake_script_calls
+
+# This relies on the expected repository structure to find a path to the
+# source of the charm under test.
+TEST_CHARM_DIR = Path(__file__ + '/../charms/test_main').resolve()
+
+VERSION_LOGLINE = [
+    'juju-log', '--log-level', 'DEBUG',
+    'Operator Framework {} up and running.'.format(version),
+]
+logger = logging.getLogger(__name__)
+
+
+class SymlinkTargetError(Exception):
+    pass
+
+
+class EventSpec:
+    def __init__(self, event_type, event_name, env_var=None,
+                 relation_id=None, remote_app=None, remote_unit=None,
+                 charm_config=None, model_name=None):
+        self.event_type = event_type
+        self.event_name = event_name
+        self.env_var = env_var
+        self.relation_id = relation_id
+        self.remote_app = remote_app
+        self.remote_unit = remote_unit
+        self.charm_config = charm_config
+        self.model_name = model_name
+
+
+class CharmInitTestCase(unittest.TestCase):
+
+    def _check(self, charm_class):
+        """Helper for the tests below."""
+        fake_environ = {
+            'JUJU_UNIT_NAME': 'test_main/0',
+            'JUJU_MODEL_NAME': 'mymodel',
+        }
+        with patch.dict(os.environ, fake_environ):
+            with patch('ops.main._emit_charm_event'):
+                with patch('ops.main._get_charm_dir') as mock_charmdir:
+                    with tempfile.TemporaryDirectory() as tmpdirname:
+                        tmpdirname = Path(tmpdirname)
+                        fake_metadata = tmpdirname / 'metadata.yaml'
+                        with fake_metadata.open('wb') as fh:
+                            fh.write(b'name: test')
+                        mock_charmdir.return_value = tmpdirname
+
+                        with warnings.catch_warnings(record=True) as warnings_cm:
+                            main(charm_class)
+
+        return warnings_cm
+
+    def test_init_signature_passthrough(self):
+        class MyCharm(CharmBase):
+
+            def __init__(self, *args):
+                super().__init__(*args)
+
+        warn_cm = self._check(MyCharm)
+        self.assertFalse(warn_cm)
+
+    def test_init_signature_both_arguments(self):
+        class MyCharm(CharmBase):
+
+            def __init__(self, framework, somekey):
+                super().__init__(framework, somekey)
+
+        warn_cm = self._check(MyCharm)
+        self.assertEqual(len(warn_cm), 1)
+        (warn,) = warn_cm
+        self.assertTrue(issubclass(warn.category, DeprecationWarning))
+        self.assertEqual(str(warn.message), (
+            "the second argument, 'key', has been deprecated and will be removed "
+            "after the 0.7 release"))
+
+    def test_init_signature_only_framework(self):
+        class MyCharm(CharmBase):
+
+            def __init__(self, framework):
+                super().__init__(framework)
+
+        warn_cm = self._check(MyCharm)
+        self.assertFalse(warn_cm)
+
+
+class _TestMain(abc.ABC):
+
+    @abc.abstractmethod
+    def _setup_entry_point(self, directory, entry_point):
+        """Set up the given entry point in the given directory.
+
+        If not using dispatch, that would be a symlink (named after the hook)
+        pointing at src/charm.py; if using dispatch that would be the dispatch
+        symlink. It could also not be a symlink...
+        """
+        return NotImplemented
+
+    @abc.abstractmethod
+    def _call_event(self, rel_path, env):
+        """Set up the environment and call (i.e. run) the given event."""
+        return NotImplemented
+
+    @abc.abstractmethod
+    def test_setup_event_links(self):
+        """Test auto-creation of symlinks caused by initial events.
+
+        Depending on the combination of dispatch and non-dispatch, this should
+        be checking for the creation or the _lack_ of creation, as appropriate.
+        """
+        return NotImplemented
+
+    def setUp(self):
+        self._setup_charm_dir()
+
+        _, tmp_file = tempfile.mkstemp(dir=str(self._tmpdir))
+        self._state_file = Path(tmp_file)
+
+        # Relations events are defined dynamically and modify the class attributes.
+        # We use a subclass temporarily to prevent these side effects from leaking.
+        class TestCharmEvents(CharmEvents):
+            pass
+        CharmBase.on = TestCharmEvents()
+
+        def cleanup():
+            CharmBase.on = CharmEvents()
+        self.addCleanup(cleanup)
+
+        fake_script(self, 'juju-log', "exit 0")
+
+        # set to something other than None for tests that care
+        self.stdout = None
+        self.stderr = None
+
+    def _setup_charm_dir(self):
+        self._tmpdir = Path(tempfile.mkdtemp(prefix='tmp-ops-test-'))
+        self.addCleanup(shutil.rmtree, str(self._tmpdir))
+        self.JUJU_CHARM_DIR = self._tmpdir / 'test_main'
+        self.hooks_dir = self.JUJU_CHARM_DIR / 'hooks'
+        charm_path = str(self.JUJU_CHARM_DIR / 'src/charm.py')
+        self.charm_exec_path = os.path.relpath(charm_path,
+                                               str(self.hooks_dir))
+        shutil.copytree(str(TEST_CHARM_DIR), str(self.JUJU_CHARM_DIR))
+
+        charm_spec = importlib.util.spec_from_file_location("charm", charm_path)
+        self.charm_module = importlib.util.module_from_spec(charm_spec)
+        charm_spec.loader.exec_module(self.charm_module)
+
+        self._prepare_initial_hooks()
+
+    def _prepare_initial_hooks(self):
+        initial_hooks = ('install', 'start', 'upgrade-charm', 'disks-storage-attached')
+        self.hooks_dir.mkdir()
+        for hook in initial_hooks:
+            self._setup_entry_point(self.hooks_dir, hook)
+
+    def _prepare_actions(self):
+        # TODO: jam 2020-06-16 this same work could be done just by triggering the
+        # 'install' event of the charm; it might be cleaner to not set up entry
+        # points directly here.
+        actions_dir_name = 'actions'
+        actions_dir = self.JUJU_CHARM_DIR / actions_dir_name
+        actions_dir.mkdir()
+        for action_name in ('start', 'foo-bar', 'get-model-name', 'get-status'):
+            self._setup_entry_point(actions_dir, action_name)
+
+    def _read_and_clear_state(self):
+        state = None
+        if self._state_file.stat().st_size:
+            with self._state_file.open('r+b') as state_file:
+                state = pickle.load(state_file)
+                state_file.truncate(0)
+        return state
+
+    def _simulate_event(self, event_spec):
+        ppath = Path(__file__).parent
+        pypath = str(ppath.parent)
+        if 'PYTHONPATH' in os.environ:
+            pypath += ':' + os.environ['PYTHONPATH']
+        env = {
+            'PATH': "{}:{}".format(ppath / 'bin', os.environ['PATH']),
+            'PYTHONPATH': pypath,
+            'JUJU_CHARM_DIR': str(self.JUJU_CHARM_DIR),
+            'JUJU_UNIT_NAME': 'test_main/0',
+            'CHARM_CONFIG': event_spec.charm_config,
+        }
+        if issubclass(event_spec.event_type, RelationEvent):
+            rel_name = event_spec.event_name.split('_')[0]
+            env.update({
+                'JUJU_RELATION': rel_name,
+                'JUJU_RELATION_ID': str(event_spec.relation_id),
+            })
+            remote_app = event_spec.remote_app
+            # For juju < 2.7, the app name is extracted from JUJU_REMOTE_UNIT.
+            if remote_app is not None:
+                env['JUJU_REMOTE_APP'] = remote_app
+
+            remote_unit = event_spec.remote_unit
+            if remote_unit is None:
+                remote_unit = ''
+
+            env['JUJU_REMOTE_UNIT'] = remote_unit
+        else:
+            env.update({
+                'JUJU_REMOTE_UNIT': '',
+                'JUJU_REMOTE_APP': '',
+            })
+        if issubclass(event_spec.event_type, ActionEvent):
+            event_filename = event_spec.event_name[:-len('_action')].replace('_', '-')
+            env.update({
+                event_spec.env_var: event_filename,
+            })
+            if event_spec.env_var == 'JUJU_ACTION_NAME':
+                event_dir = 'actions'
+            else:
+                raise RuntimeError('invalid envar name specified for an action event')
+        else:
+            event_filename = event_spec.event_name.replace('_', '-')
+            event_dir = 'hooks'
+        if event_spec.model_name is not None:
+            env['JUJU_MODEL_NAME'] = event_spec.model_name
+
+        self._call_event(Path(event_dir, event_filename), env)
+        return self._read_and_clear_state()
+
+    def test_event_reemitted(self):
+        # base64 encoding is used to avoid null bytes.
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+
+        # First run "install" to make sure all hooks are set up.
+        state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config))
+        self.assertEqual(state['observed_event_types'], [InstallEvent])
+
+        state = self._simulate_event(EventSpec(ConfigChangedEvent, 'config-changed',
+                                               charm_config=charm_config))
+        self.assertEqual(state['observed_event_types'], [ConfigChangedEvent])
+
+        # Re-emit should pick the deferred config-changed.
+        state = self._simulate_event(EventSpec(UpdateStatusEvent, 'update-status',
+                                               charm_config=charm_config))
+        self.assertEqual(state['observed_event_types'], [ConfigChangedEvent, UpdateStatusEvent])
+
+    def test_no_reemission_on_collect_metrics(self):
+        # base64 encoding is used to avoid null bytes.
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+        fake_script(self, 'add-metric', 'exit 0')
+
+        # First run "install" to make sure all hooks are set up.
+ state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + state = self._simulate_event(EventSpec(ConfigChangedEvent, 'config-changed', + charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [ConfigChangedEvent]) + + # Re-emit should not pick the deferred config-changed because + # collect-metrics runs in a restricted context. + state = self._simulate_event(EventSpec(CollectMetricsEvent, 'collect-metrics', + charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [CollectMetricsEvent]) + + def test_multiple_events_handled(self): + self._prepare_actions() + + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + actions_charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'USE_ACTIONS': True, + })) + + fake_script(self, 'action-get', "echo '{}'") + + # Sample events with a different amount of dashes used + # and with endpoints from different sections of metadata.yaml + events_under_test = [( + EventSpec(InstallEvent, 'install', + charm_config=charm_config), + {}, + ), ( + EventSpec(StartEvent, 'start', + charm_config=charm_config), + {}, + ), ( + EventSpec(UpdateStatusEvent, 'update_status', + charm_config=charm_config), + {}, + ), ( + EventSpec(LeaderSettingsChangedEvent, 'leader_settings_changed', + charm_config=charm_config), + {}, + ), ( + EventSpec(RelationJoinedEvent, 'db_relation_joined', + relation_id=1, + remote_app='remote', remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'db', + 'relation_id': 1, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationChangedEvent, 'mon_relation_changed', + relation_id=2, + remote_app='remote', remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationChangedEvent, 'mon_relation_changed', + relation_id=2, + remote_app='remote', remote_unit=None, + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': None}, + ), ( + EventSpec(RelationDepartedEvent, 'mon_relation_departed', + relation_id=2, + remote_app='remote', remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationBrokenEvent, 'ha_relation_broken', + relation_id=3, + charm_config=charm_config), + {'relation_name': 'ha', + 'relation_id': 3}, + ), ( + # Events without a remote app specified (for Juju < 2.7). 
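+            # (Here the framework is expected to fall back to deriving the app
+            # name from JUJU_REMOTE_UNIT, so the expected app_name is still 'remote'.)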
+ EventSpec(RelationJoinedEvent, 'db_relation_joined', + relation_id=1, + remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'db', + 'relation_id': 1, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationChangedEvent, 'mon_relation_changed', + relation_id=2, + remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationDepartedEvent, 'mon_relation_departed', + relation_id=2, + remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(ActionEvent, 'start_action', + env_var='JUJU_ACTION_NAME', + charm_config=actions_charm_config), + {}, + ), ( + EventSpec(ActionEvent, 'foo_bar_action', + env_var='JUJU_ACTION_NAME', + charm_config=actions_charm_config), + {}, + )] + + logger.debug('Expected events %s', events_under_test) + + # First run "install" to make sure all hooks are set up. + self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + + # Simulate hook executions for every event. + for event_spec, expected_event_data in events_under_test: + state = self._simulate_event(event_spec) + + state_key = 'on_' + event_spec.event_name + handled_events = state.get(state_key, []) + + # Make sure that a handler for that event was called once. + self.assertEqual(len(handled_events), 1) + # Make sure the event handled by the Charm has the right type. + handled_event_type = handled_events[0] + self.assertEqual(handled_event_type, event_spec.event_type) + + self.assertEqual(state['observed_event_types'], [event_spec.event_type]) + + if event_spec.event_name in expected_event_data: + self.assertEqual(state[event_spec.event_name + '_data'], + expected_event_data[event_spec.event_name]) + + def test_event_not_implemented(self): + """Make sure events without implementation do not cause non-zero exit. + """ + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + # Simulate a scenario where there is a symlink for an event that + # a charm does not know how to handle. + hook_path = self.JUJU_CHARM_DIR / 'hooks/not-implemented-event' + # This will be cleared up in tearDown. 
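+        # Point the unknown hook at the charm entry point so it still executes.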
+        hook_path.symlink_to('install')
+
+        try:
+            self._simulate_event(EventSpec(HookEvent, 'not-implemented-event',
+                                           charm_config=charm_config))
+        except subprocess.CalledProcessError:
+            self.fail('Event simulation for an unsupported event '
+                      'resulted in a non-zero exit code')
+
+    def test_collect_metrics(self):
+        indicator_file = self.JUJU_CHARM_DIR / 'indicator'
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+            'INDICATOR_FILE': indicator_file
+        }))
+        fake_script(self, 'add-metric', 'exit 0')
+        fake_script(self, 'juju-log', 'exit 0')
+        self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config))
+        # Clear the calls made during 'install'.
+        fake_script_calls(self, clear=True)
+        self._simulate_event(EventSpec(CollectMetricsEvent, 'collect_metrics',
+                                       charm_config=charm_config))
+
+        expected = [
+            VERSION_LOGLINE,
+            ['juju-log', '--log-level', 'DEBUG', 'Emitting Juju event collect_metrics.'],
+            ['add-metric', '--labels', 'bar=4.2', 'foo=42'],
+        ]
+        calls = fake_script_calls(self)
+
+        if self.has_dispatch:
+            expected.insert(1, [
+                'juju-log', '--log-level', 'DEBUG',
+                'Legacy hooks/collect-metrics does not exist.'])
+
+        self.assertEqual(calls, expected)
+
+    def test_logger(self):
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+            'USE_LOG_ACTIONS': True,
+        }))
+        fake_script(self, 'action-get', "echo '{}'")
+
+        test_cases = [(
+            EventSpec(ActionEvent, 'log_critical_action', env_var='JUJU_ACTION_NAME',
+                      charm_config=charm_config),
+            ['juju-log', '--log-level', 'CRITICAL', 'super critical'],
+        ), (
+            EventSpec(ActionEvent, 'log_error_action',
+                      env_var='JUJU_ACTION_NAME',
+                      charm_config=charm_config),
+            ['juju-log', '--log-level', 'ERROR', 'grave error'],
+        ), (
+            EventSpec(ActionEvent, 'log_warning_action',
+                      env_var='JUJU_ACTION_NAME',
+                      charm_config=charm_config),
+            ['juju-log', '--log-level', 'WARNING', 'wise warning'],
+        ), (
+            EventSpec(ActionEvent, 'log_info_action',
+                      env_var='JUJU_ACTION_NAME',
+                      charm_config=charm_config),
+            ['juju-log', '--log-level', 'INFO', 'useful info'],
+        )]
+
+        # Set up action symlinks.
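+        # The install event creates the action entry points; each case below
+        # then runs one action and checks the matching juju-log invocation.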
+ self._simulate_event(EventSpec(InstallEvent, 'install', + charm_config=charm_config)) + + for event_spec, calls in test_cases: + self._simulate_event(event_spec) + self.assertIn(calls, fake_script_calls(self, clear=True)) + + def test_excepthook(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'TRY_EXCEPTHOOK': True, + })) + with self.assertRaises(subprocess.CalledProcessError): + self._simulate_event(EventSpec(InstallEvent, 'install', + charm_config=charm_config)) + + calls = [' '.join(i) for i in fake_script_calls(self)] + + self.assertEqual(calls.pop(0), ' '.join(VERSION_LOGLINE)) + + if self.has_dispatch: + self.assertEqual( + calls.pop(0), + 'juju-log --log-level DEBUG Legacy hooks/install does not exist.') + + self.maxDiff = None + self.assertRegex( + calls[0], + '(?ms)juju-log --log-level ERROR Uncaught exception while in charm code:\n' + 'Traceback .most recent call last.:\n' + ' .*' + ' raise RuntimeError."failing as requested".\n' + 'RuntimeError: failing as requested' + ) + self.assertEqual(len(calls), 1, "expected 1 call, but got extra: {}".format(calls[1:])) + + def test_sets_model_name(self): + self._prepare_actions() + + actions_charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'USE_ACTIONS': True, + })) + + fake_script(self, 'action-get', "echo '{}'") + state = self._simulate_event(EventSpec( + ActionEvent, 'get_model_name_action', + env_var='JUJU_ACTION_NAME', + model_name='test-model-name', + charm_config=actions_charm_config)) + self.assertIsNotNone(state) + self.assertEqual(state['_on_get_model_name_action'], ['test-model-name']) + + def test_has_valid_status(self): + self._prepare_actions() + + actions_charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'USE_ACTIONS': True, + })) + + fake_script(self, 'action-get', "echo '{}'") + fake_script(self, 'status-get', """echo '{"status": "unknown", "message": ""}'""") + state = self._simulate_event(EventSpec( + ActionEvent, 'get_status_action', + env_var='JUJU_ACTION_NAME', + charm_config=actions_charm_config)) + self.assertIsNotNone(state) + self.assertEqual(state['status_name'], 'unknown') + self.assertEqual(state['status_message'], '') + fake_script( + self, 'status-get', """echo '{"status": "blocked", "message": "help meeee"}'""") + state = self._simulate_event(EventSpec( + ActionEvent, 'get_status_action', + env_var='JUJU_ACTION_NAME', + charm_config=actions_charm_config)) + self.assertIsNotNone(state) + self.assertEqual(state['status_name'], 'blocked') + self.assertEqual(state['status_message'], 'help meeee') + + def test_foo(self): + # base64 encoding is used to avoid null bytes. + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + # First run "install" to make sure all hooks are set up. + state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + +class TestMainWithNoDispatch(_TestMain, unittest.TestCase): + has_dispatch = False + hooks_are_symlinks = True + + def _setup_entry_point(self, directory, entry_point): + path = directory / entry_point + path.symlink_to(self.charm_exec_path) + + def _call_event(self, rel_path, env): + event_file = self.JUJU_CHARM_DIR / rel_path + # Note that sys.executable is used to make sure we are using the same + # interpreter for the child process to support virtual environments. 
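+        # check=True converts a non-zero exit into CalledProcessError, which
+        # several tests assert on.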
+ subprocess.run( + [sys.executable, str(event_file)], + check=True, env=env, cwd=str(self.JUJU_CHARM_DIR)) + + def test_setup_event_links(self): + """Test auto-creation of symlinks caused by initial events. + """ + all_event_hooks = ['hooks/' + e.replace("_", "-") + for e in self.charm_module.Charm.on.events().keys()] + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + initial_events = { + EventSpec(InstallEvent, 'install', charm_config=charm_config), + EventSpec(StorageAttachedEvent, 'disks-storage-attached', charm_config=charm_config), + EventSpec(StartEvent, 'start', charm_config=charm_config), + EventSpec(UpgradeCharmEvent, 'upgrade-charm', charm_config=charm_config), + } + + def _assess_event_links(event_spec): + self.assertTrue(self.hooks_dir / event_spec.event_name in self.hooks_dir.iterdir()) + for event_hook in all_event_hooks: + hook_path = self.JUJU_CHARM_DIR / event_hook + self.assertTrue(hook_path.exists(), 'Missing hook: ' + event_hook) + if self.hooks_are_symlinks: + self.assertEqual(os.readlink(str(hook_path)), self.charm_exec_path) + + for initial_event in initial_events: + self._setup_charm_dir() + + self._simulate_event(initial_event) + _assess_event_links(initial_event) + # Make sure it is idempotent. + self._simulate_event(initial_event) + _assess_event_links(initial_event) + + def test_setup_action_links(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + # foo-bar is one of the actions defined in actions.yaml + action_hook = self.JUJU_CHARM_DIR / 'actions' / 'foo-bar' + self.assertTrue(action_hook.exists()) + + +class TestMainWithNoDispatchButJujuIsDispatchAware(TestMainWithNoDispatch): + def _call_event(self, rel_path, env): + env["JUJU_DISPATCH_PATH"] = str(rel_path) + super()._call_event(rel_path, env) + + +class TestMainWithNoDispatchButScriptsAreCopies(TestMainWithNoDispatch): + hooks_are_symlinks = False + + def _setup_entry_point(self, directory, entry_point): + charm_path = str(self.JUJU_CHARM_DIR / 'src/charm.py') + path = directory / entry_point + shutil.copy(charm_path, str(path)) + + +class TestMainWithDispatch(_TestMain, unittest.TestCase): + has_dispatch = True + + def _setup_entry_point(self, directory, entry_point): + path = self.JUJU_CHARM_DIR / 'dispatch' + if not path.exists(): + path.symlink_to('src/charm.py') + + def _call_event(self, rel_path, env): + env["JUJU_DISPATCH_PATH"] = str(rel_path) + dispatch = self.JUJU_CHARM_DIR / 'dispatch' + subprocess.run( + [sys.executable, str(dispatch)], + stdout=self.stdout, + stderr=self.stderr, + check=True, env=env, cwd=str(self.JUJU_CHARM_DIR)) + + def test_setup_event_links(self): + """Test auto-creation of symlinks caused by initial events does _not_ happen when using dispatch. 
+ """ + all_event_hooks = ['hooks/' + e.replace("_", "-") + for e in self.charm_module.Charm.on.events().keys()] + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + initial_events = { + EventSpec(InstallEvent, 'install', charm_config=charm_config), + EventSpec(StorageAttachedEvent, 'disks-storage-attached', charm_config=charm_config), + EventSpec(StartEvent, 'start', charm_config=charm_config), + EventSpec(UpgradeCharmEvent, 'upgrade-charm', charm_config=charm_config), + } + + def _assess_event_links(event_spec): + self.assertNotIn(self.hooks_dir / event_spec.event_name, self.hooks_dir.iterdir()) + for event_hook in all_event_hooks: + self.assertFalse((self.JUJU_CHARM_DIR / event_hook).exists(), + 'Spurious hook: ' + event_hook) + + for initial_event in initial_events: + self._setup_charm_dir() + + self._simulate_event(initial_event) + _assess_event_links(initial_event) + + def test_hook_and_dispatch(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + old_path = self.fake_script_path + self.fake_script_path = self.hooks_dir + fake_script(self, 'install', 'exit 0') + state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + + # the script was called, *and*, the .on. was called + self.assertEqual(fake_script_calls(self), [['install', '']]) + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + self.fake_script_path = old_path + self.assertEqual(fake_script_calls(self), [ + VERSION_LOGLINE, + ['juju-log', '--log-level', 'INFO', 'Running legacy hooks/install.'], + ['juju-log', '--log-level', 'DEBUG', 'Legacy hooks/install exited with status 0.'], + ['juju-log', '--log-level', 'DEBUG', 'Emitting Juju event install.'], + ]) + + def test_non_executable_hook_and_dispatch(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + (self.hooks_dir / "install").write_text("") + state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + self.assertEqual(fake_script_calls(self), [ + VERSION_LOGLINE, + ['juju-log', '--log-level', 'WARNING', + 'Legacy hooks/install exists but is not executable.'], + ['juju-log', '--log-level', 'DEBUG', 'Emitting Juju event install.'], + ]) + + def test_hook_and_dispatch_with_failing_hook(self): + self.stdout = self.stderr = tempfile.TemporaryFile() + self.addCleanup(self.stdout.close) + + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + old_path = self.fake_script_path + self.fake_script_path = self.hooks_dir + fake_script(self, 'install', 'exit 42') + event = EventSpec(InstallEvent, 'install', charm_config=charm_config) + with self.assertRaises(subprocess.CalledProcessError): + self._simulate_event(event) + self.fake_script_path = old_path + + self.stdout.seek(0) + self.assertEqual(self.stdout.read(), b'') + calls = fake_script_calls(self) + expected = [ + VERSION_LOGLINE, + ['juju-log', '--log-level', 'INFO', 'Running legacy hooks/install.'], + ['juju-log', '--log-level', 'WARNING', 'Legacy hooks/install exited with status 42.'], + ] + self.assertEqual(calls, expected) + + def test_hook_and_dispatch_but_hook_is_dispatch(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + event = EventSpec(InstallEvent, 'install', charm_config=charm_config) + hook_path = self.hooks_dir / 'install' + for ((rel, ind), path) 
in { + # relative and indirect + (True, True): Path('../dispatch'), + # relative and direct + (True, False): Path(self.charm_exec_path), + # absolute and direct + (False, False): (self.hooks_dir / self.charm_exec_path).resolve(), + # absolute and indirect + (False, True): self.JUJU_CHARM_DIR / 'dispatch', + }.items(): + with self.subTest(path=path, rel=rel, ind=ind): + # sanity check + self.assertEqual(path.is_absolute(), not rel) + self.assertEqual(path.name == 'dispatch', ind) + try: + hook_path.symlink_to(path) + + state = self._simulate_event(event) + + # the .on. was only called once + self.assertEqual(state['observed_event_types'], [InstallEvent]) + self.assertEqual(state['on_install'], [InstallEvent]) + finally: + hook_path.unlink() + + def test_hook_and_dispatch_but_hook_is_dispatch_copy(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + hook_path = self.hooks_dir / 'install' + path = (self.hooks_dir / self.charm_exec_path).resolve() + shutil.copy(str(path), str(hook_path)) + fake_script(self, 'juju-log', 'exit 0') + + event = EventSpec(InstallEvent, 'install', charm_config=charm_config) + state = self._simulate_event(event) + + # the .on. was only called once + self.assertEqual(state['observed_event_types'], [InstallEvent]) + self.assertEqual(state['on_install'], [InstallEvent]) + self.assertEqual(fake_script_calls(self), [ + VERSION_LOGLINE, + ['juju-log', '--log-level', 'INFO', 'Running legacy hooks/install.'], + VERSION_LOGLINE, # because it called itself + ['juju-log', '--log-level', 'DEBUG', 'Charm called itself via hooks/install.'], + ['juju-log', '--log-level', 'DEBUG', 'Legacy hooks/install exited with status 0.'], + ['juju-log', '--log-level', 'DEBUG', 'Emitting Juju event install.'], + ]) + + +class TestMainWithDispatchAsScript(TestMainWithDispatch): + """Here dispatch is a script that execs the charm.py instead of a symlink. + """ + + has_dispatch = True + + def _setup_entry_point(self, directory, entry_point): + path = self.JUJU_CHARM_DIR / 'dispatch' + if not path.exists(): + path.write_text('#!/bin/sh\nexec "{}" "{}"\n'.format( + sys.executable, + self.JUJU_CHARM_DIR / 'src/charm.py')) + path.chmod(0o755) + + def _call_event(self, rel_path, env): + env["JUJU_DISPATCH_PATH"] = str(rel_path) + dispatch = self.JUJU_CHARM_DIR / 'dispatch' + subprocess.check_call([str(dispatch)], + env=env, cwd=str(self.JUJU_CHARM_DIR)) diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_model.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_model.py new file mode 100755 index 0000000000000000000000000000000000000000..8415efa76a3b85a1bfe3ef20497ca00987c1d6a2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_model.py @@ -0,0 +1,1329 @@ +#!/usr/bin/python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import OrderedDict +import json +import ipaddress +import os +import pathlib +from textwrap import dedent +import unittest + +import ops.model +import ops.charm +import ops.testing +from ops.charm import RelationMeta, RelationRole + +from test.test_helpers import fake_script, fake_script_calls + + +class TestModel(unittest.TestCase): + + def setUp(self): + self.harness = ops.testing.Harness(ops.charm.CharmBase, meta=''' + name: myapp + provides: + db0: + interface: db0 + requires: + db1: + interface: db1 + peers: + db2: + interface: db2 + ''') + self.relation_id_db0 = self.harness.add_relation('db0', 'db') + self.model = self.harness.model + + def test_model_attributes(self): + self.assertIs(self.model.app, self.model.unit.app) + self.assertIsNone(self.model.name) + + def test_model_name_from_backend(self): + self.harness.set_model_name('default') + m = ops.model.Model(ops.charm.CharmMeta(), self.harness._backend) + self.assertEqual(m.name, 'default') + with self.assertRaises(AttributeError): + m.name = "changes-disallowed" + + def test_relations_keys(self): + rel_app1 = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(rel_app1, 'remoteapp1/0') + self.harness.add_relation_unit(rel_app1, 'remoteapp1/1') + rel_app2 = self.harness.add_relation('db1', 'remoteapp2') + self.harness.add_relation_unit(rel_app2, 'remoteapp2/0') + + # We invalidate db1 so that it causes us to reload it + self.model.relations._invalidate('db1') + self.resetBackendCalls() + for relation in self.model.relations['db1']: + self.assertIn(self.model.unit, relation.data) + unit_from_rel = next(filter(lambda u: u.name == 'myapp/0', relation.data.keys())) + self.assertIs(self.model.unit, unit_from_rel) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', rel_app1), + ('relation_list', rel_app2), + ]) + + def test_get_relation(self): + # one relation on db1 + # two relations on db0 + # no relations on db2 + relation_id_db1 = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id_db1, 'remoteapp1/0') + relation_id_db0_b = self.harness.add_relation('db0', 'another') + self.resetBackendCalls() + + with self.assertRaises(ops.model.ModelError): + # You have to specify it by just the integer ID + self.model.get_relation('db1', 'db1:{}'.format(relation_id_db1)) + rel_db1 = self.model.get_relation('db1', relation_id_db1) + self.assertIsInstance(rel_db1, ops.model.Relation) + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id_db1), + ]) + dead_rel = self.model.get_relation('db1', 7) + self.assertIsInstance(dead_rel, ops.model.Relation) + self.assertEqual(set(dead_rel.data.keys()), {self.model.unit, self.model.unit.app}) + self.assertEqual(dead_rel.data[self.model.unit], {}) + self.assertBackendCalls([ + ('relation_list', 7), + ('relation_get', 7, 'myapp/0', False), + ]) + + self.assertIsNone(self.model.get_relation('db2')) + self.assertBackendCalls([ + ('relation_ids', 'db2'), + ]) + self.assertIs(self.model.get_relation('db1'), rel_db1) + with self.assertRaises(ops.model.TooManyRelatedAppsError): + self.model.get_relation('db0') + + self.assertBackendCalls([ + ('relation_ids', 'db0'), + ('relation_list', self.relation_id_db0), + ('relation_list', relation_id_db0_b), + ]) + + def test_peer_relation_app(self): + self.harness.add_relation('db2', 'myapp') + rel_dbpeer = self.model.get_relation('db2') + self.assertIs(rel_dbpeer.app, self.model.app) + + def test_remote_units_is_our(self): + 
relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.add_relation_unit(relation_id, 'remoteapp1/1') + self.resetBackendCalls() + + for u in self.model.get_relation('db1').units: + self.assertFalse(u._is_our_unit) + self.assertFalse(u.app._is_our_app) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id) + ]) + + def test_our_unit_is_our(self): + self.assertTrue(self.model.unit._is_our_unit) + self.assertTrue(self.model.unit.app._is_our_app) + + def test_unit_relation_data(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.update_relation_data( + relation_id, + 'remoteapp1/0', + {'host': 'remoteapp1-0'}) + self.model.relations._invalidate('db1') + self.resetBackendCalls() + + random_unit = self.model.get_unit('randomunit/0') + with self.assertRaises(KeyError): + self.model.get_relation('db1').data[random_unit] + remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0', + self.model.get_relation('db1').units)) + self.assertEqual(self.model.get_relation('db1').data[remoteapp1_0], + {'host': 'remoteapp1-0'}) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'remoteapp1/0', False), + ]) + + def test_remote_app_relation_data(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'remoteapp1', {'secret': 'cafedeadbeef'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.add_relation_unit(relation_id, 'remoteapp1/1') + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + # Try to get relation data for an invalid remote application. + random_app = self.model._cache.get(ops.model.Application, 'randomapp') + with self.assertRaises(KeyError): + rel_db1.data[random_app] + + remoteapp1 = rel_db1.app + self.assertEqual(remoteapp1.name, 'remoteapp1') + self.assertEqual(rel_db1.data[remoteapp1], + {'secret': 'cafedeadbeef'}) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'remoteapp1', True), + ]) + + def test_relation_data_modify_remote(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'remoteapp1', {'secret': 'cafedeadbeef'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.update_relation_data(relation_id, 'remoteapp1/0', {'host': 'remoteapp1/0'}) + self.model.relations._invalidate('db1') + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0', + self.model.get_relation('db1').units)) + # Force memory cache to be loaded. 
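+        # Remote unit data is read-only from this side of the relation.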
+ self.assertIn('host', rel_db1.data[remoteapp1_0]) + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[remoteapp1_0]['foo'] = 'bar' + self.assertNotIn('foo', rel_db1.data[remoteapp1_0]) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'remoteapp1/0', False), + ]) + + def test_relation_data_modify_our(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'nothing'}) + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + # Force memory cache to be loaded. + self.assertIn('host', rel_db1.data[self.model.unit]) + rel_db1.data[self.model.unit]['host'] = 'bar' + self.assertEqual(rel_db1.data[self.model.unit]['host'], 'bar') + + self.assertBackendCalls([ + ('relation_get', relation_id, 'myapp/0', False), + ('relation_set', relation_id, 'host', 'bar', False), + ]) + + def test_app_relation_data_modify_local_as_leader(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp', {'password': 'deadbeefcafe'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.set_leader(True) + self.resetBackendCalls() + + local_app = self.model.unit.app + + rel_db1 = self.model.get_relation('db1') + self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'}) + + rel_db1.data[local_app]['password'] = 'foo' + + self.assertEqual(rel_db1.data[local_app]['password'], 'foo') + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp', True), + ('is_leader',), + ('relation_set', relation_id, 'password', 'foo', True), + ]) + + def test_app_relation_data_modify_local_as_minion(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp', {'password': 'deadbeefcafe'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.set_leader(False) + self.resetBackendCalls() + + local_app = self.model.unit.app + + rel_db1 = self.model.get_relation('db1') + self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'}) + + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[local_app]['password'] = 'foobar' + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp', True), + ('is_leader',), + ]) + + def test_relation_data_del_key(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'bar'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + # Force memory cache to be loaded. 
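+        # Deleting a key maps to relation-set with an empty string value.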
+ self.assertIn('host', rel_db1.data[self.model.unit]) + del rel_db1.data[self.model.unit]['host'] + self.assertNotIn('host', rel_db1.data[self.model.unit]) + self.assertEqual({}, self.harness.get_relation_data(relation_id, 'myapp/0')) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ('relation_set', relation_id, 'host', '', False), + ]) + + def test_relation_set_fail(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'myapp-0'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.resetBackendCalls() + + backend = self.harness._backend + # TODO: jam 2020-03-06 This is way too much information about relation_set + # The original test forced 'relation-set' to return exit code 2, + # but there was nothing illegal about the data that was being set, + # for us to properly test the side effects of relation-set failing. + + def broken_relation_set(relation_id, key, value, is_app): + backend._calls.append(('relation_set', relation_id, key, value, is_app)) + raise ops.model.ModelError() + backend.relation_set = broken_relation_set + + rel_db1 = self.model.get_relation('db1') + # Force memory cache to be loaded. + self.assertIn('host', rel_db1.data[self.model.unit]) + with self.assertRaises(ops.model.ModelError): + rel_db1.data[self.model.unit]['host'] = 'bar' + self.assertEqual(rel_db1.data[self.model.unit]['host'], 'myapp-0') + with self.assertRaises(ops.model.ModelError): + del rel_db1.data[self.model.unit]['host'] + self.assertIn('host', rel_db1.data[self.model.unit]) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ('relation_set', relation_id, 'host', 'bar', False), + ('relation_set', relation_id, 'host', '', False), + ]) + + def test_relation_data_type_check(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'myapp-0'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[self.model.unit]['foo'] = 1 + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[self.model.unit]['foo'] = {'foo': 'bar'} + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[self.model.unit]['foo'] = None + # No data has actually been changed + self.assertEqual(dict(rel_db1.data[self.model.unit]), {'host': 'myapp-0'}) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ]) + + def test_config(self): + self.harness.update_config({'foo': 'foo', 'bar': 1, 'qux': True}) + self.assertEqual(self.model.config, { + 'foo': 'foo', + 'bar': 1, + 'qux': True, + }) + with self.assertRaises(TypeError): + # Confirm that we cannot modify config values. + self.model.config['foo'] = 'bar' + + self.assertBackendCalls([('config_get',)]) + + def test_is_leader(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.set_leader(True) + self.resetBackendCalls() + + def check_remote_units(): + # Cannot determine leadership for remote units. 
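+            # is_leader() can only be answered for the local unit, so remote
+            # units raise RuntimeError.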
+            for u in self.model.get_relation('db1').units:
+                with self.assertRaises(RuntimeError):
+                    u.is_leader()
+
+        self.assertTrue(self.model.unit.is_leader())
+
+        check_remote_units()
+
+        # Create a new model and backend to drop a cached is-leader output.
+        self.harness.set_leader(False)
+        self.assertFalse(self.model.unit.is_leader())
+
+        check_remote_units()
+
+        self.assertBackendCalls([
+            ('is_leader',),
+            ('relation_ids', 'db1'),
+            ('relation_list', relation_id),
+            ('is_leader',),
+        ])
+
+    def test_workload_version(self):
+        self.model.unit.set_workload_version('1.2.3')
+        self.assertBackendCalls([
+            ('application_version_set', '1.2.3'),
+        ])
+
+    def test_workload_version_invalid(self):
+        with self.assertRaises(TypeError) as cm:
+            self.model.unit.set_workload_version(5)
+        self.assertEqual(str(cm.exception), "workload version must be a str, not int: 5")
+        self.assertBackendCalls([])
+
+    def test_resources(self):
+        # TODO: (jam) 2020-05-07 Harness doesn't yet support resource-get issue #262
+        meta = ops.charm.CharmMeta()
+        meta.resources = {'foo': None, 'bar': None}
+        model = ops.model.Model(meta, ops.model._ModelBackend('myapp/0'))
+
+        with self.assertRaises(RuntimeError):
+            model.resources.fetch('qux')
+
+        fake_script(self, 'resource-get', 'exit 1')
+        with self.assertRaises(ops.model.ModelError):
+            model.resources.fetch('foo')
+
+        fake_script(self, 'resource-get',
+                    'echo /var/lib/juju/agents/unit-test-0/resources/$1/$1.tgz')
+        self.assertEqual(model.resources.fetch('foo').name, 'foo.tgz')
+        self.assertEqual(model.resources.fetch('bar').name, 'bar.tgz')
+
+    def test_pod_spec(self):
+        # TODO: (jam) 2020-05-07 Harness doesn't yet expose pod-spec-set issue #261
+        meta = ops.charm.CharmMeta.from_yaml('''
+            name: myapp
+        ''')
+        model = ops.model.Model(meta, ops.model._ModelBackend('myapp/0'))
+        fake_script(self, 'pod-spec-set', """
+            cat $2 > $(dirname $0)/spec.json
+            [ -n "$4" ] && cat "$4" > $(dirname $0)/k8s_res.json || true
+        """)
+        fake_script(self, 'is-leader', 'echo true')
+        spec_path = self.fake_script_path / 'spec.json'
+        k8s_res_path = self.fake_script_path / 'k8s_res.json'
+
+        def check_calls(calls):
+            # There may be 1 or 2 calls because of is-leader.
+            self.assertLessEqual(len(calls), 2)
+            pod_spec_call = next(filter(lambda c: c[0] == 'pod-spec-set', calls))
+            self.assertEqual(pod_spec_call[:2], ['pod-spec-set', '--file'])
+
+            # 8 bytes are used as of Python 3.4.0, see Python bug #12015.
+            # Other characters are from POSIX 3.282 (Portable Filename
+            # Character Set), a subset of which Python's mkdtemp uses.
+            self.assertRegex(pod_spec_call[2], '.*/tmp[A-Za-z0-9._-]{8}-pod-spec-set')
+
+        model.pod.set_spec({'foo': 'bar'})
+        self.assertEqual(spec_path.read_text(), '{"foo": "bar"}')
+        self.assertFalse(k8s_res_path.exists())
+
+        fake_calls = fake_script_calls(self, clear=True)
+        check_calls(fake_calls)
+
+        model.pod.set_spec({'bar': 'foo'}, {'qux': 'baz'})
+        self.assertEqual(spec_path.read_text(), '{"bar": "foo"}')
+        self.assertEqual(k8s_res_path.read_text(), '{"qux": "baz"}')
+
+        fake_calls = fake_script_calls(self, clear=True)
+        check_calls(fake_calls)
+
+        # Create a new model to drop is-leader caching result.
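+        # Only the leader may set a pod spec, so the call below must raise
+        # ModelError.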
+ self.backend = ops.model._ModelBackend('myapp/0') + meta = ops.charm.CharmMeta() + model = ops.model.Model(meta, self.backend) + fake_script(self, 'is-leader', 'echo false') + with self.assertRaises(ops.model.ModelError): + model.pod.set_spec({'foo': 'bar'}) + + def test_base_status_instance_raises(self): + with self.assertRaises(TypeError): + ops.model.StatusBase('test') + + class NoNameStatus(ops.model.StatusBase): + pass + + with self.assertRaises(AttributeError): + ops.model.StatusBase.register_status(NoNameStatus) + + def test_status_repr(self): + test_cases = { + "ActiveStatus('Seashell')": ops.model.ActiveStatus('Seashell'), + "MaintenanceStatus('Red')": ops.model.MaintenanceStatus('Red'), + "BlockedStatus('Magenta')": ops.model.BlockedStatus('Magenta'), + "WaitingStatus('Thistle')": ops.model.WaitingStatus('Thistle'), + 'UnknownStatus()': ops.model.UnknownStatus(), + } + for expected, status in test_cases.items(): + self.assertEqual(repr(status), expected) + + def test_status_eq(self): + status_types = [ + ops.model.ActiveStatus, + ops.model.MaintenanceStatus, + ops.model.BlockedStatus, + ops.model.WaitingStatus, + ] + + self.assertEqual(ops.model.UnknownStatus(), ops.model.UnknownStatus()) + for (i, t1) in enumerate(status_types): + self.assertNotEqual(t1(''), ops.model.UnknownStatus()) + for (j, t2) in enumerate(status_types): + self.assertNotEqual(t1('one'), t2('two')) + if i == j: + self.assertEqual(t1('one'), t2('one')) + else: + self.assertNotEqual(t1('one'), t2('one')) + + def test_active_message_default(self): + self.assertEqual(ops.model.ActiveStatus().message, '') + + def test_local_set_valid_unit_status(self): + test_cases = [( + 'active', + ops.model.ActiveStatus('Green'), + ('status_set', 'active', 'Green', {'is_app': False}), + ), ( + 'maintenance', + ops.model.MaintenanceStatus('Yellow'), + ('status_set', 'maintenance', 'Yellow', {'is_app': False}), + ), ( + 'blocked', + ops.model.BlockedStatus('Red'), + ('status_set', 'blocked', 'Red', {'is_app': False}), + ), ( + 'waiting', + ops.model.WaitingStatus('White'), + ('status_set', 'waiting', 'White', {'is_app': False}), + )] + + for test_case, target_status, backend_call in test_cases: + with self.subTest(test_case): + self.model.unit.status = target_status + self.assertEqual(self.model.unit.status, target_status) + self.model.unit._invalidate() + self.assertEqual(self.model.unit.status, target_status) + self.assertBackendCalls([backend_call, ('status_get', {'is_app': False})]) + + def test_local_set_valid_app_status(self): + self.harness.set_leader(True) + test_cases = [( + 'active', + ops.model.ActiveStatus('Green'), + ('status_set', 'active', 'Green', {'is_app': True}), + ), ( + 'maintenance', + ops.model.MaintenanceStatus('Yellow'), + ('status_set', 'maintenance', 'Yellow', {'is_app': True}), + ), ( + 'blocked', + ops.model.BlockedStatus('Red'), + ('status_set', 'blocked', 'Red', {'is_app': True}), + ), ( + 'waiting', + ops.model.WaitingStatus('White'), + ('status_set', 'waiting', 'White', {'is_app': True}), + )] + + for test_case, target_status, backend_call in test_cases: + with self.subTest(test_case): + self.model.app.status = target_status + self.assertEqual(self.model.app.status, target_status) + self.model.app._invalidate() + self.assertEqual(self.model.app.status, target_status) + # There is a backend call to check if we can set the value, + # and then another check each time we assert the status above + expected_calls = [ + ('is_leader',), backend_call, + ('is_leader',), + ('is_leader',), ('status_get', 
{'is_app': True}), + ] + self.assertBackendCalls(expected_calls) + + def test_set_app_status_non_leader_raises(self): + self.harness.set_leader(False) + with self.assertRaises(RuntimeError): + self.model.app.status + + with self.assertRaises(RuntimeError): + self.model.app.status = ops.model.ActiveStatus() + + def test_set_unit_status_invalid(self): + with self.assertRaises(ops.model.InvalidStatusError): + self.model.unit.status = 'blocked' + + def test_set_app_status_invalid(self): + with self.assertRaises(ops.model.InvalidStatusError): + self.model.app.status = 'blocked' + + def test_remote_unit_status(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.add_relation_unit(relation_id, 'remoteapp1/1') + remote_unit = next(filter(lambda u: u.name == 'remoteapp1/0', + self.model.get_relation('db1').units)) + self.resetBackendCalls() + + # Remote unit status is always unknown. + self.assertEqual(remote_unit.status, ops.model.UnknownStatus()) + + test_statuses = ( + ops.model.UnknownStatus(), + ops.model.ActiveStatus('Green'), + ops.model.MaintenanceStatus('Yellow'), + ops.model.BlockedStatus('Red'), + ops.model.WaitingStatus('White'), + ) + + for target_status in test_statuses: + with self.subTest(target_status.name): + with self.assertRaises(RuntimeError): + remote_unit.status = target_status + self.assertBackendCalls([]) + + def test_remote_app_status(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.add_relation_unit(relation_id, 'remoteapp1/1') + remoteapp1 = self.model.get_relation('db1').app + self.resetBackendCalls() + + # Remote application status is always unknown. 
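+        # Reads succeed (always UnknownStatus); writes must raise RuntimeError.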
+ self.assertIsInstance(remoteapp1.status, ops.model.UnknownStatus) + + test_statuses = ( + ops.model.UnknownStatus(), + ops.model.ActiveStatus(), + ops.model.MaintenanceStatus('Upgrading software'), + ops.model.BlockedStatus('Awaiting manual resolution'), + ops.model.WaitingStatus('Awaiting related app updates'), + ) + for target_status in test_statuses: + with self.subTest(target_status.name): + with self.assertRaises(RuntimeError): + remoteapp1.status = target_status + self.assertBackendCalls([]) + + def test_storage(self): + # TODO: (jam) 2020-05-07 Harness doesn't yet expose storage-get issue #263 + meta = ops.charm.CharmMeta() + meta.storages = {'disks': None, 'data': None} + model = ops.model.Model(meta, ops.model._ModelBackend('myapp/0')) + + fake_script(self, 'storage-list', ''' + if [ "$1" = disks ]; then + echo '["disks/0", "disks/1"]' + else + echo '[]' + fi + ''') + fake_script(self, 'storage-get', ''' + if [ "$2" = disks/0 ]; then + echo '"/var/srv/disks/0"' + elif [ "$2" = disks/1 ]; then + echo '"/var/srv/disks/1"' + else + exit 2 + fi + ''') + fake_script(self, 'storage-add', '') + + self.assertEqual(len(model.storages), 2) + self.assertEqual(model.storages.keys(), meta.storages.keys()) + self.assertIn('disks', model.storages) + test_cases = { + 0: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/0')}, + 1: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/1')}, + } + for storage in model.storages['disks']: + self.assertEqual(storage.name, 'disks') + self.assertIn(storage.id, test_cases) + self.assertEqual(storage.name, test_cases[storage.id]['name']) + self.assertEqual(storage.location, test_cases[storage.id]['location']) + + self.assertEqual(fake_script_calls(self, clear=True), [ + ['storage-list', 'disks', '--format=json'], + ['storage-get', '-s', 'disks/0', 'location', '--format=json'], + ['storage-get', '-s', 'disks/1', 'location', '--format=json'], + ]) + + self.assertSequenceEqual(model.storages['data'], []) + model.storages.request('data', count=3) + self.assertEqual(fake_script_calls(self), [ + ['storage-list', 'data', '--format=json'], + ['storage-add', 'data=3'], + ]) + + # Try to add storage not present in charm metadata. + with self.assertRaises(ops.model.ModelError): + model.storages.request('deadbeef') + + # Invalid count parameter types. 
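+        # bool is rejected too, even though it is a subclass of int.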
+ for count_v in [None, False, 2.0, 'a', b'beef', object]: + with self.assertRaises(TypeError): + model.storages.request('data', count_v) + + def resetBackendCalls(self): + self.harness._get_backend_calls(reset=True) + + def assertBackendCalls(self, expected, *, reset=True): + self.assertEqual(expected, self.harness._get_backend_calls(reset=reset)) + + +class TestModelBindings(unittest.TestCase): + + def setUp(self): + meta = ops.charm.CharmMeta() + meta.relations = { + 'db0': RelationMeta( + RelationRole.provides, 'db0', {'interface': 'db0', 'scope': 'global'}), + 'db1': RelationMeta( + RelationRole.requires, 'db1', {'interface': 'db1', 'scope': 'global'}), + 'db2': RelationMeta( + RelationRole.peer, 'db2', {'interface': 'db2', 'scope': 'global'}), + } + self.backend = ops.model._ModelBackend('myapp/0') + self.model = ops.model.Model(meta, self.backend) + + fake_script(self, 'relation-ids', + """([ "$1" = db0 ] && echo '["db0:4"]') || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""") + self.network_get_out = '''{ + "bind-addresses": [ + { + "mac-address": "de:ad:be:ef:ca:fe", + "interface-name": "lo", + "addresses": [ + { + "hostname": "", + "value": "192.0.2.2", + "cidr": "192.0.2.0/24" + }, + { + "hostname": "deadbeef.example", + "value": "dead:beef::1", + "cidr": "dead:beef::/64" + } + ] + }, + { + "mac-address": "", + "interface-name": "tun", + "addresses": [ + { + "hostname": "", + "value": "192.0.3.3", + "cidr": "" + }, + { + "hostname": "", + "value": "2001:db8::3", + "cidr": "" + }, + { + "hostname": "deadbeef.local", + "value": "fe80::1:1", + "cidr": "fe80::/64" + } + ] + } + ], + "egress-subnets": [ + "192.0.2.2/32", + "192.0.3.0/24", + "dead:beef::/64", + "2001:db8::3/128" + ], + "ingress-addresses": [ + "192.0.2.2", + "192.0.3.3", + "dead:beef::1", + "2001:db8::3" + ] +}''' + + def _check_binding_data(self, binding_name, binding): + self.assertEqual(binding.name, binding_name) + self.assertEqual(binding.network.bind_address, ipaddress.ip_address('192.0.2.2')) + self.assertEqual(binding.network.ingress_address, ipaddress.ip_address('192.0.2.2')) + # /32 and /128 CIDRs are valid one-address networks for IPv{4,6}Network types respectively. + self.assertEqual(binding.network.egress_subnets, [ipaddress.ip_network('192.0.2.2/32'), + ipaddress.ip_network('192.0.3.0/24'), + ipaddress.ip_network('dead:beef::/64'), + ipaddress.ip_network('2001:db8::3/128')]) + + for (i, (name, address, subnet)) in enumerate([ + ('lo', '192.0.2.2', '192.0.2.0/24'), + ('lo', 'dead:beef::1', 'dead:beef::/64'), + ('tun', '192.0.3.3', '192.0.3.3/32'), + ('tun', '2001:db8::3', '2001:db8::3/128'), + ('tun', 'fe80::1:1', 'fe80::/64')]): + self.assertEqual(binding.network.interfaces[i].name, name) + self.assertEqual(binding.network.interfaces[i].address, ipaddress.ip_address(address)) + self.assertEqual(binding.network.interfaces[i].subnet, ipaddress.ip_network(subnet)) + + def test_invalid_keys(self): + # Basic validation for passing invalid keys. + for name in (object, 0): + with self.assertRaises(ops.model.ModelError): + self.model.get_binding(name) + + def test_dead_relations(self): + fake_script( + self, + 'network-get', + ''' + if [ "$1" = db0 ] && [ "$2" = --format=json ]; then + echo '{}' + else + echo ERROR invalid value "$2" for option -r: relation not found >&2 + exit 2 + fi + '''.format(self.network_get_out)) + # Validate the behavior for dead relations. 
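+        # Relation 42 is gone, so the backend retries network-get without -r.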
+ binding = ops.model.Binding('db0', 42, self.model._backend) + self.assertEqual(binding.network.bind_address, ipaddress.ip_address('192.0.2.2')) + self.assertEqual(fake_script_calls(self, clear=True), [ + ['network-get', 'db0', '-r', '42', '--format=json'], + ['network-get', 'db0', '--format=json'], + ]) + + def test_binding_by_relation_name(self): + fake_script(self, 'network-get', + '''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(self.network_get_out)) + binding_name = 'db0' + expected_calls = [['network-get', 'db0', '--format=json']] + + binding = self.model.get_binding(binding_name) + self._check_binding_data(binding_name, binding) + self.assertEqual(fake_script_calls(self, clear=True), expected_calls) + + def test_binding_by_relation(self): + fake_script(self, 'network-get', + '''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(self.network_get_out)) + binding_name = 'db0' + expected_calls = [ + ['relation-ids', 'db0', '--format=json'], + # The two invocations below are due to the get_relation call. + ['relation-list', '-r', '4', '--format=json'], + ['network-get', 'db0', '-r', '4', '--format=json'], + ] + binding = self.model.get_binding(self.model.get_relation(binding_name)) + self._check_binding_data(binding_name, binding) + self.assertEqual(fake_script_calls(self, clear=True), expected_calls) + + +class TestModelBackend(unittest.TestCase): + + def setUp(self): + self._backend = None + + @property + def backend(self): + if self._backend is None: + self._backend = ops.model._ModelBackend('myapp/0') + return self._backend + + def test_relation_get_set_is_app_arg(self): + # No is_app provided. + with self.assertRaises(TypeError): + self.backend.relation_set(1, 'fookey', 'barval') + + with self.assertRaises(TypeError): + self.backend.relation_get(1, 'fooentity') + + # Invalid types for is_app. + for is_app_v in [None, 1, 2.0, 'a', b'beef']: + with self.assertRaises(TypeError): + self.backend.relation_set(1, 'fookey', 'barval', is_app=is_app_v) + + with self.assertRaises(TypeError): + self.backend.relation_get(1, 'fooentity', is_app=is_app_v) + + def test_is_leader_refresh(self): + meta = ops.charm.CharmMeta.from_yaml(''' + name: myapp + ''') + model = ops.model.Model(meta, self.backend) + fake_script(self, 'is-leader', 'echo false') + self.assertFalse(model.unit.is_leader()) + + # Change the leadership status + fake_script(self, 'is-leader', 'echo true') + # If you don't force it, we don't check, so we won't see the change + self.assertFalse(model.unit.is_leader()) + # If we force a recheck, then we notice + self.backend._leader_check_time = None + self.assertTrue(model.unit.is_leader()) + + # Force a recheck without changing the leadership status. 
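+        # Clearing _leader_check_time invalidates the cached lease and forces
+        # another is-leader call.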
+ fake_script(self, 'is-leader', 'echo true') + self.backend._leader_check_time = None + self.assertTrue(model.unit.is_leader()) + + def test_relation_tool_errors(self): + self.addCleanup(os.environ.pop, 'JUJU_VERSION', None) + os.environ['JUJU_VERSION'] = '2.8.0' + err_msg = 'ERROR invalid value "$2" for option -r: relation not found' + + test_cases = [( + lambda: fake_script(self, 'relation-list', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_list(3), + ops.model.ModelError, + [['relation-list', '-r', '3', '--format=json']], + ), ( + lambda: fake_script(self, 'relation-list', 'echo {} >&2 ; exit 2'.format(err_msg)), + lambda: self.backend.relation_list(3), + ops.model.RelationNotFoundError, + [['relation-list', '-r', '3', '--format=json']], + ), ( + lambda: fake_script(self, 'relation-set', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False), + ops.model.ModelError, + [['relation-set', '-r', '3', 'foo=bar']], + ), ( + lambda: fake_script(self, 'relation-set', 'echo {} >&2 ; exit 2'.format(err_msg)), + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False), + ops.model.RelationNotFoundError, + [['relation-set', '-r', '3', 'foo=bar']], + ), ( + lambda: None, + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=True), + ops.model.RelationNotFoundError, + [['relation-set', '-r', '3', 'foo=bar', '--app']], + ), ( + lambda: fake_script(self, 'relation-get', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_get(3, 'remote/0', is_app=False), + ops.model.ModelError, + [['relation-get', '-r', '3', '-', 'remote/0', '--format=json']], + ), ( + lambda: fake_script(self, 'relation-get', 'echo {} >&2 ; exit 2'.format(err_msg)), + lambda: self.backend.relation_get(3, 'remote/0', is_app=False), + ops.model.RelationNotFoundError, + [['relation-get', '-r', '3', '-', 'remote/0', '--format=json']], + ), ( + lambda: None, + lambda: self.backend.relation_get(3, 'remote/0', is_app=True), + ops.model.RelationNotFoundError, + [['relation-get', '-r', '3', '-', 'remote/0', '--app', '--format=json']], + )] + + for i, (do_fake, run, exception, calls) in enumerate(test_cases): + with self.subTest(i): + do_fake() + with self.assertRaises(exception): + run() + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_relation_get_juju_version_quirks(self): + self.addCleanup(os.environ.pop, 'JUJU_VERSION', None) + + fake_script(self, 'relation-get', '''echo '{"foo": "bar"}' ''') + + # on 2.7.0+, things proceed as expected + for v in ['2.8.0', '2.7.0']: + with self.subTest(v): + os.environ['JUJU_VERSION'] = v + rel_data = self.backend.relation_get(1, 'foo/0', is_app=True) + self.assertEqual(rel_data, {"foo": "bar"}) + calls = [' '.join(i) for i in fake_script_calls(self, clear=True)] + self.assertEqual(calls, ['relation-get -r 1 - foo/0 --app --format=json']) + + # before 2.7.0, it just fails (no --app support) + os.environ['JUJU_VERSION'] = '2.6.9' + with self.assertRaisesRegex(RuntimeError, 'not supported on Juju version 2.6.9'): + self.backend.relation_get(1, 'foo/0', is_app=True) + self.assertEqual(fake_script_calls(self), []) + + def test_relation_set_juju_version_quirks(self): + self.addCleanup(os.environ.pop, 'JUJU_VERSION', None) + + fake_script(self, 'relation-set', 'exit 0') + + # on 2.7.0+, things proceed as expected + for v in ['2.8.0', '2.7.0']: + with self.subTest(v): + os.environ['JUJU_VERSION'] = v + self.backend.relation_set(1, 'foo', 'bar', is_app=True) + calls = [' '.join(i) for i in 
fake_script_calls(self, clear=True)]
+                self.assertEqual(calls, ['relation-set -r 1 foo=bar --app'])
+
+        # before 2.7.0, it always fails (no --app support)
+        os.environ['JUJU_VERSION'] = '2.6.9'
+        with self.assertRaisesRegex(RuntimeError, 'not supported on Juju version 2.6.9'):
+            self.backend.relation_set(1, 'foo', 'bar', is_app=True)
+        self.assertEqual(fake_script_calls(self), [])
+
+    def test_status_get(self):
+        # taken from actual Juju output
+        content = '{"message": "", "status": "unknown", "status-data": {}}'
+        fake_script(self, 'status-get', "echo '{}'".format(content))
+        s = self.backend.status_get(is_app=False)
+        self.assertEqual(s['status'], "unknown")
+        self.assertEqual(s['message'], "")
+        # taken from actual Juju output
+        content = dedent("""
+            {
+                "application-status": {
+                    "message": "installing",
+                    "status": "maintenance",
+                    "status-data": {},
+                    "units": {
+                        "uo/0": {
+                            "message": "",
+                            "status": "active",
+                            "status-data": {}
+                        }
+                    }
+                }
+            }
+            """)
+        fake_script(self, 'status-get', "echo '{}'".format(content))
+        s = self.backend.status_get(is_app=True)
+        self.assertEqual(s['status'], "maintenance")
+        self.assertEqual(s['message'], "installing")
+        self.assertEqual(fake_script_calls(self, clear=True), [
+            ['status-get', '--include-data', '--application=False', '--format=json'],
+            ['status-get', '--include-data', '--application=True', '--format=json'],
+        ])
+
+    def test_status_is_app_forced_kwargs(self):
+        fake_script(self, 'status-get', 'exit 1')
+        fake_script(self, 'status-set', 'exit 1')
+
+        test_cases = (
+            lambda: self.backend.status_get(False),
+            lambda: self.backend.status_get(True),
+            lambda: self.backend.status_set('active', '', False),
+            lambda: self.backend.status_set('active', '', True),
+        )
+
+        for case in test_cases:
+            with self.assertRaises(TypeError):
+                case()
+
+    def test_local_set_invalid_status(self):
+        # Juju returns exit code 1 if you ask to set the status to 'unknown'.
+        meta = ops.charm.CharmMeta.from_yaml('''
+            name: myapp
+        ''')
+        model = ops.model.Model(meta, self.backend)
+        fake_script(self, 'status-set', 'exit 1')
+        fake_script(self, 'is-leader', 'echo true')
+
+        with self.assertRaises(ops.model.ModelError):
+            model.unit.status = ops.model.UnknownStatus()
+
+        self.assertEqual(fake_script_calls(self, True), [
+            ['status-set', '--application=False', 'unknown', ''],
+        ])
+
+        with self.assertRaises(ops.model.ModelError):
+            model.app.status = ops.model.UnknownStatus()
+
+        # A leadership check is needed for application status.
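+        # Hence is-leader appears in the recorded calls ahead of status-set.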
+ self.assertEqual(fake_script_calls(self, True), [ + ['is-leader', '--format=json'], + ['status-set', '--application=True', 'unknown', ''], + ]) + + def test_status_set_is_app_not_bool_raises(self): + for is_app_v in [None, 1, 2.0, 'a', b'beef', object]: + with self.assertRaises(TypeError): + self.backend.status_set(ops.model.ActiveStatus, is_app=is_app_v) + + def test_storage_tool_errors(self): + test_cases = [( + lambda: fake_script(self, 'storage-list', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_list('foobar'), + ops.model.ModelError, + [['storage-list', 'foobar', '--format=json']], + ), ( + lambda: fake_script(self, 'storage-get', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_get('foobar', 'someattr'), + ops.model.ModelError, + [['storage-get', '-s', 'foobar', 'someattr', '--format=json']], + ), ( + lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_add('foobar', count=2), + ops.model.ModelError, + [['storage-add', 'foobar=2']], + ), ( + lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_add('foobar', count=object), + TypeError, + [], + ), ( + lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_add('foobar', count=True), + TypeError, + [], + )] + for do_fake, run, exception, calls in test_cases: + do_fake() + with self.assertRaises(exception): + run() + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_network_get(self): + network_get_out = '''{ + "bind-addresses": [ + { + "mac-address": "", + "interface-name": "", + "addresses": [ + { + "hostname": "", + "value": "192.0.2.2", + "cidr": "" + } + ] + } + ], + "egress-subnets": [ + "192.0.2.2/32" + ], + "ingress-addresses": [ + "192.0.2.2" + ] +}''' + fake_script(self, 'network-get', + '''[ "$1" = deadbeef ] && echo '{}' || exit 1'''.format(network_get_out)) + network_info = self.backend.network_get('deadbeef') + self.assertEqual(network_info, json.loads(network_get_out)) + self.assertEqual(fake_script_calls(self, clear=True), + [['network-get', 'deadbeef', '--format=json']]) + + network_info = self.backend.network_get('deadbeef', 1) + self.assertEqual(network_info, json.loads(network_get_out)) + self.assertEqual(fake_script_calls(self, clear=True), + [['network-get', 'deadbeef', '-r', '1', '--format=json']]) + + def test_network_get_errors(self): + err_no_endpoint = 'ERROR no network config found for binding "$2"' + err_no_rel = 'ERROR invalid value "$3" for option -r: relation not found' + + test_cases = [( + lambda: fake_script(self, 'network-get', + 'echo {} >&2 ; exit 1'.format(err_no_endpoint)), + lambda: self.backend.network_get("deadbeef"), + ops.model.ModelError, + [['network-get', 'deadbeef', '--format=json']], + ), ( + lambda: fake_script(self, 'network-get', 'echo {} >&2 ; exit 2'.format(err_no_rel)), + lambda: self.backend.network_get("deadbeef", 3), + ops.model.RelationNotFoundError, + [['network-get', 'deadbeef', '-r', '3', '--format=json']], + )] + for do_fake, run, exception, calls in test_cases: + do_fake() + with self.assertRaises(exception): + run() + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_get_error(self): + fake_script(self, 'action-get', '') + fake_script(self, 'action-get', 'echo fooerror >&2 ; exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.action_get() + calls = [['action-get', '--format=json']] + self.assertEqual(fake_script_calls(self, 
clear=True), calls) + + def test_action_set_error(self): + fake_script(self, 'action-get', '') + fake_script(self, 'action-set', 'echo fooerror >&2 ; exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.action_set(OrderedDict([('foo', 'bar'), ('dead', 'beef cafe')])) + calls = [["action-set", "foo=bar", "dead=beef cafe"]] + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_log_error(self): + fake_script(self, 'action-get', '') + fake_script(self, 'action-log', 'echo fooerror >&2 ; exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.action_log('log-message') + calls = [["action-log", "log-message"]] + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_get(self): + fake_script(self, 'action-get', """echo '{"foo-name": "bar", "silent": false}'""") + params = self.backend.action_get() + self.assertEqual(params['foo-name'], 'bar') + self.assertEqual(params['silent'], False) + self.assertEqual(fake_script_calls(self), [['action-get', '--format=json']]) + + def test_action_set(self): + fake_script(self, 'action-get', 'exit 1') + fake_script(self, 'action-set', 'exit 0') + self.backend.action_set(OrderedDict([('x', 'dead beef'), ('y', 1)])) + self.assertEqual(fake_script_calls(self), [['action-set', 'x=dead beef', 'y=1']]) + + def test_action_fail(self): + fake_script(self, 'action-get', 'exit 1') + fake_script(self, 'action-fail', 'exit 0') + self.backend.action_fail('error 42') + self.assertEqual(fake_script_calls(self), [['action-fail', 'error 42']]) + + def test_action_log(self): + fake_script(self, 'action-get', 'exit 1') + fake_script(self, 'action-log', 'exit 0') + self.backend.action_log('progress: 42%') + self.assertEqual(fake_script_calls(self), [['action-log', 'progress: 42%']]) + + def test_application_version_set(self): + fake_script(self, 'application-version-set', 'exit 0') + self.backend.application_version_set('1.2b3') + self.assertEqual(fake_script_calls(self), [['application-version-set', '--', '1.2b3']]) + + def test_application_version_set_invalid(self): + fake_script(self, 'application-version-set', 'exit 0') + with self.assertRaises(TypeError): + self.backend.application_version_set(2) + with self.assertRaises(TypeError): + self.backend.application_version_set() + self.assertEqual(fake_script_calls(self), []) + + def test_juju_log(self): + fake_script(self, 'juju-log', 'exit 0') + self.backend.juju_log('WARNING', 'foo') + self.assertEqual(fake_script_calls(self, clear=True), + [['juju-log', '--log-level', 'WARNING', 'foo']]) + + with self.assertRaises(TypeError): + self.backend.juju_log('DEBUG') + self.assertEqual(fake_script_calls(self, clear=True), []) + + fake_script(self, 'juju-log', 'exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.juju_log('BAR', 'foo') + self.assertEqual(fake_script_calls(self, clear=True), + [['juju-log', '--log-level', 'BAR', 'foo']]) + + def test_valid_metrics(self): + fake_script(self, 'add-metric', 'exit 0') + test_cases = [( + OrderedDict([('foo', 42), ('b-ar', 4.5), ('ba_-z', 4.5), ('a', 1)]), + OrderedDict([('de', 'ad'), ('be', 'ef_ -')]), + [['add-metric', '--labels', 'de=ad,be=ef_ -', + 'foo=42', 'b-ar=4.5', 'ba_-z=4.5', 'a=1']] + ), ( + OrderedDict([('foo1', 0), ('b2r', 4.5)]), + OrderedDict([('d3', 'aд'), ('b33f', '3_ -')]), + [['add-metric', '--labels', 'd3=aд,b33f=3_ -', 'foo1=0', 'b2r=4.5']], + )] + for metrics, labels, expected_calls in test_cases: + self.backend.add_metrics(metrics, labels) + 
self.assertEqual(fake_script_calls(self, clear=True), expected_calls) + + def test_invalid_metric_names(self): + invalid_inputs = [ + ({'': 4.2}, {}), + ({'1': 4.2}, {}), + ({'1': -4.2}, {}), + ({'123': 4.2}, {}), + ({'1foo': 4.2}, {}), + ({'-foo': 4.2}, {}), + ({'_foo': 4.2}, {}), + ({'foo-': 4.2}, {}), + ({'foo_': 4.2}, {}), + ({'a-': 4.2}, {}), + ({'a_': 4.2}, {}), + ({'BAЯ': 4.2}, {}), + ] + for metrics, labels in invalid_inputs: + with self.assertRaises(ops.model.ModelError): + self.backend.add_metrics(metrics, labels) + + def test_invalid_metric_values(self): + invalid_inputs = [ + ({'a': float('+inf')}, {}), + ({'a': float('-inf')}, {}), + ({'a': float('nan')}, {}), + ({'foo': 'bar'}, {}), + ({'foo': '1O'}, {}), + ] + for metrics, labels in invalid_inputs: + with self.assertRaises(ops.model.ModelError): + self.backend.add_metrics(metrics, labels) + + def test_invalid_metric_labels(self): + invalid_inputs = [ + ({'foo': 4.2}, {'': 'baz'}), + ({'foo': 4.2}, {',bar': 'baz'}), + ({'foo': 4.2}, {'b=a=r': 'baz'}), + ({'foo': 4.2}, {'BAЯ': 'baz'}), + ] + for metrics, labels in invalid_inputs: + with self.assertRaises(ops.model.ModelError): + self.backend.add_metrics(metrics, labels) + + def test_invalid_metric_label_values(self): + invalid_inputs = [ + ({'foo': 4.2}, {'bar': ''}), + ({'foo': 4.2}, {'bar': 'b,az'}), + ({'foo': 4.2}, {'bar': 'b=az'}), + ] + for metrics, labels in invalid_inputs: + with self.assertRaises(ops.model.ModelError): + self.backend.add_metrics(metrics, labels) + + +class TestLazyMapping(unittest.TestCase): + + def test_invalidate(self): + loaded = [] + + class MyLazyMap(ops.model.LazyMapping): + def _load(self): + loaded.append(1) + return {'foo': 'bar'} + + map = MyLazyMap() + self.assertEqual(map['foo'], 'bar') + self.assertEqual(loaded, [1]) + self.assertEqual(map['foo'], 'bar') + self.assertEqual(loaded, [1]) + map._invalidate() + self.assertEqual(map['foo'], 'bar') + self.assertEqual(loaded, [1, 1]) + + +if __name__ == "__main__": + unittest.main() diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_storage.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_storage.py new file mode 100755 index 0000000000000000000000000000000000000000..3410999bd5f045cd3d65396cc5be546a3c4a94e0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_storage.py @@ -0,0 +1,412 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import gc +import io +import os +import pathlib +import sys +import tempfile +from textwrap import dedent + +import yaml + +from ops import ( + framework, + storage, +) +from test.test_helpers import ( + BaseTestCase, + fake_script, + fake_script_calls, +) + + +class StoragePermutations(abc.ABC): + + def create_framework(self) -> framework.Framework: + """Create a Framework that we can use to test the backend storage. 
+ """ + return framework.Framework(self.create_storage(), None, None, None) + + @abc.abstractmethod + def create_storage(self) -> storage.SQLiteStorage: + """Create a Storage backend that we can interact with""" + return NotImplemented + + def test_save_and_load_snapshot(self): + f = self.create_framework() + + class Sample(framework.Object): + + def __init__(self, parent, key, content): + super().__init__(parent, key) + self.content = content + + def snapshot(self): + return {'content': self.content} + + def restore(self, snapshot): + self.__dict__.update(snapshot) + + f.register_type(Sample, None, Sample.handle_kind) + data = { + 'str': 'string', + 'bytes': b'bytes', + 'int': 1, + 'float': 3.0, + 'dict': {'a': 'b'}, + 'set': {'a', 'b'}, + 'list': [1, 2], + } + s = Sample(f, 'test', data) + handle = s.handle + f.save_snapshot(s) + del s + gc.collect() + res = f.load_snapshot(handle) + self.assertEqual(data, res.content) + + def test_emit_event(self): + f = self.create_framework() + + class Evt(framework.EventBase): + def __init__(self, handle, content): + super().__init__(handle) + self.content = content + + def snapshot(self): + return self.content + + def restore(self, content): + self.content = content + + class Events(framework.ObjectEvents): + event = framework.EventSource(Evt) + + class Sample(framework.Object): + + on = Events() + + def __init__(self, parent, key): + super().__init__(parent, key) + self.observed_content = None + self.framework.observe(self.on.event, self._on_event) + + def _on_event(self, event: Evt): + self.observed_content = event.content + + s = Sample(f, 'key') + f.register_type(Sample, None, Sample.handle_kind) + s.on.event.emit('foo') + self.assertEqual('foo', s.observed_content) + s.on.event.emit(1) + self.assertEqual(1, s.observed_content) + s.on.event.emit(None) + self.assertEqual(None, s.observed_content) + + def test_save_and_overwrite_snapshot(self): + store = self.create_storage() + store.save_snapshot('foo', {1: 2}) + self.assertEqual({1: 2}, store.load_snapshot('foo')) + store.save_snapshot('foo', {'three': 4}) + self.assertEqual({'three': 4}, store.load_snapshot('foo')) + + def test_drop_snapshot(self): + store = self.create_storage() + store.save_snapshot('foo', {1: 2}) + self.assertEqual({1: 2}, store.load_snapshot('foo')) + store.drop_snapshot('foo') + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('foo') + + def test_save_snapshot_empty_string(self): + store = self.create_storage() + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('foo') + store.save_snapshot('foo', '') + self.assertEqual('', store.load_snapshot('foo')) + store.drop_snapshot('foo') + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('foo') + + def test_save_snapshot_none(self): + store = self.create_storage() + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('bar') + store.save_snapshot('bar', None) + self.assertEqual(None, store.load_snapshot('bar')) + store.drop_snapshot('bar') + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('bar') + + def test_save_snapshot_zero(self): + store = self.create_storage() + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('zero') + store.save_snapshot('zero', 0) + self.assertEqual(0, store.load_snapshot('zero')) + store.drop_snapshot('zero') + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('zero') + + def test_save_notice(self): + store = self.create_storage() + store.save_notice('event', 
'observer', 'method') + self.assertEqual( + list(store.notices('event')), + [('event', 'observer', 'method')]) + + def test_load_notices(self): + store = self.create_storage() + self.assertEqual(list(store.notices('path')), []) + + def test_save_one_load_another_notice(self): + store = self.create_storage() + store.save_notice('event', 'observer', 'method') + self.assertEqual(list(store.notices('other')), []) + + def test_save_load_drop_load_notices(self): + store = self.create_storage() + store.save_notice('event', 'observer', 'method') + store.save_notice('event', 'observer', 'method2') + self.assertEqual( + list(store.notices('event')), + [('event', 'observer', 'method'), + ('event', 'observer', 'method2'), + ]) + + +class TestSQLiteStorage(StoragePermutations, BaseTestCase): + + def create_storage(self): + return storage.SQLiteStorage(':memory:') + + +def setup_juju_backend(test_case, state_file): + """Create fake scripts for pretending to be state-set and state-get""" + template_args = { + 'executable': sys.executable, + 'pthpth': os.path.dirname(pathlib.__file__), + 'state_file': str(state_file), + } + + fake_script(test_case, 'state-set', dedent('''\ + {executable} -c ' + import sys + if "{pthpth}" not in sys.path: + sys.path.append("{pthpth}") + import sys, yaml, pathlib, pickle + assert sys.argv[1:] == ["--file", "-"] + request = yaml.load(sys.stdin, Loader=getattr(yaml, "CSafeLoader", yaml.SafeLoader)) + state_file = pathlib.Path("{state_file}") + if state_file.exists() and state_file.stat().st_size > 0: + with state_file.open("rb") as f: + state = pickle.load(f) + else: + state = {{}} + for k, v in request.items(): + state[k] = v + with state_file.open("wb") as f: + pickle.dump(state, f) + ' "$@" + ''').format(**template_args)) + + fake_script(test_case, 'state-get', dedent('''\ + {executable} -Sc ' + import sys + if "{pthpth}" not in sys.path: + sys.path.append("{pthpth}") + import sys, pathlib, pickle + assert len(sys.argv) == 2 + state_file = pathlib.Path("{state_file}") + if state_file.exists() and state_file.stat().st_size > 0: + with state_file.open("rb") as f: + state = pickle.load(f) + else: + state = {{}} + result = state.get(sys.argv[1], "\\n") + sys.stdout.write(result) + ' "$@" + ''').format(**template_args)) + + fake_script(test_case, 'state-delete', dedent('''\ + {executable} -Sc ' + import sys + if "{pthpth}" not in sys.path: + sys.path.append("{pthpth}") + import sys, pathlib, pickle + assert len(sys.argv) == 2 + state_file = pathlib.Path("{state_file}") + if state_file.exists() and state_file.stat().st_size > 0: + with state_file.open("rb") as f: + state = pickle.load(f) + else: + state = {{}} + state.pop(sys.argv[1], None) + with state_file.open("wb") as f: + pickle.dump(state, f) + ' "$@" + ''').format(**template_args)) + + +class TestJujuStorage(StoragePermutations, BaseTestCase): + + def create_storage(self): + state_file = pathlib.Path(tempfile.mkstemp(prefix='tmp-ops-test-state-')[1]) + self.addCleanup(state_file.unlink) + setup_juju_backend(self, state_file) + return storage.JujuStorage() + + +class TestSimpleLoader(BaseTestCase): + + def test_is_c_loader(self): + loader = storage._SimpleLoader(io.StringIO('')) + if getattr(yaml, 'CSafeLoader', None) is not None: + self.assertIsInstance(loader, yaml.CSafeLoader) + else: + self.assertIsInstance(loader, yaml.SafeLoader) + + def test_is_c_dumper(self): + dumper = storage._SimpleDumper(io.StringIO('')) + if getattr(yaml, 'CSafeDumper', None) is not None: + self.assertIsInstance(dumper, yaml.CSafeDumper) + 
else: + self.assertIsInstance(dumper, yaml.SafeDumper) + + def test_handles_tuples(self): + raw = yaml.dump((1, 'tuple'), Dumper=storage._SimpleDumper) + parsed = yaml.load(raw, Loader=storage._SimpleLoader) + self.assertEqual(parsed, (1, 'tuple')) + + def assertRefused(self, obj): + # We shouldn't allow them to be written + with self.assertRaises(yaml.representer.RepresenterError): + yaml.dump(obj, Dumper=storage._SimpleDumper) + # If they did somehow end up written, we shouldn't be able to load them + raw = yaml.dump(obj, Dumper=yaml.Dumper) + with self.assertRaises(yaml.constructor.ConstructorError): + yaml.load(raw, Loader=storage._SimpleLoader) + + def test_forbids_some_types(self): + self.assertRefused(1 + 2j) + self.assertRefused({'foo': 1 + 2j}) + self.assertRefused(frozenset(['foo', 'bar'])) + self.assertRefused(bytearray(b'foo')) + self.assertRefused(object()) + + class Foo: + pass + f = Foo() + self.assertRefused(f) + + +class TestJujuStateBackend(BaseTestCase): + + def test_is_not_available(self): + self.assertFalse(storage._JujuStorageBackend.is_available()) + + def test_is_available(self): + fake_script(self, 'state-get', 'echo ""') + self.assertTrue(storage._JujuStorageBackend.is_available()) + self.assertEqual(fake_script_calls(self, clear=True), []) + + def test_set_encodes_args(self): + t = tempfile.NamedTemporaryFile() + fake_script(self, 'state-set', dedent(""" + cat >> {} + """).format(t.name)) + backend = storage._JujuStorageBackend() + backend.set('key', {'foo': 2}) + self.assertEqual(fake_script_calls(self, clear=True), [ + ['state-set', '--file', '-'], + ]) + t.seek(0) + content = t.read() + self.assertEqual(content.decode('utf-8'), dedent("""\ + "key": | + {foo: 2} + """)) + + def test_get(self): + fake_script(self, 'state-get', dedent(""" + echo 'foo: "bar"' + """)) + backend = storage._JujuStorageBackend() + value = backend.get('key') + self.assertEqual(value, {'foo': 'bar'}) + self.assertEqual(fake_script_calls(self, clear=True), [ + ['state-get', 'key'], + ]) + + def test_set_and_get_complex_value(self): + t = tempfile.NamedTemporaryFile() + fake_script(self, 'state-set', dedent(""" + cat >> {} + """).format(t.name)) + backend = storage._JujuStorageBackend() + complex_val = { + 'foo': 2, + 3: [1, 2, '3'], + 'four': {2, 3}, + 'five': {'a': 2, 'b': 3.0}, + 'six': ('a', 'b'), + 'seven': b'1234', + } + backend.set('Class[foo]/_stored', complex_val) + self.assertEqual(fake_script_calls(self, clear=True), [ + ['state-set', '--file', '-'], + ]) + t.seek(0) + content = t.read() + outer = yaml.safe_load(content) + key = 'Class[foo]/_stored' + self.assertEqual(list(outer.keys()), [key]) + inner = yaml.load(outer[key], Loader=storage._SimpleLoader) + self.assertEqual(complex_val, inner) + if sys.version_info >= (3, 6): + # In Python 3.5 dicts are not ordered by default, and PyYAML only + # iterates the dict. So we read and assert the content is valid, + # but we don't assert the serialized form. + self.assertEqual(content.decode('utf-8'), dedent("""\ + "Class[foo]/_stored": | + foo: 2 + 3: [1, 2, '3'] + four: !!set {2: null, 3: null} + five: {a: 2, b: 3.0} + six: !!python/tuple [a, b] + seven: !!binary | + MTIzNA== + """)) + # Note that the content is yaml in a string, embedded inside YAML to declare the Key: + # Value of where to store the entry. 
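+        # Round-trip check: feed the same serialized YAML back through a fake
+        # state-get and verify that get() reconstructs the original Python
+        # value, set, tuple and bytes members included.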
+ fake_script(self, 'state-get', dedent(""" + echo "foo: 2 + 3: [1, 2, '3'] + four: !!set {2: null, 3: null} + five: {a: 2, b: 3.0} + six: !!python/tuple [a, b] + seven: !!binary | + MTIzNA== + " + """)) + out = backend.get('Class[foo]/_stored') + self.assertEqual(out, complex_val) + + # TODO: Add tests for things we don't want to support. eg, YAML that has custom types should + # be properly forbidden. + # TODO: Tests for state-set/get/delete and how they handle if you ask to delete something + # that doesn't exist, or get something that doesn't exist, etc. diff --git a/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_testing.py b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_testing.py new file mode 100644 index 0000000000000000000000000000000000000000..8f9e4eb17f7a1f746834357987976343e216ee2d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config/mod/operator/test/test_testing.py @@ -0,0 +1,931 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import pathlib +import shutil +import sys +import tempfile +import textwrap +import unittest +import yaml + +from ops.charm import ( + CharmBase, + RelationEvent, +) +from ops.framework import ( + Object, +) +from ops.model import ( + ActiveStatus, + MaintenanceStatus, + UnknownStatus, + ModelError, + RelationNotFoundError, +) +from ops.testing import Harness + + +class TestHarness(unittest.TestCase): + + def test_add_relation(self): + harness = Harness(CharmBase, meta=''' + name: test-app + requires: + db: + interface: pgsql + ''') + rel_id = harness.add_relation('db', 'postgresql') + self.assertIsInstance(rel_id, int) + backend = harness._backend + self.assertEqual(backend.relation_ids('db'), [rel_id]) + self.assertEqual(backend.relation_list(rel_id), []) + # Make sure the initial data bags for our app and unit are empty. 
+        self.assertEqual(backend.relation_get(rel_id, 'test-app', is_app=True), {})
+        self.assertEqual(backend.relation_get(rel_id, 'test-app/0', is_app=False), {})
+
+    def test_add_relation_and_unit(self):
+        harness = Harness(CharmBase, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        rel_id = harness.add_relation('db', 'postgresql')
+        self.assertIsInstance(rel_id, int)
+        harness.add_relation_unit(rel_id, 'postgresql/0')
+        harness.update_relation_data(rel_id, 'postgresql/0', {'foo': 'bar'})
+        backend = harness._backend
+        self.assertEqual(backend.relation_ids('db'), [rel_id])
+        self.assertEqual(backend.relation_list(rel_id), ['postgresql/0'])
+        self.assertEqual(
+            backend.relation_get(rel_id, 'postgresql/0', is_app=False),
+            {'foo': 'bar'})
+
+    def test_add_relation_with_remote_app_data(self):
+        # language=YAML
+        harness = Harness(CharmBase, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        remote_app = 'postgresql'
+        rel_id = harness.add_relation('db', remote_app)
+        harness.update_relation_data(rel_id, 'postgresql', {'app': 'data'})
+        self.assertIsInstance(rel_id, int)
+        backend = harness._backend
+        self.assertEqual([rel_id], backend.relation_ids('db'))
+        self.assertEqual({'app': 'data'}, backend.relation_get(rel_id, remote_app, is_app=True))
+
+    def test_add_relation_with_our_initial_data(self):
+
+        class InitialDataTester(CharmBase):
+            """Record the relation-changed events."""
+
+            def __init__(self, framework):
+                super().__init__(framework)
+                self.observed_events = []
+                self.framework.observe(self.on.db_relation_changed, self._on_db_relation_changed)
+
+            def _on_db_relation_changed(self, event):
+                self.observed_events.append(event)
+
+        # language=YAML
+        harness = Harness(InitialDataTester, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        rel_id = harness.add_relation('db', 'postgresql')
+        harness.update_relation_data(rel_id, 'test-app', {'k': 'v1'})
+        harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.1'})
+        backend = harness._backend
+        self.assertEqual({'k': 'v1'}, backend.relation_get(rel_id, 'test-app', is_app=True))
+        self.assertEqual({'ingress-address': '192.0.2.1'},
+                         backend.relation_get(rel_id, 'test-app/0', is_app=False))
+
+        harness.begin()
+        self.assertEqual({'k': 'v1'}, backend.relation_get(rel_id, 'test-app', is_app=True))
+        self.assertEqual({'ingress-address': '192.0.2.1'},
+                         backend.relation_get(rel_id, 'test-app/0', is_app=False))
+        # Make sure no relation-changed events are emitted for our own data bags.
+        self.assertEqual([], harness.charm.observed_events)
+
+        # A remote unit can still update our app relation data bag since our unit is not a leader.
+        harness.update_relation_data(rel_id, 'test-app', {'k': 'v2'})
+        # But no relation-changed event is emitted for our own app data bag.
+        self.assertEqual([], harness.charm.observed_events)
+        # We can also update our own relation data, even if it is a bit 'cheaty'
+        harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'})
+        # But no event happens
+
+        # Updating our app relation data bag and our unit data bag does not generate events.
+ harness.set_leader(True) + harness.update_relation_data(rel_id, 'test-app', {'k': 'v3'}) + harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'}) + self.assertEqual([], harness.charm.observed_events) + + def test_add_peer_relation_with_initial_data_leader(self): + + class InitialDataTester(CharmBase): + """Record the relation-changed events.""" + + def __init__(self, framework): + super().__init__(framework) + self.observed_events = [] + self.framework.observe(self.on.cluster_relation_changed, + self._on_cluster_relation_changed) + + def _on_cluster_relation_changed(self, event): + self.observed_events.append(event) + + # language=YAML + harness = Harness(InitialDataTester, meta=''' + name: test-app + peers: + cluster: + interface: cluster + ''') + # TODO: dmitriis 2020-04-07 test a minion unit and initial peer relation app data + # events when the harness begins to emit events for initial data. + harness.set_leader(is_leader=True) + rel_id = harness.add_relation('cluster', 'test-app') + harness.update_relation_data(rel_id, 'test-app', {'k': 'v'}) + harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.1'}) + backend = harness._backend + self.assertEqual({'k': 'v'}, backend.relation_get(rel_id, 'test-app', is_app=True)) + self.assertEqual({'ingress-address': '192.0.2.1'}, + backend.relation_get(rel_id, 'test-app/0', is_app=False)) + + harness.begin() + self.assertEqual({'k': 'v'}, backend.relation_get(rel_id, 'test-app', is_app=True)) + self.assertEqual({'ingress-address': '192.0.2.1'}, + backend.relation_get(rel_id, 'test-app/0', is_app=False)) + # Make sure no relation-changed events are emitted for our own data bags. + self.assertEqual([], harness.charm.observed_events) + + # Updating our app relation data bag and our unit data bag does not trigger events + harness.update_relation_data(rel_id, 'test-app', {'k': 'v2'}) + harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'}) + self.assertEqual([], harness.charm.observed_events) + + # If our unit becomes a minion, updating app relation data indirectly becomes possible + # and our charm gets notifications. 
+        harness.set_leader(False)
+        harness.update_relation_data(rel_id, 'test-app', {'k': 'v3'})
+        self.assertEqual({'k': 'v3'}, backend.relation_get(rel_id, 'test-app', is_app=True))
+        self.assertEqual(len(harness.charm.observed_events), 1)
+        self.assertIsInstance(harness.charm.observed_events[0], RelationEvent)
+
+    def test_relation_events(self):
+        harness = Harness(RelationEventCharm, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        harness.begin()
+        harness.charm.observe_relation_events('db')
+        self.assertEqual(harness.charm.get_changes(), [])
+        rel_id = harness.add_relation('db', 'postgresql')
+        self.assertEqual(
+            harness.charm.get_changes(),
+            [{'name': 'relation-created',
+              'data': {
+                  'app': 'postgresql',
+                  'unit': None,
+                  'relation_id': rel_id,
+              }}])
+        harness.add_relation_unit(rel_id, 'postgresql/0')
+        self.assertEqual(
+            harness.charm.get_changes(),
+            [{'name': 'relation-joined',
+              'data': {
+                  'app': 'postgresql',
+                  'unit': 'postgresql/0',
+                  'relation_id': rel_id,
+              }}])
+        harness.update_relation_data(rel_id, 'postgresql', {'foo': 'bar'})
+        self.assertEqual(
+            harness.charm.get_changes(),
+            [{'name': 'relation-changed',
+              'data': {
+                  'app': 'postgresql',
+                  'unit': None,
+                  'relation_id': rel_id,
+              }}])
+        harness.update_relation_data(rel_id, 'postgresql/0', {'baz': 'bing'})
+        self.assertEqual(
+            harness.charm.get_changes(),
+            [{'name': 'relation-changed',
+              'data': {
+                  'app': 'postgresql',
+                  'unit': 'postgresql/0',
+                  'relation_id': rel_id,
+              }}])
+
+    def test_get_relation_data(self):
+        harness = Harness(CharmBase, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        rel_id = harness.add_relation('db', 'postgresql')
+        harness.update_relation_data(rel_id, 'postgresql', {'remote': 'data'})
+        self.assertEqual(harness.get_relation_data(rel_id, 'test-app'), {})
+        self.assertEqual(harness.get_relation_data(rel_id, 'test-app/0'), {})
+        self.assertEqual(harness.get_relation_data(rel_id, 'test-app/1'), None)
+        self.assertEqual(harness.get_relation_data(rel_id, 'postgresql'), {'remote': 'data'})
+        with self.assertRaises(KeyError):
+            # unknown relation id
+            harness.get_relation_data(99, 'postgresql')
+
+    def test_create_harness_twice(self):
+        metadata = '''
+            name: my-charm
+            requires:
+                db:
+                    interface: pgsql
+            '''
+        harness1 = Harness(CharmBase, meta=metadata)
+        harness2 = Harness(CharmBase, meta=metadata)
+        harness1.begin()
+        harness2.begin()
+        helper1 = DBRelationChangedHelper(harness1.charm, "helper1")
+        helper2 = DBRelationChangedHelper(harness2.charm, "helper2")
+        rel_id = harness2.add_relation('db', 'postgresql')
+        harness2.update_relation_data(rel_id, 'postgresql', {'key': 'value'})
+        # Helper2 should see the event triggered by harness2, but helper1 should see no events.
+ self.assertEqual(helper1.changes, []) + self.assertEqual(helper2.changes, [(rel_id, 'postgresql')]) + + def test_begin_twice(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: test-app + requires: + db: + interface: pgsql + ''') + harness.begin() + with self.assertRaises(RuntimeError): + harness.begin() + + def test_update_relation_exposes_new_data(self): + harness = Harness(CharmBase, meta=''' + name: my-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + viewer = RelationChangedViewer(harness.charm, 'db') + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + harness.update_relation_data(rel_id, 'postgresql/0', {'initial': 'data'}) + self.assertEqual(viewer.changes, [{'initial': 'data'}]) + harness.update_relation_data(rel_id, 'postgresql/0', {'new': 'value'}) + self.assertEqual(viewer.changes, [{'initial': 'data'}, + {'initial': 'data', 'new': 'value'}]) + + def test_update_relation_no_local_unit_change_event(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: my-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + helper = DBRelationChangedHelper(harness.charm, "helper") + rel_id = harness.add_relation('db', 'postgresql') + rel = harness.charm.model.get_relation('db') + rel.data[harness.charm.model.unit]['key'] = 'value' + # there should be no event for updating our own data + harness.update_relation_data(rel_id, 'my-charm/0', {'new': 'other'}) + # but the data will be updated. + self.assertEqual({'key': 'value', 'new': 'other'}, rel.data[harness.charm.model.unit]) + + rel.data[harness.charm.model.unit]['new'] = 'value' + # Our unit data bag got updated. + self.assertEqual(rel.data[harness.charm.model.unit]['new'], 'value') + # But there were no changed events registered by our unit. + self.assertEqual([], helper.changes) + + def test_update_peer_relation_no_local_unit_change_event(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: postgresql + peers: + db: + interface: pgsql + ''') + harness.begin() + helper = DBRelationChangedHelper(harness.charm, "helper") + rel_id = harness.add_relation('db', 'postgresql') + + rel = harness.charm.model.get_relation('db') + rel.data[harness.charm.model.unit]['key'] = 'value' + rel = harness.charm.model.get_relation('db') + harness.update_relation_data(rel_id, 'postgresql/0', {'key': 'v1'}) + self.assertEqual({'key': 'v1'}, rel.data[harness.charm.model.unit]) + # Make sure there was no event + self.assertEqual([], helper.changes) + + rel.data[harness.charm.model.unit]['key'] = 'v2' + # Our unit data bag got updated. + self.assertEqual({'key': 'v2'}, dict(rel.data[harness.charm.model.unit])) + # But there were no changed events registered by our unit. + self.assertEqual([], helper.changes) + + # Same for when our unit is a leader. 
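+        # Writes to the unit's own data bag never surface as relation-changed
+        # events; leadership only changes who may write the app data bag.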
+ harness.set_leader(is_leader=True) + harness.update_relation_data(rel_id, 'postgresql/0', {'key': 'v3'}) + self.assertEqual({'key': 'v3'}, dict(rel.data[harness.charm.model.unit])) + self.assertEqual([], helper.changes) + + rel.data[harness.charm.model.unit]['key'] = 'v4' + self.assertEqual(rel.data[harness.charm.model.unit]['key'], 'v4') + self.assertEqual([], helper.changes) + + def test_update_peer_relation_app_data(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: postgresql + peers: + db: + interface: pgsql + ''') + harness.begin() + harness.set_leader(is_leader=True) + helper = DBRelationChangedHelper(harness.charm, "helper") + rel_id = harness.add_relation('db', 'postgresql') + rel = harness.charm.model.get_relation('db') + rel.data[harness.charm.app]['key'] = 'value' + harness.update_relation_data(rel_id, 'postgresql', {'key': 'v1'}) + self.assertEqual({'key': 'v1'}, rel.data[harness.charm.app]) + self.assertEqual([], helper.changes) + + rel.data[harness.charm.app]['key'] = 'v2' + # Our unit data bag got updated. + self.assertEqual(rel.data[harness.charm.model.app]['key'], 'v2') + # But there were no changed events registered by our unit. + self.assertEqual([], helper.changes) + + # If our unit is not a leader unit we get an update about peer app relation data changes. + harness.set_leader(is_leader=False) + harness.update_relation_data(rel_id, 'postgresql', {'k2': 'v2'}) + self.assertEqual(rel.data[harness.charm.model.app]['k2'], 'v2') + self.assertEqual(helper.changes, [(0, 'postgresql')]) + + def test_update_relation_no_local_app_change_event(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: my-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + harness.set_leader(False) + helper = DBRelationChangedHelper(harness.charm, "helper") + rel_id = harness.add_relation('db', 'postgresql') + # TODO: remove this as soon as https://github.com/canonical/operator/issues/175 is fixed. + harness.add_relation_unit(rel_id, 'postgresql/0') + self.assertEqual(helper.changes, []) + + harness.update_relation_data(rel_id, 'my-charm', {'new': 'value'}) + rel = harness.charm.model.get_relation('db') + self.assertEqual(rel.data[harness.charm.app]['new'], 'value') + + # Our app data bag got updated. + self.assertEqual(rel.data[harness.charm.model.app]['new'], 'value') + # But there were no changed events registered by our unit. 
+ self.assertEqual(helper.changes, []) + + def test_update_relation_remove_data(self): + harness = Harness(CharmBase, meta=''' + name: my-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + viewer = RelationChangedViewer(harness.charm, 'db') + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + harness.update_relation_data(rel_id, 'postgresql/0', {'initial': 'data'}) + harness.update_relation_data(rel_id, 'postgresql/0', {'initial': ''}) + self.assertEqual(viewer.changes, [{'initial': 'data'}, {}]) + + def test_update_config(self): + harness = Harness(RecordingCharm) + harness.begin() + harness.update_config(key_values={'a': 'foo', 'b': 2}) + self.assertEqual( + harness.charm.changes, + [{'name': 'config', 'data': {'a': 'foo', 'b': 2}}]) + harness.update_config(key_values={'b': 3}) + self.assertEqual( + harness.charm.changes, + [{'name': 'config', 'data': {'a': 'foo', 'b': 2}}, + {'name': 'config', 'data': {'a': 'foo', 'b': 3}}]) + # you can set config values to the empty string, you can use unset to actually remove items + harness.update_config(key_values={'a': ''}, unset=set('b')) + self.assertEqual( + harness.charm.changes, + [{'name': 'config', 'data': {'a': 'foo', 'b': 2}}, + {'name': 'config', 'data': {'a': 'foo', 'b': 3}}, + {'name': 'config', 'data': {'a': ''}}, + ]) + + def test_set_leader(self): + harness = Harness(RecordingCharm) + # No event happens here + harness.set_leader(False) + harness.begin() + self.assertFalse(harness.charm.model.unit.is_leader()) + harness.set_leader(True) + self.assertEqual(harness.charm.get_changes(reset=True), [{'name': 'leader-elected'}]) + self.assertTrue(harness.charm.model.unit.is_leader()) + harness.set_leader(False) + self.assertFalse(harness.charm.model.unit.is_leader()) + # No hook event when you lose leadership. + # TODO: verify if Juju always triggers `leader-settings-changed` if you + # lose leadership. + self.assertEqual(harness.charm.get_changes(reset=True), []) + harness.disable_hooks() + harness.set_leader(True) + # No hook event if you have disabled them + self.assertEqual(harness.charm.get_changes(reset=True), []) + + def test_relation_set_app_not_leader(self): + harness = Harness(RecordingCharm, meta=''' + name: test-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + harness.set_leader(False) + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + rel = harness.charm.model.get_relation('db') + with self.assertRaises(ModelError): + rel.data[harness.charm.app]['foo'] = 'bar' + # The data has not actually been changed + self.assertEqual(harness.get_relation_data(rel_id, 'test-charm'), {}) + harness.set_leader(True) + rel.data[harness.charm.app]['foo'] = 'bar' + self.assertEqual(harness.get_relation_data(rel_id, 'test-charm'), {'foo': 'bar'}) + + def test_hooks_enabled_and_disabled(self): + harness = Harness(RecordingCharm, meta=''' + name: test-charm + ''') + # Before begin() there are no events. + harness.update_config({'value': 'first'}) + # By default, after begin the charm is set up to receive events. 
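+        # The pre-begin update above is applied to state but emits no event,
+        # which is why only 'second' shows up in the changes below.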
+ harness.begin() + harness.update_config({'value': 'second'}) + self.assertEqual( + harness.charm.get_changes(reset=True), + [{'name': 'config', 'data': {'value': 'second'}}]) + # Once disabled, we won't see config-changed when we make an update + harness.disable_hooks() + harness.update_config({'third': '3'}) + self.assertEqual(harness.charm.get_changes(reset=True), []) + harness.enable_hooks() + harness.update_config({'value': 'fourth'}) + self.assertEqual( + harness.charm.get_changes(reset=True), + [{'name': 'config', 'data': {'value': 'fourth', 'third': '3'}}]) + + def test_metadata_from_directory(self): + tmp = pathlib.Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(tmp)) + metadata_filename = tmp / 'metadata.yaml' + with metadata_filename.open('wt') as metadata: + metadata.write(textwrap.dedent(''' + name: my-charm + requires: + db: + interface: pgsql + ''')) + harness = self._get_dummy_charm_harness(tmp) + harness.begin() + self.assertEqual(list(harness.model.relations), ['db']) + # The charm_dir also gets set + self.assertEqual(harness.framework.charm_dir, tmp) + + def test_set_model_name(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + ''') + harness.set_model_name('foo') + self.assertEqual('foo', harness.model.name) + + def test_set_model_name_after_begin(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + ''') + harness.set_model_name('bar') + harness.begin() + with self.assertRaises(RuntimeError): + harness.set_model_name('foo') + self.assertEqual(harness.model.name, 'bar') + + def test_actions_from_directory(self): + tmp = pathlib.Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(tmp)) + actions_filename = tmp / 'actions.yaml' + with actions_filename.open('wt') as actions: + actions.write(textwrap.dedent(''' + test: + description: a dummy action + ''')) + harness = self._get_dummy_charm_harness(tmp) + harness.begin() + self.assertEqual(list(harness.framework.meta.actions), ['test']) + # The charm_dir also gets set + self.assertEqual(harness.framework.charm_dir, tmp) + + def _get_dummy_charm_harness(self, tmp): + self._write_dummy_charm(tmp) + charm_mod = importlib.import_module('charm') + harness = Harness(charm_mod.MyTestingCharm) + return harness + + def _write_dummy_charm(self, tmp): + srcdir = tmp / 'src' + srcdir.mkdir(0o755) + charm_filename = srcdir / 'charm.py' + with charm_filename.open('wt') as charmpy: + # language=Python + charmpy.write(textwrap.dedent(''' + from ops.charm import CharmBase + class MyTestingCharm(CharmBase): + pass + ''')) + orig = sys.path[:] + sys.path.append(str(srcdir)) + + def cleanup(): + sys.path = orig + sys.modules.pop('charm') + + self.addCleanup(cleanup) + + def test_actions_passed_in(self): + harness = Harness( + CharmBase, + meta=''' + name: test-app + ''', + actions=''' + test-action: + description: a dummy test action + ''') + self.assertEqual(list(harness.framework.meta.actions), ['test-action']) + + def test_relation_set_deletes(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + harness.set_leader(False) + rel_id = harness.add_relation('db', 'postgresql') + harness.update_relation_data(rel_id, 'test-charm/0', {'foo': 'bar'}) + harness.add_relation_unit(rel_id, 'postgresql/0') + rel = harness.charm.model.get_relation('db', rel_id) + del rel.data[harness.charm.model.unit]['foo'] + self.assertEqual({}, harness.get_relation_data(rel_id, 'test-charm/0')) + + def test_set_workload_version(self): + 
harness = Harness(CharmBase, meta=''' + name: app + ''') + harness.begin() + self.assertIsNone(harness.get_workload_version()) + harness.charm.model.unit.set_workload_version('1.2.3') + self.assertEqual(harness.get_workload_version(), '1.2.3') + + def test_get_backend_calls(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + # No calls to the backend yet + self.assertEqual(harness._get_backend_calls(), []) + rel_id = harness.add_relation('db', 'postgresql') + # update_relation_data ensures the cached data for the relation is wiped + harness.update_relation_data(rel_id, 'test-charm/0', {'foo': 'bar'}) + self.assertEqual( + harness._get_backend_calls(reset=True), [ + ('relation_ids', 'db'), + ('relation_list', rel_id), + ]) + # add_relation_unit resets the relation_list, but doesn't trigger backend calls + harness.add_relation_unit(rel_id, 'postgresql/0') + self.assertEqual([], harness._get_backend_calls(reset=False)) + # however, update_relation_data does, because we are preparing relation-changed + harness.update_relation_data(rel_id, 'postgresql/0', {'foo': 'bar'}) + self.assertEqual( + harness._get_backend_calls(reset=False), [ + ('relation_ids', 'db'), + ('relation_list', rel_id), + ]) + # If we check again, they are still there, but now we reset it + self.assertEqual( + harness._get_backend_calls(reset=True), [ + ('relation_ids', 'db'), + ('relation_list', rel_id), + ]) + # And the calls are gone + self.assertEqual(harness._get_backend_calls(), []) + + def test_get_backend_calls_with_kwargs(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + unit = harness.charm.model.unit + # Reset the list, because we don't care what it took to get here + harness._get_backend_calls(reset=True) + unit.status = ActiveStatus() + self.assertEqual( + [('status_set', 'active', '', {'is_app': False})], harness._get_backend_calls()) + harness.set_leader(True) + app = harness.charm.model.app + harness._get_backend_calls(reset=True) + app.status = ActiveStatus('message') + self.assertEqual( + [('is_leader',), + ('status_set', 'active', 'message', {'is_app': True})], + harness._get_backend_calls()) + + def test_unit_status(self): + harness = Harness(CharmBase, meta='name: test-app') + harness.set_leader(True) + harness.begin() + # default status + self.assertEqual(harness.model.unit.status, MaintenanceStatus('')) + status = ActiveStatus('message') + harness.model.unit.status = status + self.assertEqual(harness.model.unit.status, status) + + def test_app_status(self): + harness = Harness(CharmBase, meta='name: test-app') + harness.set_leader(True) + harness.begin() + # default status + self.assertEqual(harness.model.app.status, UnknownStatus()) + status = ActiveStatus('message') + harness.model.app.status = status + self.assertEqual(harness.model.app.status, status) + + +class DBRelationChangedHelper(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.changes = [] + parent.framework.observe(parent.on.db_relation_changed, self.on_relation_changed) + + def on_relation_changed(self, event): + if event.unit is not None: + self.changes.append((event.relation.id, event.unit.name)) + else: + self.changes.append((event.relation.id, event.app.name)) + + +class RelationChangedViewer(Object): + """Track relation_changed events and saves the data seen in the relation bucket.""" + + def __init__(self, charm, relation_name): + 
super().__init__(charm, relation_name) + self.changes = [] + charm.framework.observe(charm.on[relation_name].relation_changed, self.on_relation_changed) + + def on_relation_changed(self, event): + if event.unit is not None: + data = event.relation.data[event.unit] + else: + data = event.relation.data[event.app] + self.changes.append(dict(data)) + + +class RecordingCharm(CharmBase): + """Record the events that we see, and any associated data.""" + + def __init__(self, framework): + super().__init__(framework) + self.changes = [] + self.framework.observe(self.on.config_changed, self.on_config_changed) + self.framework.observe(self.on.leader_elected, self.on_leader_elected) + + def get_changes(self, reset=True): + changes = self.changes + if reset: + self.changes = [] + return changes + + def on_config_changed(self, _): + self.changes.append(dict(name='config', data=dict(self.framework.model.config))) + + def on_leader_elected(self, _): + self.changes.append(dict(name='leader-elected')) + + +class RelationEventCharm(RecordingCharm): + """Record events related to relation lifecycles.""" + + def __init__(self, framework): + super().__init__(framework) + + def observe_relation_events(self, relation_name): + self.framework.observe(self.on[relation_name].relation_created, self._on_relation_created) + self.framework.observe(self.on[relation_name].relation_joined, self._on_relation_joined) + self.framework.observe(self.on[relation_name].relation_changed, self._on_relation_changed) + self.framework.observe(self.on[relation_name].relation_departed, + self._on_relation_departed) + self.framework.observe(self.on[relation_name].relation_broken, self._on_relation_broken) + + def _on_relation_created(self, event): + self._observe_relation_event('relation-created', event) + + def _on_relation_joined(self, event): + self._observe_relation_event('relation-joined', event) + + def _on_relation_changed(self, event): + self._observe_relation_event('relation-changed', event) + + def _on_relation_departed(self, event): + self._observe_relation_event('relation-departed', event) + + def _on_relation_broken(self, event): + self._observe_relation_event('relation-broken', event) + + def _observe_relation_event(self, event_name, event): + unit_name = None + if event.unit is not None: + unit_name = event.unit.name + app_name = None + if event.app is not None: + app_name = event.app.name + self.changes.append( + dict(name=event_name, + data=dict(app=app_name, unit=unit_name, relation_id=event.relation.id))) + + +class TestTestingModelBackend(unittest.TestCase): + + def test_status_set_get_unit(self): + harness = Harness(CharmBase, meta=''' + name: app + ''') + backend = harness._backend + backend.status_set('blocked', 'message', is_app=False) + self.assertEqual( + backend.status_get(is_app=False), + {'status': 'blocked', 'message': 'message'}) + self.assertEqual( + backend.status_get(is_app=True), + {'status': 'unknown', 'message': ''}) + + def test_status_set_get_app(self): + harness = Harness(CharmBase, meta=''' + name: app + ''') + backend = harness._backend + backend.status_set('blocked', 'message', is_app=True) + self.assertEqual( + backend.status_get(is_app=True), + {'status': 'blocked', 'message': 'message'}) + self.assertEqual( + backend.status_get(is_app=False), + {'status': 'maintenance', 'message': ''}) + + def test_relation_ids_unknown_relation(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + provides: + db: + interface: mydb + ''') + backend = harness._backend + # With no relations added, we 
just get an empty list for the interface + self.assertEqual(backend.relation_ids('db'), []) + # But an unknown interface raises a ModelError + with self.assertRaises(ModelError): + backend.relation_ids('unknown') + + def test_relation_get_unknown_relation_id(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + ''') + backend = harness._backend + with self.assertRaises(RelationNotFoundError): + backend.relation_get(1234, 'unit/0', False) + + def test_relation_list_unknown_relation_id(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + ''') + backend = harness._backend + with self.assertRaises(RelationNotFoundError): + backend.relation_list(1234) + + def test_populate_oci_resources(self): + harness = Harness(CharmBase, meta=''' + name: test-app + resources: + image: + type: oci-image + description: "Image to deploy." + image2: + type: oci-image + description: "Another image." + ''') + harness.populate_oci_resources() + resource = harness._resource_dir / "image" / "contents.yaml" + with resource.open('r') as resource_file: + contents = yaml.safe_load(resource_file.read()) + self.assertEqual(contents['registrypath'], 'registrypath') + self.assertEqual(contents['username'], 'username') + self.assertEqual(contents['password'], 'password') + self.assertEqual(len(harness._backend._resources_map), 2) + + def test_resource_folder_cleanup(self): + harness = Harness(CharmBase, meta=''' + name: test-app + resources: + image: + type: oci-image + description: "Image to deploy." + ''') + harness.populate_oci_resources() + resource = harness._resource_dir / "image" / "contents.yaml" + del harness + with self.assertRaises(FileNotFoundError): + with resource.open('r') as resource_file: + print("This shouldn't be here: {}".format(resource_file)) + + def test_add_oci_resource_custom(self): + harness = Harness(CharmBase, meta=''' + name: test-app + resources: + image: + type: oci-image + description: "Image to deploy." + ''') + custom = { + "registrypath": "custompath", + "username": "custom_username", + "password": "custom_password", + } + harness.add_oci_resource('image', custom) + resource = harness._resource_dir / "image" / "contents.yaml" + with resource.open('r') as resource_file: + contents = yaml.safe_load(resource_file.read()) + self.assertEqual(contents['registrypath'], 'custompath') + self.assertEqual(contents['username'], 'custom_username') + self.assertEqual(contents['password'], 'custom_password') + self.assertEqual(len(harness._backend._resources_map), 1) + + def test_add_oci_resource_no_image(self): + harness = Harness(CharmBase, meta=''' + name: test-app + resources: + image: + type: file + description: "Image to deploy." 
+            ''')
+        with self.assertRaises(RuntimeError):
+            harness.add_oci_resource("image")
+        with self.assertRaises(RuntimeError):
+            harness.add_oci_resource("missing-resource")
+        self.assertEqual(len(harness._backend._resources_map), 0)
diff --git a/hackfest_firewall_pnf/charms/vyos-config/playbooks/backup.yaml b/hackfest_firewall_pnf/charms/vyos-config/playbooks/backup.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a98eebf19f8f1cad0e8723f353143debff9fd134
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/playbooks/backup.yaml
@@ -0,0 +1,8 @@
+- hosts: vyos-routers
+  connection: local
+  tasks:
+  - name: Perform a backup of the current configuration
+    vyos_config:
+      lines:
+        - save "/home/osm/{{ backupFile }}"
+
diff --git a/hackfest_firewall_pnf/charms/vyos-config/playbooks/configure-remote.yaml b/hackfest_firewall_pnf/charms/vyos-config/playbooks/configure-remote.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cc0232fae05dca7c81301a771a07b133f006a75c
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/playbooks/configure-remote.yaml
@@ -0,0 +1,8 @@
+- hosts: vyos-routers
+  connection: local
+  tasks:
+  - name: Add a host to the firewall group's list of allowed networks
+    vyos_config:
+      lines:
+        - set firewall group network-group MAGMA_AGW network "{{ MAGMA_AGW_IP }}/32"
+
diff --git a/hackfest_firewall_pnf/charms/vyos-config/playbooks/restore.yaml b/hackfest_firewall_pnf/charms/vyos-config/playbooks/restore.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..20051c42fa13639fc25ca99e5751022e0fbe0249
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/playbooks/restore.yaml
@@ -0,0 +1,7 @@
+- hosts: vyos-routers
+  connection: local
+  tasks:
+  - name: Restore a previously created backup
+    vyos_config:
+      lines:
+        - load "/home/osm/{{ backupFile }}"
diff --git a/hackfest_firewall_pnf/charms/vyos-config/requirements.txt b/hackfest_firewall_pnf/charms/vyos-config/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hackfest_firewall_pnf/charms/vyos-config/src/charm.py b/hackfest_firewall_pnf/charms/vyos-config/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..b110e1c95fac46445b6f50c5334716fab6705f98
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config/src/charm.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import sys + +sys.path.append("lib") + +from ops.charm import CharmBase, CharmEvents +from ops.framework import StoredState, EventBase, EventSource +from ops.main import main +from ops.model import ( + ActiveStatus, + BlockedStatus, + MaintenanceStatus, + WaitingStatus, + ModelError, +) +import os +import subprocess +import traceback + +from charms.osm.sshproxy import SSHProxyCharm +from charms.osm import libansible + + +class VyosCharm(SSHProxyCharm): + def __init__(self, framework, key): + super().__init__(framework, key) + + # Register all of the events we want to observe + self.framework.observe(self.on.config_changed, self.on_config_changed) + self.framework.observe(self.on.install, self.on_install) + self.framework.observe(self.on.start, self.on_start) + self.framework.observe(self.on.upgrade_charm, self.on_upgrade_charm) + # Charm actions (primitives) + self.framework.observe( + self.on.configure_remote_action, self.on_configure_remote_action + ) + + def on_config_changed(self, event): + """Handle changes in configuration""" + super().on_config_changed(event) + + def on_install(self, event): + """Called when the charm is being installed""" + super().on_install(event) + self.unit.status = MaintenanceStatus("Installing Ansible") + libansible.install_ansible_support() + self.unit.status = ActiveStatus() + + def on_start(self, event): + """Called when the charm is being started""" + super().on_start(event) + + def on_configure_remote_action(self, event): + """Configure remote.""" + + if self.unit.is_leader(): + try: + config = self.model.config + magmaIP = event.params["magmaIP"] + dict_vars = {"MAGMA_AGW_IP": magmaIP} + result = libansible.execute_playbook( + "configure-remote.yaml", + config["ssh-hostname"], + config["ssh-username"], + config["ssh-password"], + dict_vars, + ) + event.set_results({"output": result}) + except: + exc_type, exc_value, exc_traceback = sys.exc_info() + err = traceback.format_exception(exc_type, exc_value, exc_traceback) + event.fail(message="configure-remote failed: " + str(err)) + + else: + event.fail("Unit is not leader") + return + + def on_upgrade_charm(self, event): + """Upgrade the charm.""" + + +if __name__ == "__main__": + main(VyosCharm) diff --git a/hackfest_firewall_pnf/cloud_init/vyos-userdata b/hackfest_firewall_pnf/cloud_init/vyos-userdata new file mode 100644 index 0000000000000000000000000000000000000000..e888432723fdda1ffbd7a1e129ba0b4bdda5a39d --- /dev/null +++ b/hackfest_firewall_pnf/cloud_init/vyos-userdata @@ -0,0 +1,23 @@ +#!/bin/vbash +source /opt/vyatta/etc/functions/script-template +# Interface Config eth0 +set interfaces ethernet eth0 address 172.21.250.200/22 +set interfaces ethernet eth0 description VyOS-eth0 +# Interface Config eth1 +set interfaces ethernet eth1 address 10.0.0.1/24 +set interfaces ethernet eth1 description VyOS-eth1 +# Interface Config eth2 +set interfaces ethernet eth2 address 10.1.0.1/24 +set interfaces ethernet eth2 description VyOS-eth2 +# System config +set system host-name vyos-osmTest +set service ssh listen-address 0.0.0.0 +set service ssh port 22 +set system login user osm authentication plaintext-password osm2020 +# SNMP +set service snmp community public authorization ro +set service snmp location "OSM Labs" +set service snmp contact "glavado@whitestack.com" +# Save +commit +save diff --git a/hackfest_firewall_pnf/hackfest_firewall_pnfd.yaml b/hackfest_firewall_pnf/hackfest_firewall_pnfd.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..484f8e886bd6af44df84fcb4fb889d70b77c17c9 --- /dev/null +++ b/hackfest_firewall_pnf/hackfest_firewall_pnfd.yaml @@ -0,0 +1,65 @@ +vnfd: + description: PNF entry for a firewall router + df: + - id: default-df + instantiation-level: + - id: default-instantiation-level + vdu-level: + - number-of-instances: 1 + vdu-id: vyos-VM + vdu-profile: + - id: vyos-VM + min-number-of-instances: 1 + lcm-operations-configuration: + operate-vnf-op-config: + day1-2: + - config-primitive: + - name: configure-remote + parameter: + - data-type: STRING + default-value: 0.0.0.0 + name: magmaIP + id: default-vnf-configuration + config-access: + ssh-access: + default-user: osm + required: true + initial-config-primitive: + - name: config + parameter: + - name: ssh-username + value: osm + - name: ssh-password + value: osm2021 + - name: ssh-hostname + value: + seq: 1 + execution-environment-list: + - id: vyos-config-ee + juju: + charm: vyos-config + #cloud: k8s + ext-cpd: + - id: gateway_public + int-cpd: + cpd: gateway_public + vdu-id: vyos-VM + - id: vnf_internal + int-cpd: + cpd: vnf_internal + vdu-id: vyos-VM + id: hackfest_firewall_pnf + mgmt-cp: gateway_public + product-name: hackfest_firewall_pnf + vdu: + - id: vyos-VM + pdu-type: gateway + int-cpd: + - id: gateway_public + virtual-network-interface-requirement: + - name: gateway_public + - id: vnf_internal + virtual-network-interface-requirement: + - name: vnf_internal + name: vyos-VM + version: '1.0' \ No newline at end of file diff --git a/hackfest_firewall_pnf/snmp/snmp.yml b/hackfest_firewall_pnf/snmp/snmp.yml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/hackfest_firewall_pnf_ns/README b/hackfest_firewall_pnf_ns/README new file mode 100644 index 0000000000000000000000000000000000000000..77850bb3719ade9bd4a781275d7825178ac113ba --- /dev/null +++ b/hackfest_firewall_pnf_ns/README @@ -0,0 +1,2 @@ +Descriptor created by OSM descriptor package generated +Created on Tue Jan 14 17:49:17 UTC 2020 diff --git a/hackfest_firewall_pnf_ns/hackfest_firewall_pnf_nsd.yaml b/hackfest_firewall_pnf_ns/hackfest_firewall_pnf_nsd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cca94bb8e4ac79f86cf9adebcf7c324d855641d6 --- /dev/null +++ b/hackfest_firewall_pnf_ns/hackfest_firewall_pnf_nsd.yaml @@ -0,0 +1,28 @@ +nsd: + nsd: + - description: PNF Firewall + designer: ETSI OSM + df: + - id: default-df + vnf-profile: + - id: VYOS-PNF + virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: VYOS-PNF + constituent-cpd-id: gateway_public + virtual-link-profile-id: mgmt + - constituent-cpd-id: + - constituent-base-element-id: VYOS-PNF + constituent-cpd-id: vnf_internal + virtual-link-profile-id: private + vnfd-id: hackfest_firewall_pnf + id: hackfest_firewall_pnf_ns + name: hackfest_firewall_pnf_ns + version: '1.0' + virtual-link-desc: + - id: mgmt + mgmt-network: true + - id: private + vim-network-name: private + vnfd-id: + - hackfest_firewall_pnf diff --git a/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml b/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml index 124625188e463ad98cfd4341b4e5365a1738e308..497fc1c22784cbdff6467d4f932110a88694f73c 100644 --- a/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml +++ b/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml @@ -22,16 +22,21 @@ nsd: vnf-profile: - id: '1' virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: '1' + 
diff --git a/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml b/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml
index 124625188e463ad98cfd4341b4e5365a1738e308..497fc1c22784cbdff6467d4f932110a88694f73c 100644
--- a/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml
+++ b/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml
@@ -22,16 +22,21 @@ nsd:
       vnf-profile:
       - id: '1'
         virtual-link-connectivity:
+        - constituent-cpd-id:
+          - constituent-base-element-id: '1'
+            constituent-cpd-id: virtual-pc-private-ext
+          virtual-link-profile-id: private
         - constituent-cpd-id:
           - constituent-base-element-id: '1'
             constituent-cpd-id: virtual-pc-mgmt-ext
           virtual-link-profile-id: mgmtnet
-        vnfd-id: virtual-pc_vnfd
-    id: hackfest_virtual-pc_nsd
-    name: hackfest_virtual-pc_nsd
+        vnfd-id: hackfest_virtual-pc_vnf
+    id: hackfest_virtual-pc_ns
+    name: hackfest_virtual-pc_ns
     version: '1.0'
     virtual-link-desc:
     - id: mgmtnet
       mgmt-network: 'true'
+    - id: private
     vnfd-id:
-    - virtual-pc_vnfd
+    - hackfest_virtual-pc_vnf
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/actions.yaml b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/actions.yaml
index 9a14be4d84b051a438fa574c9be45de53902e828..867b12fb7529627480b3ecf66786f510becf471a 100644
--- a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/actions.yaml
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/actions.yaml
@@ -1,6 +1,33 @@
-# Copyright 2020 ETSI OSM Contributors
+# Copyright ETSI OSM Contributors
 # See LICENSE file for licensing details.
 #
 # This is only an example, and you should edit to suit your needs.
 # If you don't need actions, you can remove the file entirely.
 # It ties in to the example _on_fortune_action handler in src/charm.py
+
+add-package:
+  description: "Adds software packages via apt."
+  params:
+    package:
+      description: "Names of packages to add, comma delimited."
+      type: string
+      default: ""
+  required:
+    - package
+
+reboot:
+  description: "Reboots the server."
+
+remove-package:
+  description: "Removes software packages from the system."
+  params:
+    package:
+      description: "Names of packages to remove, comma delimited."
+      type: string
+      default: ""
+  required:
+    - package
+
+update-system:
+  description: "Updates all software to the latest version."
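Editor's note: both `add-package` and `remove-package` take a single comma-delimited string, which the handlers in charm.py (next diff) split with `.split(',')`. A hedged helper showing how that raw value can be normalised before being handed to apt; the function name is illustrative, not part of the charm:

```python
# Hypothetical normaliser for the comma-delimited "package" action parameter.
def parse_packages(raw):
    """Split 'pkg1, pkg2,' into a clean list, trimming whitespace and
    dropping empty entries left by trailing commas."""
    return [p.strip() for p in raw.split(",") if p.strip()]


assert parse_packages("firefox, xrdp,") == ["firefox", "xrdp"]
```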
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/charm.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/charm.py
index e068304f72a6fcaa2fce64ea711685f5c64c1690..579645795e3905e00db0ba2cf98c96e7906e44cd 100755
--- a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/charm.py
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/charm.py
@@ -27,6 +27,8 @@ from utils import (
 # from typing import Dict, Any
 
 logger = logging.getLogger(__name__)
+APT_PROXY_PATH = "/etc/apt/apt.conf.d/99-HIVE-apt-proxy"
+APT_PROXY_TEMPLATE = "./templates/proxy"
 APT_REQUIREMENTS = [
     "firefox",
     "mate-desktop",  # 469 packages
@@ -66,6 +68,10 @@ class VirtualPCCharm(CharmBase, InstallProgress):
         self.framework.observe(self.on.update_status, self._on_update_status)
 
         # Actions hooks
+        self.framework.observe(self.on["add-package"].action, self._add_package)
+        self.framework.observe(self.on["reboot"].action, self._reboot)
+        self.framework.observe(self.on["remove-package"].action, self._remove_package)
+        self.framework.observe(self.on["update-system"].action, self._update_system)
 
         # Relations hooks
 
@@ -76,6 +82,13 @@ class VirtualPCCharm(CharmBase, InstallProgress):
 
     # Basic hooks
     def _on_install(self, _):
+
+        self.unit.status = MaintenanceStatus("Setting up apt proxy")
+        with open(APT_PROXY_TEMPLATE, "r") as template:
+            content = Template(template.read()).render()
+        with open(APT_PROXY_PATH, "w") as proxy:
+            proxy.write(content)
+        self.unit.status = MaintenanceStatus("Installing apt packages")
         install_apt(packages=APT_REQUIREMENTS, update=True, progress=self)
         service_stop('xrdp')
@@ -122,6 +135,27 @@ class VirtualPCCharm(CharmBase, InstallProgress):
         self.unit.status = self._get_current_status()
 
     # Action hooks
+    def _add_package(self, event):
+        self.unit.status = MaintenanceStatus("Installing apt packages")
+        install_apt(packages=event.params["package"].split(','),
+                    update=True, progress=self)
+        self.unit.status = self._get_current_status()
+
+    def _reboot(self, _):
+        self.unit.status = MaintenanceStatus("Rebooting server")
+        shell("reboot --reboot")
+        self.unit.status = self._get_current_status()
+
+    def _remove_package(self, event):
+        self.unit.status = MaintenanceStatus("Removing apt packages")
+        remove_apt(packages=event.params["package"].split(','),
+                   update=True, progress=self)
+        self.unit.status = self._get_current_status()
+
+    def _update_system(self, _):
+        self.unit.status = MaintenanceStatus("Updating system")
+        self.unit.status = self._get_current_status()
+
     # Relation hooks
 
     # Private functions
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/actions.yaml b/hackfest_virtual-pc_vnfd/charms/virtual-pc/actions.yaml
index 9a14be4d84b051a438fa574c9be45de53902e828..867b12fb7529627480b3ecf66786f510becf471a 100644
--- a/hackfest_virtual-pc_vnfd/charms/virtual-pc/actions.yaml
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/actions.yaml
@@ -1,6 +1,33 @@
-# Copyright 2020 ETSI OSM Contributors
+# Copyright ETSI OSM Contributors
 # See LICENSE file for licensing details.
 #
 # This is only an example, and you should edit to suit your needs.
 # If you don't need actions, you can remove the file entirely.
 # It ties in to the example _on_fortune_action handler in src/charm.py
+
+add-package:
+  description: "Adds software packages via apt."
+  params:
+    package:
+      description: "Names of packages to add, comma delimited."
+      type: string
+      default: ""
+  required:
+    - package
+
+reboot:
+  description: "Reboots the server."
+
+remove-package:
+  description: "Removes software packages from the system."
+  params:
+    package:
+      description: "Names of packages to remove, comma delimited."
+      type: string
+      default: ""
+  required:
+    - package
+
+update-system:
+  description: "Updates all software to the latest version."
+
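Editor's note: the action handlers originally took `_` as their second argument while still referencing `event.params`, which would raise `NameError` at runtime; the signatures above were fixed to accept `event`. Note also that `_update_system` only flips the status and back; the actual upgrade logic is not included in this patch. A hedged unit-test sketch for the fixed `_add_package`, assuming the charm module is importable as `charm` and that `install_apt` is imported into its namespace:

```python
# Sketch of exercising _add_package with a stub event; paths are assumptions.
from unittest.mock import MagicMock, patch

from charm import VirtualPCCharm  # hypothetical import path


def test_add_package():
    event = MagicMock()
    event.params = {"package": "htop,tmux"}
    unit = MagicMock()  # stands in for the real charm instance
    with patch("charm.install_apt") as install_apt:
        # Call the handler unbound so no Juju environment is needed.
        VirtualPCCharm._add_package(unit, event)
        install_apt.assert_called_once_with(
            packages=["htop", "tmux"], update=True, progress=unit
        )
```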
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/charm.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/charm.py
index 1d72001264ceaa7b563b4ac82650d79669157e45..579645795e3905e00db0ba2cf98c96e7906e44cd 100755
--- a/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/charm.py
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/charm.py
@@ -27,6 +27,8 @@ from utils import (
 # from typing import Dict, Any
 
 logger = logging.getLogger(__name__)
+APT_PROXY_PATH = "/etc/apt/apt.conf.d/99-HIVE-apt-proxy"
+APT_PROXY_TEMPLATE = "./templates/proxy"
 APT_REQUIREMENTS = [
     "firefox",
     "mate-desktop",  # 469 packages
@@ -34,7 +36,6 @@ APT_REQUIREMENTS = [
     "mate-applet-brisk-menu",
     "mate-indicator-applet",
     "mate-session-manager",
-    "indicator-applet-session",
     "mate-terminal",
     "xrdp",
 ]
@@ -67,6 +68,10 @@ class VirtualPCCharm(CharmBase, InstallProgress):
         self.framework.observe(self.on.update_status, self._on_update_status)
 
         # Actions hooks
+        self.framework.observe(self.on["add-package"].action, self._add_package)
+        self.framework.observe(self.on["reboot"].action, self._reboot)
+        self.framework.observe(self.on["remove-package"].action, self._remove_package)
+        self.framework.observe(self.on["update-system"].action, self._update_system)
 
         # Relations hooks
 
@@ -77,6 +82,13 @@ class VirtualPCCharm(CharmBase, InstallProgress):
 
     # Basic hooks
     def _on_install(self, _):
+
+        self.unit.status = MaintenanceStatus("Setting up apt proxy")
+        with open(APT_PROXY_TEMPLATE, "r") as template:
+            content = Template(template.read()).render()
+        with open(APT_PROXY_PATH, "w") as proxy:
+            proxy.write(content)
+        self.unit.status = MaintenanceStatus("Installing apt packages")
         install_apt(packages=APT_REQUIREMENTS, update=True, progress=self)
         service_stop('xrdp')
@@ -123,6 +135,27 @@ class VirtualPCCharm(CharmBase, InstallProgress):
         self.unit.status = self._get_current_status()
 
     # Action hooks
+    def _add_package(self, event):
+        self.unit.status = MaintenanceStatus("Installing apt packages")
+        install_apt(packages=event.params["package"].split(','),
+                    update=True, progress=self)
+        self.unit.status = self._get_current_status()
+
+    def _reboot(self, _):
+        self.unit.status = MaintenanceStatus("Rebooting server")
+        shell("reboot --reboot")
+        self.unit.status = self._get_current_status()
+
+    def _remove_package(self, event):
+        self.unit.status = MaintenanceStatus("Removing apt packages")
+        remove_apt(packages=event.params["package"].split(','),
+                   update=True, progress=self)
+        self.unit.status = self._get_current_status()
+
+    def _update_system(self, _):
+        self.unit.status = MaintenanceStatus("Updating system")
+        self.unit.status = self._get_current_status()
+
     # Relation hooks
 
     # Private functions
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/templates/proxy b/hackfest_virtual-pc_vnfd/charms/virtual-pc/templates/proxy
new file mode 100644
index 0000000000000000000000000000000000000000..d134ea040a3da1a0ab9f9b4b92a0275d97365d75
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/templates/proxy
@@ -0,0 +1 @@
+Acquire::http { Proxy "http://172.21.18.4:3142"; }
\ No newline at end of file
diff --git a/hackfest_virtual-pc_vnfd/cloud_init/virtual-pc_init b/hackfest_virtual-pc_vnfd/cloud_init/virtual-pc_init
index a9acf62700fbcaed336d7a5213143e22da63a8c7..993b62947a1abebd2eff4754be75fec39bc9d5e0 100644
--- a/hackfest_virtual-pc_vnfd/cloud_init/virtual-pc_init
+++ b/hackfest_virtual-pc_vnfd/cloud_init/virtual-pc_init
@@ -1,4 +1,4 @@
 #cloud-config
-password: osm2020
+password: osm2021
 chpasswd: { expire: False }
 ssh_pwauth: True
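Editor's note: the proxy template points apt at an apt-cacher-ng instance (port 3142) on a lab host, so the 400+ MATE packages are fetched once and cached. After install, it is easy to verify on the unit that apt actually picked the proxy up; a small sketch, assuming it runs on the freshly configured machine:

```python
# Check the rendered proxy drop-in and apt's merged view of it.
import subprocess

with open("/etc/apt/apt.conf.d/99-HIVE-apt-proxy") as f:
    print(f.read())  # expect: Acquire::http { Proxy "http://172.21.18.4:3142"; }

# apt-config dump prints the effective configuration apt will use.
out = subprocess.run(
    ["apt-config", "dump"], capture_output=True, text=True, check=True
).stdout
assert "172.21.18.4:3142" in out, "apt proxy not applied"
```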
diff --git a/hackfest_virtual-pc_vnfd/virtual-pc_vnfd.yaml b/hackfest_virtual-pc_vnfd/virtual-pc_vnfd.yaml
index 49e4b4cfaa6ce484bdf0303915fa0a7f16e35f96..78484dd737cab871ca565f3254e6d7e1a80a500c 100644
--- a/hackfest_virtual-pc_vnfd/virtual-pc_vnfd.yaml
+++ b/hackfest_virtual-pc_vnfd/virtual-pc_vnfd.yaml
@@ -10,15 +10,52 @@ vnfd:
     vdu-profile:
     - id: virtual-pc
       min-number-of-instances: 1
-      vdu-configuration-id: virtual-pc-vdu-configuration
+  lcm-operations-configuration:
+    operate-vnf-op-config:
+      day1-2:
+      - config-access:
+          ssh-access:
+            default-user: ubuntu
+            required: true
+        id: virtual-pc
+        execution-environment-list:
+        - id: virtual-pc-ee
+          juju:
+            charm: virtual-pc
+            proxy: false
+        initial-config-primitive:
+        - name: config
+          execution-environment-ref: virtual-pc-ee
+          seq: 1
+        config-primitive:
+        - name: add-package
+          execution-environment-ref: virtual-pc-ee
+          parameter:
+          - data-type: STRING
+            name: package
+        - name: reboot
+          execution-environment-ref: virtual-pc-ee
+        - name: remove-package
+          execution-environment-ref: virtual-pc-ee
+          parameter:
+          - data-type: STRING
+            name: package
+        - name: update-system
+          execution-environment-ref: virtual-pc-ee
+
   ext-cpd:
-  - id: virtual-pc-mgmt-ext
+  - id: virtual-pc-private-ext
     int-cpd:
       cpd: eth0-int
       vdu-id: virtual-pc
-  id: virtual-pc_vnfd
+  - id: virtual-pc-mgmt-ext
+    int-cpd:
+      cpd: eth1-int
+      vdu-id: virtual-pc
+  id: hackfest_virtual-pc_vnf
   mgmt-cp: virtual-pc-mgmt-ext
-  product-name: virtual-pc_vnfd
+
+  product-name: hackfest_virtual-pc_vnf
   provider: Canonical
   sw-image-desc:
   - id: ubuntu20.04
@@ -34,33 +71,23 @@ vnfd:
       - name: eth0
         virtual-interface:
           type: PARAVIRT
+    - id: eth1-int
+      virtual-network-interface-requirement:
+      - name: eth1
+        virtual-interface:
+          type: PARAVIRT
     name: virtual-pc-vdu
     sw-image-desc: ubuntu20.04
     virtual-compute-desc: virtual-pc-vdu-compute
     virtual-storage-desc:
     - virtual-pc-vdu-storage
-  vdu-configuration:
-  - id: virtual-pc-vdu-configuration
-    config-access:
-      ssh-access:
-        required: true
-    config-access:
-      ssh-access:
-        default-user: ubuntu
-        required: true
-    initial-config-primitive:
-    - name: config
-      seq: '1'
-    juju:
-      charm: virtual-pc
-      proxy: false
   version: '1.0'
   virtual-compute-desc:
   - id: virtual-pc-vdu-compute
     virtual-cpu:
-      num-virtual-cpu: 4
+      num-virtual-cpu: 8
     virtual-memory:
-      size: 8.0
+      size: 32.0
   virtual-storage-desc:
   - id: virtual-pc-vdu-storage
-    size-of-storage: 20
+    size-of-storage: 120
diff --git a/hackfest_vyos_vnf/hackfest_vyos_vnfd.yaml b/hackfest_vyos_vnf/hackfest_vyos_vnfd.yaml
index c1b3033f91456aa1692075e1912da86d7dbfa382..acbf2474287ba2c3f2464197575a9fb030c32b6b 100644
--- a/hackfest_vyos_vnf/hackfest_vyos_vnfd.yaml
+++ b/hackfest_vyos_vnf/hackfest_vyos_vnfd.yaml
@@ -10,6 +10,7 @@ vnfd:
     vdu-profile:
     - id: vyos-VM
       min-number-of-instances: 1
+      vnf-configuration-id: default-vnf-configuration
   ext-cpd:
   - id: vnf-mgmt-ext
     int-cpd:
@@ -69,3 +70,28 @@ vnfd:
   virtual-storage-desc:
   - id: vyos-VM-storage
     size-of-storage: 10
+  vnf-configuration:
+  - config-primitive:
+    - name: configure-remote
+      parameter:
+      - data-type: STRING
+        default-value: 0.0.0.0
+        name: magmaIP
+    id: default-vnf-configuration
+    config-access:
+      ssh-access:
+        default-user: osm
+        required: true
+    initial-config-primitive:
+    - name: config
+      parameter:
+      - name: ssh-username
+        value: osm
+      - name: ssh-password
+        value: osm2020
+      - name: ssh-hostname
+        value:
+      seq: 1
+    juju:
+      charm: vyos-config
+      #cloud: k8s
\ No newline at end of file
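Editor's note: besides migrating the charm config from the deprecated `vdu-configuration` block to `lcm-operations-configuration`, this patch quadruples the VDU sizing (4 to 8 vCPUs, 8 to 32 GB RAM, 20 to 120 GB disk) to accommodate the MATE desktop payload. The new values can be read back from the descriptor with a few lines of PyYAML, assuming the repository layout shown in the diff header:

```python
# Read the resized compute/storage descriptors back out of the VNFD above.
import yaml

with open("hackfest_virtual-pc_vnfd/virtual-pc_vnfd.yaml") as f:
    vnfd = yaml.safe_load(f)["vnfd"]

compute = vnfd["virtual-compute-desc"][0]
storage = vnfd["virtual-storage-desc"][0]
print(compute["virtual-cpu"]["num-virtual-cpu"])  # 8
print(compute["virtual-memory"]["size"])          # 32.0 (GB)
print(storage["size-of-storage"])                 # 120 (GB)
```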
db4a5f231a98ea8ac547f6649003ccd9db138cc5..aa1da0ab9a12bb3b9e291be377390ffb378a79dd 100644
--- a/magma/pdu.yaml
+++ b/magma/pdu.yaml
@@ -1,5 +1,5 @@
 name: router01
-description: router 
+description: router
 type: gateway
 vim_accounts: [ 42ee7fda-2996-4de0-877f-61abc792ad92 ]
 shared: false
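Editor's note: this PDU descriptor is what lets the `pdu-type: gateway` VDU in the firewall PNFD bind to the physical router. Before onboarding it with the OSM client (the exact command and flags depend on your client version, so they are not shown here), a quick pre-flight check of the required fields can save a round trip; a minimal sketch using the key names from pdu.yaml:

```python
# Pre-flight validation of magma/pdu.yaml before onboarding; not part of OSM.
import yaml

with open("magma/pdu.yaml") as f:
    pdu = yaml.safe_load(f)

for key in ("name", "type", "vim_accounts"):
    assert key in pdu, f"pdu.yaml missing required field: {key}"

# vim_accounts must be a non-empty list of VIM account ids.
assert isinstance(pdu["vim_accounts"], list) and pdu["vim_accounts"]
print(f"PDU {pdu['name']} ({pdu['type']}) looks ready for onboarding")
```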