diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/actions.yaml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f381313a5dfa8cbbdb3a3773070b20dc065c7985
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/actions.yaml
@@ -0,0 +1,64 @@
+##
+# Copyright 2020 Canonical Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+touch:
+  description: "Touch a file on the VNF."
+  params:
+    filename:
+      description: "The name of the file to touch."
+      type: string
+      default: ""
+  required:
+    - filename
+
+mkdir:
+  description: "Create a folder on the VNF."
+  params:
+    foldername:
+      description: "The name of the folder to create."
+      type: string
+      default: ""
+  required:
+    - foldername
+
+# Standard OSM functions
+start:
+  description: "Start the service on the VNF."
+stop:
+  description: "Stop the service on the VNF."
+restart:
+  description: "Restart the service on the VNF."
+reboot:
+  description: "Reboot the VNF virtual machine."
+upgrade:
+  description: "Upgrade the software on the VNF."
+
+# Required by charms.osm.sshproxy
+run:
+  description: "Run an arbitrary command"
+  params:
+    command:
+      description: "The command to execute."
+      type: string
+      default: ""
+  required:
+    - command
+generate-ssh-key:
+  description: "Generate a new SSH keypair for this unit. This will replace any existing previously generated keypair."
+verify-ssh-credentials:
+  description: "Verify that this unit can authenticate with the server specified by ssh-hostname and ssh-username."
+get-ssh-public-key:
+  description: "Get the public SSH key for this unit."
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/config.yaml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..93e3cab01c0843418c57e4bc918eb933f2daf934
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/config.yaml
@@ -0,0 +1,41 @@
+##
+# Copyright 2020 Canonical Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+options:
+  ssh-hostname:
+    type: string
+    default: ""
+    description: "The hostname or IP address of the machine to connect to over SSH."
+  ssh-username:
+    type: string
+    default: ""
+    description: "The username to login as."
+  ssh-password:
+    type: string
+    default: ""
+    description: "The password used to authenticate."
+ ssh-public-key: + type: string + default: "" + description: "The public key of this unit." + ssh-key-type: + type: string + default: "rsa" + description: "The type of encryption to use for the SSH key." + ssh-key-bits: + type: int + default: 4096 + description: "The number of bits to use for the SSH key." diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/dispatch b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/dispatch new file mode 100755 index 0000000000000000000000000000000000000000..1aa29496032f924113fbce8ccaf26262ea95afbf --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/dispatch @@ -0,0 +1,4 @@ +#!/bin/sh + +JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv \ + exec ./src/charm.py diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/hooks/install b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/hooks/install new file mode 100755 index 0000000000000000000000000000000000000000..1aa29496032f924113fbce8ccaf26262ea95afbf --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/hooks/install @@ -0,0 +1,4 @@ +#!/bin/sh + +JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv \ + exec ./src/charm.py diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/hooks/start b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/hooks/start new file mode 100755 index 0000000000000000000000000000000000000000..1aa29496032f924113fbce8ccaf26262ea95afbf --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/hooks/start @@ -0,0 +1,4 @@ +#!/bin/sh + +JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv \ + exec ./src/charm.py diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/hooks/upgrade-charm b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/hooks/upgrade-charm new file mode 100755 index 0000000000000000000000000000000000000000..1aa29496032f924113fbce8ccaf26262ea95afbf --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/hooks/upgrade-charm @@ -0,0 +1,4 @@ +#!/bin/sh + +JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv \ + exec ./src/charm.py diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/libansible.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/libansible.py new file mode 100644 index 0000000000000000000000000000000000000000..32fd26ae7d63d42edef33982d5438669b191a361 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/libansible.py @@ -0,0 +1,108 @@ +## +# Copyright 2020 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+##
+
+import fnmatch
+import os
+import yaml
+import subprocess
+import sys
+
+sys.path.append("lib")
+import charmhelpers.fetch
+
+
+ansible_hosts_path = "/etc/ansible/hosts"
+
+
+def install_ansible_support(from_ppa=True, ppa_location="ppa:ansible/ansible"):
+    """Installs the ansible package.
+
+    By default it is installed from the `PPA`_ linked from
+    the ansible `website`_ or from a ppa specified by a charm config.
+
+    .. _PPA: https://launchpad.net/~rquillo/+archive/ansible
+    .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu
+
+    If from_ppa is False, you must ensure that the package is available
+    from a configured repository.
+    """
+    if from_ppa:
+        charmhelpers.fetch.add_source(ppa_location)
+        charmhelpers.fetch.apt_update(fatal=True)
+    charmhelpers.fetch.apt_install("ansible")
+    with open(ansible_hosts_path, "w+") as hosts_file:
+        hosts_file.write("localhost ansible_connection=local")
+
+
+def create_hosts(hostname, username, password, hosts):
+    inventory_path = "/etc/ansible/hosts"
+
+    with open(inventory_path, "w") as f:
+        f.write("[{}]\n".format(hosts))
+        h1 = "host ansible_host={0} ansible_user={1} ansible_password={2}\n".format(
+            hostname, username, password
+        )
+        f.write(h1)
+
+
+def create_ansible_cfg():
+    ansible_config_path = "/etc/ansible/ansible.cfg"
+
+    with open(ansible_config_path, "w") as f:
+        f.write("[defaults]\n")
+        f.write("host_key_checking = False\n")
+
+
+# Function to find the playbook path
+def find(pattern, path):
+    result = ""
+    for root, dirs, files in os.walk(path):
+        for name in files:
+            if fnmatch.fnmatch(name, pattern):
+                result = os.path.join(root, name)
+    return result
+
+
+def execute_playbook(playbook_file, hostname, user, password, vars_dict=None):
+    playbook_path = find(playbook_file, "/var/lib/juju/agents/")
+
+    with open(playbook_path, "r") as f:
+        playbook_data = yaml.safe_load(f)
+
+    hosts = "all"
+    if "hosts" in playbook_data[0].keys() and playbook_data[0]["hosts"]:
+        hosts = playbook_data[0]["hosts"]
+
+    create_ansible_cfg()
+    create_hosts(hostname, user, password, hosts)
+
+    call = "ansible-playbook {} ".format(playbook_path)
+
+    if vars_dict and isinstance(vars_dict, dict) and len(vars_dict) > 0:
+        call += "--extra-vars "
+
+        string_var = ""
+        for k, v in vars_dict.items():
+            string_var += "{}={} ".format(k, v)
+
+        string_var = string_var.strip()
+        call += '"{}"'.format(string_var)
+
+    call = call.strip()
+    result = subprocess.check_output(call, shell=True)
+
+    return result
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/ns.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/ns.py
new file mode 100644
index 0000000000000000000000000000000000000000..25be4056282e48ce946632025be8b466557e3171
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/ns.py
@@ -0,0 +1,301 @@
+# A prototype of a library to aid in the development and operation of
+# OSM Network Service charms
+
+import asyncio
+import logging
+import os
+import os.path
+import re
+import subprocess
+import sys
+import time
+import yaml
+
+try:
+    import juju
+except ImportError:
+    # Not all cloud images are created equal
+    if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"):
+        # Update the apt cache
+        subprocess.check_call(["apt-get", "update"])
+
+        # Install the Python3 package
+        subprocess.check_call(["apt-get", "install", "-y", "python3", "python3-pip"],)
+
+
+    # Install the libjuju build dependencies
+ subprocess.check_call(["apt-get", "install", "-y", "libffi-dev", "libssl-dev"],) + + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "juju"], + ) + +from juju.controller import Controller + +# Quiet the debug logging +logging.getLogger('websockets.protocol').setLevel(logging.INFO) +logging.getLogger('juju.client.connection').setLevel(logging.WARN) +logging.getLogger('juju.model').setLevel(logging.WARN) +logging.getLogger('juju.machine').setLevel(logging.WARN) + + +class NetworkService: + """A lightweight interface to the Juju controller. + + This NetworkService client is specifically designed to allow a higher-level + "NS" charm to interoperate with "VNF" charms, allowing for the execution of + Primitives across other charms within the same model. + """ + endpoint = None + user = 'admin' + secret = None + port = 17070 + loop = None + client = None + model = None + cacert = None + + def __init__(self, user, secret, endpoint=None): + + self.user = user + self.secret = secret + if endpoint is None: + addresses = os.environ['JUJU_API_ADDRESSES'] + for address in addresses.split(' '): + self.endpoint = address + else: + self.endpoint = endpoint + + # Stash the name of the model + self.model = os.environ['JUJU_MODEL_NAME'] + + # Load the ca-cert from agent.conf + AGENT_PATH = os.path.dirname(os.environ['JUJU_CHARM_DIR']) + with open("{}/agent.conf".format(AGENT_PATH), "r") as f: + try: + y = yaml.safe_load(f) + self.cacert = y['cacert'] + except yaml.YAMLError as exc: + print("Unable to find Juju ca-cert.") + raise exc + + # Create our event loop + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + async def connect(self): + """Connect to the Juju controller.""" + controller = Controller() + + print( + "Connecting to controller... ws://{}:{} as {}/{}".format( + self.endpoint, + self.port, + self.user, + self.secret[-4:].rjust(len(self.secret), "*"), + ) + ) + await controller.connect( + endpoint=self.endpoint, + username=self.user, + password=self.secret, + cacert=self.cacert, + ) + + return controller + + def __del__(self): + self.logout() + + async def disconnect(self): + """Disconnect from the Juju controller.""" + if self.client: + print("Disconnecting Juju controller") + await self.client.disconnect() + + def login(self): + """Login to the Juju controller.""" + if not self.client: + # Connect to the Juju API server + self.client = self.loop.run_until_complete(self.connect()) + return self.client + + def logout(self): + """Logout of the Juju controller.""" + + if self.loop: + print("Disconnecting from API") + self.loop.run_until_complete(self.disconnect()) + + def FormatApplicationName(self, *args): + """ + Generate a Juju-compatible Application name + + :param args tuple: Positional arguments to be used to construct the + application name. + + Limitations:: + - Only accepts characters a-z and non-consequitive dashes (-) + - Application name should not exceed 50 characters + + Examples:: + + FormatApplicationName("ping_pong_ns", "ping_vnf", "a") + """ + appname = "" + for c in "-".join(list(args)): + if c.isdigit(): + c = chr(97 + int(c)) + elif not c.isalpha(): + c = "-" + appname += c + + return re.sub('-+', '-', appname.lower()) + + def GetApplicationName(self, nsr_name, vnf_name, vnf_member_index): + """Get the runtime application name of a VNF/VDU. + + This will generate an application name matching the name of the deployed charm, + given the right parameters. 
+
+        :param nsr_name str: The name of the running Network Service, as specified at instantiation.
+        :param vnf_name str: The name of the VNF or VDU
+        :param vnf_member_index: The vnf-member-index as specified in the descriptor
+        """
+
+        application_name = self.FormatApplicationName(nsr_name, vnf_member_index, vnf_name)
+
+        # This matches the logic used by the LCM
+        application_name = application_name[0:48]
+        vca_index = int(vnf_member_index) - 1
+        application_name += '-' + chr(97 + vca_index // 26) + chr(97 + vca_index % 26)
+
+        return application_name
+
+    def ExecutePrimitiveGetOutput(self, application, primitive, params={}, timeout=600):
+        """Execute a single primitive and return its output.
+
+        This is a blocking method that will execute a single primitive and wait
+        for its completion before returning its output.
+
+        :param application str: The application name provided by `GetApplicationName`.
+        :param primitive str: The name of the primitive to execute.
+        :param params list: A list of parameters.
+        :param timeout int: A timeout, in seconds, to wait for the primitive to finish. Defaults to 600 seconds.
+        """
+        uuid = self.ExecutePrimitive(application, primitive, params)
+
+        status = None
+        output = None
+
+        starttime = time.time()
+        while time.time() < starttime + timeout:
+            status = self.GetPrimitiveStatus(uuid)
+            if status in ['completed', 'failed']:
+                break
+            time.sleep(10)
+
+        # When the primitive is done, get the output
+        if status in ['completed', 'failed']:
+            output = self.GetPrimitiveOutput(uuid)
+
+        return output
+
+    def ExecutePrimitive(self, application, primitive, params={}):
+        """Execute a primitive.
+
+        This is a non-blocking method to execute a primitive. It will return
+        the UUID of the queued primitive execution, which you can use
+        for subsequent calls to `GetPrimitiveStatus` and `GetPrimitiveOutput`.
+
+        :param application string: The name of the application
+        :param primitive string: The name of the Primitive.
+        :param params list: A list of parameters.
+
+        :returns uuid string: The UUID of the executed Primitive
+        """
+        uuid = None
+
+        if not self.client:
+            self.login()
+
+        model = self.loop.run_until_complete(
+            self.client.get_model(self.model)
+        )
+
+        # Get the application
+        if application in model.applications:
+            app = model.applications[application]
+
+            # Execute the primitive
+            unit = app.units[0]
+            if unit:
+                action = self.loop.run_until_complete(
+                    unit.run_action(primitive, **params)
+                )
+                uuid = action.id
+                print("Executing action: {}".format(uuid))
+            self.loop.run_until_complete(
+                model.disconnect()
+            )
+        else:
+            # Invalid mapping: application not found. Raise exception
+            raise Exception("Application not found: {}".format(application))
+
+        return uuid
+
+    def GetPrimitiveStatus(self, uuid):
+        """Get the status of a Primitive execution.
+
+        This will return one of the following strings:
+        - pending
+        - running
+        - completed
+        - failed
+
+        :param uuid string: The UUID of the executed Primitive.
+        :returns: The status of the executed Primitive
+        """
+        status = None
+
+        if not self.client:
+            self.login()
+
+        model = self.loop.run_until_complete(
+            self.client.get_model(self.model)
+        )
+
+        status = self.loop.run_until_complete(
+            model.get_action_status(uuid)
+        )
+
+        self.loop.run_until_complete(
+            model.disconnect()
+        )
+
+        return status[uuid]
+
+    def GetPrimitiveOutput(self, uuid):
+        """Get the output of a completed Primitive execution.
+
+
+        :param uuid string: The UUID of the executed Primitive.
+ :returns: The output of the execution, or None if it's still running. + """ + result = None + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + result = self.loop.run_until_complete( + model.get_action_output(uuid) + ) + + self.loop.run_until_complete( + model.disconnect() + ) + + return result diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/proxy_cluster.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/proxy_cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..f323a3af88f3011ef9fde794cdfe343be8665abb --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/proxy_cluster.py @@ -0,0 +1,59 @@ +import socket + +from ops.framework import Object, StoredState + + +class ProxyCluster(Object): + + state = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self._relation_name = relation_name + self._relation = self.framework.model.get_relation(self._relation_name) + + self.framework.observe(charm.on.ssh_keys_initialized, self.on_ssh_keys_initialized) + + self.state.set_default(ssh_public_key=None) + self.state.set_default(ssh_private_key=None) + + def on_ssh_keys_initialized(self, event): + if not self.framework.model.unit.is_leader(): + raise RuntimeError("The initial unit of a cluster must also be a leader.") + + self.state.ssh_public_key = event.ssh_public_key + self.state.ssh_private_key = event.ssh_private_key + if not self.is_joined: + event.defer() + return + + self._relation.data[self.model.app][ + "ssh_public_key" + ] = self.state.ssh_public_key + self._relation.data[self.model.app][ + "ssh_private_key" + ] = self.state.ssh_private_key + + @property + def is_joined(self): + return self._relation is not None + + @property + def ssh_public_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_public_key") + + @property + def ssh_private_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_private_key") + + @property + def is_cluster_initialized(self): + return ( + True + if self.is_joined + and self._relation.data[self.model.app].get("ssh_public_key") + and self._relation.data[self.model.app].get("ssh_private_key") + else False + ) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/sshproxy.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/sshproxy.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c311e5be8515a7fe19c05dfed9f042ded0576b --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/charms/osm/sshproxy.py @@ -0,0 +1,375 @@ +"""Module to help with executing commands over SSH.""" +## +# Copyright 2016 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+## + +# from charmhelpers.core import unitdata +# from charmhelpers.core.hookenv import log + +import io +import ipaddress +import subprocess +import os +import socket +import shlex +import traceback +import sys + +from subprocess import ( + check_call, + Popen, + CalledProcessError, + PIPE, +) + +from ops.charm import CharmBase, CharmEvents +from ops.framework import StoredState, EventBase, EventSource +from ops.main import main +from ops.model import ( + ActiveStatus, + BlockedStatus, + MaintenanceStatus, + WaitingStatus, + ModelError, +) +import os +import subprocess +from .proxy_cluster import ProxyCluster + +import logging + + +logger = logging.getLogger(__name__) + +class SSHKeysInitialized(EventBase): + def __init__(self, handle, ssh_public_key, ssh_private_key): + super().__init__(handle) + self.ssh_public_key = ssh_public_key + self.ssh_private_key = ssh_private_key + + def snapshot(self): + return { + "ssh_public_key": self.ssh_public_key, + "ssh_private_key": self.ssh_private_key, + } + + def restore(self, snapshot): + self.ssh_public_key = snapshot["ssh_public_key"] + self.ssh_private_key = snapshot["ssh_private_key"] + + +class ProxyClusterEvents(CharmEvents): + ssh_keys_initialized = EventSource(SSHKeysInitialized) + + +class SSHProxyCharm(CharmBase): + + state = StoredState() + on = ProxyClusterEvents() + + def __init__(self, framework, key): + super().__init__(framework, key) + + self.peers = ProxyCluster(self, "proxypeer") + + # SSH Proxy actions (primitives) + self.framework.observe(self.on.generate_ssh_key_action, self.on_generate_ssh_key_action) + self.framework.observe(self.on.get_ssh_public_key_action, self.on_get_ssh_public_key_action) + self.framework.observe(self.on.run_action, self.on_run_action) + self.framework.observe(self.on.verify_ssh_credentials_action, self.on_verify_ssh_credentials_action) + + self.framework.observe(self.on.proxypeer_relation_changed, self.on_proxypeer_relation_changed) + + def get_ssh_proxy(self): + """Get the SSHProxy instance""" + proxy = SSHProxy( + hostname=self.model.config["ssh-hostname"], + username=self.model.config["ssh-username"], + password=self.model.config["ssh-password"], + ) + return proxy + + def on_proxypeer_relation_changed(self, event): + if self.peers.is_cluster_initialized and not SSHProxy.has_ssh_key(): + pubkey = self.peers.ssh_public_key + privkey = self.peers.ssh_private_key + SSHProxy.write_ssh_keys(public=pubkey, private=privkey) + self.verify_credentials() + else: + event.defer() + + def on_config_changed(self, event): + """Handle changes in configuration""" + self.verify_credentials() + + def on_install(self, event): + SSHProxy.install() + + def on_start(self, event): + """Called when the charm is being installed""" + if not self.peers.is_joined: + event.defer() + return + + unit = self.model.unit + + if not SSHProxy.has_ssh_key(): + unit.status = MaintenanceStatus("Generating SSH keys...") + pubkey = None + privkey = None + if self.model.unit.is_leader(): + if self.peers.is_cluster_initialized: + SSHProxy.write_ssh_keys( + public=self.peers.ssh_public_key, + private=self.peers.ssh_private_key, + ) + else: + SSHProxy.generate_ssh_key() + self.on.ssh_keys_initialized.emit( + SSHProxy.get_ssh_public_key(), SSHProxy.get_ssh_private_key() + ) + self.verify_credentials() + + def verify_credentials(self): + unit = self.model.unit + + # Unit should go into a waiting state until verify_ssh_credentials is successful + unit.status = WaitingStatus("Waiting for SSH credentials") + proxy = self.get_ssh_proxy() + verified, 
_ = proxy.verify_credentials() + if verified: + unit.status = ActiveStatus() + else: + unit.status = BlockedStatus("Invalid SSH credentials.") + return verified + + ##################### + # SSH Proxy methods # + ##################### + def on_generate_ssh_key_action(self, event): + """Generate a new SSH keypair for this unit.""" + if self.model.unit.is_leader(): + if not SSHProxy.generate_ssh_key(): + event.fail("Unable to generate ssh key") + else: + event.fail("Unit is not leader") + return + + def on_get_ssh_public_key_action(self, event): + """Get the SSH public key for this unit.""" + if self.model.unit.is_leader(): + pubkey = SSHProxy.get_ssh_public_key() + event.set_results({"pubkey": SSHProxy.get_ssh_public_key()}) + else: + event.fail("Unit is not leader") + return + + def on_run_action(self, event): + """Run an arbitrary command on the remote host.""" + if self.model.unit.is_leader(): + cmd = event.params["command"] + proxy = self.get_ssh_proxy() + stdout, stderr = proxy.run(cmd) + event.set_results({"output": stdout}) + if len(stderr): + event.fail(stderr) + else: + event.fail("Unit is not leader") + return + + def on_verify_ssh_credentials_action(self, event): + """Verify the SSH credentials for this unit.""" + unit = self.model.unit + if unit.is_leader(): + proxy = self.get_ssh_proxy() + verified, stderr = proxy.verify_credentials() + if verified: + event.set_results({"verified": True}) + unit.status = ActiveStatus() + else: + event.set_results({"verified": False, "stderr": stderr}) + event.fail("Not verified") + unit.status = BlockedStatus("Invalid SSH credentials.") + + else: + event.fail("Unit is not leader") + return + + +class LeadershipError(ModelError): + def __init__(self): + super().__init__("not leader") + +class SSHProxy: + private_key_path = "/root/.ssh/id_sshproxy" + public_key_path = "/root/.ssh/id_sshproxy.pub" + key_type = "rsa" + key_bits = 4096 + + def __init__(self, hostname: str, username: str, password: str = ""): + self.hostname = hostname + self.username = username + self.password = password + + @staticmethod + def install(): + check_call("apt update && apt install -y openssh-client sshpass", shell=True) + + @staticmethod + def generate_ssh_key(): + """Generate a 4096-bit rsa keypair.""" + if not os.path.exists(SSHProxy.private_key_path): + cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format( + SSHProxy.key_type, SSHProxy.key_bits, SSHProxy.private_key_path, + ) + + try: + check_call(cmd, shell=True) + except CalledProcessError: + return False + + return True + + @staticmethod + def write_ssh_keys(public, private): + """Write a 4096-bit rsa keypair.""" + with open(SSHProxy.public_key_path, "w") as f: + f.write(public) + f.close() + with open(SSHProxy.private_key_path, "w") as f: + f.write(private) + f.close() + + @staticmethod + def get_ssh_public_key(): + publickey = "" + if os.path.exists(SSHProxy.private_key_path): + with open(SSHProxy.public_key_path, "r") as f: + publickey = f.read() + return publickey + + @staticmethod + def get_ssh_private_key(): + privatekey = "" + if os.path.exists(SSHProxy.private_key_path): + with open(SSHProxy.private_key_path, "r") as f: + privatekey = f.read() + return privatekey + + @staticmethod + def has_ssh_key(): + return True if os.path.exists(SSHProxy.private_key_path) else False + + def run(self, cmd: str) -> (str, str): + """Run a command remotely via SSH. 
+ + Note: The previous behavior was to run the command locally if SSH wasn't + configured, but that can lead to cases where execution succeeds when you'd + expect it not to. + """ + if isinstance(cmd, str): + cmd = shlex.split(cmd) + + host = self._get_hostname() + user = self.username + passwd = self.password + key = self.private_key_path + + # Make sure we have everything we need to connect + if host and user: + return self.ssh(cmd) + + raise Exception("Invalid SSH credentials.") + + def scp(self, source_file, destination_file): + """Execute an scp command. Requires a fully qualified source and + destination. + + :param str source_file: Path to the source file + :param str destination_file: Path to the destination file + :raises: :class:`CalledProcessError` if the command fails + """ + cmd = [ + "sshpass", + "-p", + self.password, + "scp", + "-i", + os.path.expanduser(self.private_key_path), + "-o", + "StrictHostKeyChecking=no", + "-q", + "-B", + ] + destination = "{}@{}:{}".format(self.username, self.hostname, destination_file) + cmd.extend([source_file, destination]) + subprocess.run(cmd, check=True) + + def ssh(self, command): + """Run a command remotely via SSH. + + :param list(str) command: The command to execute + :return: tuple: The stdout and stderr of the command execution + :raises: :class:`CalledProcessError` if the command fails + """ + + destination = "{}@{}".format(self.username, self.hostname) + cmd = [ + "sshpass", + "-p", + self.password, + "ssh", + "-i", + os.path.expanduser(self.private_key_path), + "-o", + "StrictHostKeyChecking=no", + "-q", + destination, + ] + cmd.extend(command) + output = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return (output.stdout.decode("utf-8").strip(), output.stderr.decode("utf-8").strip()) + + def verify_credentials(self): + """Verify the SSH credentials. + + :return (bool, str): Verified, Stderr + """ + verified = False + try: + (stdout, stderr) = self.run("hostname") + verified = True + except CalledProcessError as e: + stderr = "Command failed: {} ({})".format(" ".join(e.cmd), str(e.output)) + except (TimeoutError, socket.timeout): + stderr = "Timeout attempting to reach {}".format(self._get_hostname()) + except Exception as error: + tb = traceback.format_exc() + stderr = "Unhandled exception: {}".format(tb) + return verified, stderr + + ################### + # Private methods # + ################### + def _get_hostname(self): + """Get the hostname for the ssh target. + + HACK: This function was added to work around an issue where the + ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first + is the floating ip, and the second the non-floating ip, for an Openstack + instance. + """ + return self.hostname.split(";")[0] diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/__init__.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f17b2969db298b21bc47bbe1d3614ccff93e9c6e --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The Operator Framework.""" + +from .version import version as __version__ # noqa: F401 (imported but unused) + +# Import here the bare minimum to break the circular import between modules +from . import charm # noqa: F401 (imported but unused) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/charm.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..d898de859fc444814bc19a7f8f0caaaec6f7e5f4 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/charm.py @@ -0,0 +1,575 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +import os +import pathlib +import typing + +import yaml + +from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents +from ops import model + + +def _loadYaml(source): + if yaml.__with_libyaml__: + return yaml.load(source, Loader=yaml.CSafeLoader) + return yaml.load(source, Loader=yaml.SafeLoader) + + +class HookEvent(EventBase): + """A base class for events that trigger because of a Juju hook firing.""" + + +class ActionEvent(EventBase): + """A base class for events that trigger when a user asks for an Action to be run. + + To read the parameters for the action, see the instance variable `params`. + To respond with the result of the action, call `set_results`. To add progress + messages that are visible as the action is progressing use `log`. + + :ivar params: The parameters passed to the action (read by action-get) + """ + + def defer(self): + """Action events are not deferable like other events. + + This is because an action runs synchronously and the user is waiting for the result. + """ + raise RuntimeError('cannot defer action events') + + def restore(self, snapshot: dict) -> None: + """Used by the operator framework to record the action. + + Not meant to be called directly by Charm code. + """ + env_action_name = os.environ.get('JUJU_ACTION_NAME') + event_action_name = self.handle.kind[:-len('_action')].replace('_', '-') + if event_action_name != env_action_name: + # This could only happen if the dev manually emits the action, or from a bug. + raise RuntimeError('action event kind does not match current action') + # Params are loaded at restore rather than __init__ because + # the model is not available in __init__. + self.params = self.framework.model._backend.action_get() + + def set_results(self, results: typing.Mapping) -> None: + """Report the result of the action. 
+ + Args: + results: The result of the action as a Dict + """ + self.framework.model._backend.action_set(results) + + def log(self, message: str) -> None: + """Send a message that a user will see while the action is running. + + Args: + message: The message for the user. + """ + self.framework.model._backend.action_log(message) + + def fail(self, message: str = '') -> None: + """Report that this action has failed. + + Args: + message: Optional message to record why it has failed. + """ + self.framework.model._backend.action_fail(message) + + +class InstallEvent(HookEvent): + """Represents the `install` hook from Juju.""" + + +class StartEvent(HookEvent): + """Represents the `start` hook from Juju.""" + + +class StopEvent(HookEvent): + """Represents the `stop` hook from Juju.""" + + +class RemoveEvent(HookEvent): + """Represents the `remove` hook from Juju. """ + + +class ConfigChangedEvent(HookEvent): + """Represents the `config-changed` hook from Juju.""" + + +class UpdateStatusEvent(HookEvent): + """Represents the `update-status` hook from Juju.""" + + +class UpgradeCharmEvent(HookEvent): + """Represents the `upgrade-charm` hook from Juju. + + This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju + has unpacked the upgraded charm code, and so this event will be handled with new code. + """ + + +class PreSeriesUpgradeEvent(HookEvent): + """Represents the `pre-series-upgrade` hook from Juju. + + This happens when a user has run `juju upgrade-series MACHINE prepare` and + will fire for each unit that is running on the machine, telling them that + the user is preparing to upgrade the Machine's series (eg trusty->bionic). + The charm should take actions to prepare for the upgrade (a database charm + would want to write out a version-independent dump of the database, so that + when a new version of the database is available in a new series, it can be + used.) + Once all units on a machine have run `pre-series-upgrade`, the user will + initiate the steps to actually upgrade the machine (eg `do-release-upgrade`). + When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire. + """ + + +class PostSeriesUpgradeEvent(HookEvent): + """Represents the `post-series-upgrade` hook from Juju. + + This is run after the user has done a distribution upgrade (or rolled back + and kept the same series). It is called in response to + `juju upgrade-series MACHINE complete`. Charms are expected to do whatever + steps are necessary to reconfigure their applications for the new series. + """ + + +class LeaderElectedEvent(HookEvent): + """Represents the `leader-elected` hook from Juju. + + Juju will trigger this when a new lead unit is chosen for a given application. + This represents the leader of the charm information (not necessarily the primary + of a running application). The main utility is that charm authors can know + that only one unit will be a leader at any given time, so they can do + configuration, etc, that would otherwise require coordination between units. + (eg, selecting a password for a new relation) + """ + + +class LeaderSettingsChangedEvent(HookEvent): + """Represents the `leader-settings-changed` hook from Juju. + + Deprecated. This represents when a lead unit would call `leader-set` to inform + the other units of an application that they have new information to handle. + This has been deprecated in favor of using a Peer relation, and having the + leader set a value in the Application data bag for that peer relation. 
+ (see :class:`RelationChangedEvent`). + """ + + +class CollectMetricsEvent(HookEvent): + """Represents the `collect-metrics` hook from Juju. + + Note that events firing during a CollectMetricsEvent are currently + sandboxed in how they can interact with Juju. To report metrics + use :meth:`.add_metrics`. + """ + + def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None: + """Record metrics that have been gathered by the charm for this unit. + + Args: + metrics: A collection of {key: float} pairs that contains the + metrics that have been gathered + labels: {key:value} strings that can be applied to the + metrics that are being gathered + """ + self.framework.model._backend.add_metrics(metrics, labels) + + +class RelationEvent(HookEvent): + """A base class representing the various relation lifecycle events. + + Charmers should not be creating RelationEvents directly. The events will be + generated by the framework from Juju related events. Users can observe them + from the various `CharmBase.on[relation_name].relation_*` events. + + Attributes: + relation: The Relation involved in this event + app: The remote application that has triggered this event + unit: The remote unit that has triggered this event. This may be None + if the relation event was triggered as an Application level event + """ + + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit is not None and unit.app != app: + raise RuntimeError( + 'cannot create RelationEvent with application {} and unit {}'.format(app, unit)) + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self) -> dict: + """Used by the framework to serialize the event to disk. + + Not meant to be called by Charm code. + """ + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot: dict) -> None: + """Used by the framework to deserialize the event from disk. + + Not meant to be called by Charm code. + """ + self.relation = self.framework.model.get_relation( + snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationCreatedEvent(RelationEvent): + """Represents the `relation-created` hook from Juju. + + This is triggered when a new relation to another app is added in Juju. This + can occur before units for those applications have started. All existing + relations should be established before start. + """ + + +class RelationJoinedEvent(RelationEvent): + """Represents the `relation-joined` hook from Juju. + + This is triggered whenever a new unit of a related application joins the relation. + (eg, a unit was added to an existing related app, or a new relation was established + with an application that already had units.) + """ + + +class RelationChangedEvent(RelationEvent): + """Represents the `relation-changed` hook from Juju. + + This is triggered whenever there is a change to the data bucket for a related + application or unit. Look at `event.relation.data[event.unit/app]` to see the + new information. 
+ """ + + +class RelationDepartedEvent(RelationEvent): + """Represents the `relation-departed` hook from Juju. + + This is the inverse of the RelationJoinedEvent, representing when a unit + is leaving the relation (the unit is being removed, the app is being removed, + the relation is being removed). It is fired once for each unit that is + going away. + """ + + +class RelationBrokenEvent(RelationEvent): + """Represents the `relation-broken` hook from Juju. + + If a relation is being removed (`juju remove-relation` or `juju remove-application`), + once all the units have been removed, RelationBrokenEvent will fire to signal + that the relationship has been fully terminated. + """ + + +class StorageEvent(HookEvent): + """Base class representing Storage related events.""" + + +class StorageAttachedEvent(StorageEvent): + """Represents the `storage-attached` hook from Juju. + + Called when new storage is available for the charm to use. + """ + + +class StorageDetachingEvent(StorageEvent): + """Represents the `storage-detaching` hook from Juju. + + Called when storage a charm has been using is going away. + """ + + +class CharmEvents(ObjectEvents): + """The events that are generated by Juju in response to the lifecycle of an application.""" + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + remove = EventSource(RemoveEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + collect_metrics = EventSource(CollectMetricsEvent) + + +class CharmBase(Object): + """Base class that represents the Charm overall. + + Usually this initialization is done by ops.main.main() rather than Charm authors + directly instantiating a Charm. + + Args: + framework: The framework responsible for managing the Model and events for this + Charm. + key: Ignored; will remove after deprecation period of the signature change. 
+ """ + + on = CharmEvents() + + def __init__(self, framework: Framework, key: typing.Optional = None): + super().__init__(framework, None) + + for relation_name in self.framework.meta.relations: + relation_name = relation_name.replace('-', '_') + self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent) + self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent) + self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent) + self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent) + self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent) + self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name = action_name.replace('-', '_') + self.on.define_event(action_name + '_action', ActionEvent) + + @property + def app(self) -> model.Application: + """Application that this unit is part of.""" + return self.framework.model.app + + @property + def unit(self) -> model.Unit: + """Unit that this execution is responsible for.""" + return self.framework.model.unit + + @property + def meta(self) -> 'CharmMeta': + """CharmMeta of this charm. + """ + return self.framework.meta + + @property + def charm_dir(self) -> pathlib.Path: + """Root directory of the Charm as it is running. + """ + return self.framework.charm_dir + + +class CharmMeta: + """Object containing the metadata for the charm. + + This is read from metadata.yaml and/or actions.yaml. Generally charms will + define this information, rather than reading it at runtime. This class is + mostly for the framework to understand what the charm has defined. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. + + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. + + Attributes: + name: The name of this charm + summary: Short description of what this charm does + description: Long description for this charm + maintainers: A list of strings of the email addresses of the maintainers + of this charm. + tags: Charm store tag metadata for categories associated with this charm. + terms: Charm store terms that should be agreed to before this charm can + be deployed. (Used for things like licensing issues.) + series: The list of supported OS series that this charm can support. + The first entry in the list is the default series that will be + used by deploy if no other series is requested by the user. + subordinate: True/False whether this charm is intended to be used as a + subordinate charm. + min_juju_version: If supplied, indicates this charm needs features that + are not available in older versions of Juju. + requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation. + provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation. + peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation. 
+ relations: A dict containing all :class:`RelationMeta` attributes (merged from other + sections) + storages: A dict of {name: :class:`StorageMeta`} for each defined storage. + resources: A dict of {name: :class:`ResourceMeta`} for each defined resource. + payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload. + extra_bindings: A dict of additional named bindings that a charm can use + for network configuration. + actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined. + Args: + raw: a mapping containing the contents of metadata.yaml + actions_raw: a mapping containing the contents of actions.yaml + """ + + def __init__(self, raw: dict = {}, actions_raw: dict = {}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta(RelationRole.requires, name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta(RelationRole.provides, name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta(RelationRole.peer, name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', {}) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml( + cls, metadata: typing.Union[str, typing.TextIO], + actions: typing.Optional[typing.Union[str, typing.TextIO]] = None): + """Instantiate a CharmMeta from a YAML description of metadata.yaml. + + Args: + metadata: A YAML description of charm metadata (name, relations, etc.) + This can be a simple string, or a file-like object. (passed to `yaml.safe_load`). + actions: YAML description of Actions for this charm (eg actions.yaml) + """ + meta = _loadYaml(metadata) + raw_actions = {} + if actions is not None: + raw_actions = _loadYaml(actions) + return cls(meta, raw_actions) + + +class RelationRole(enum.Enum): + peer = 'peer' + requires = 'requires' + provides = 'provides' + + def is_peer(self) -> bool: + """Return whether the current role is peer. + + A convenience to avoid having to import charm. + """ + return self is RelationRole.peer + + +class RelationMeta: + """Object containing metadata about a relation definition. + + Should not be constructed directly by Charm code. Is gotten from one of + :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`, + or :attr:`CharmMeta.relations`. + + Attributes: + role: This is one of peer/requires/provides + relation_name: Name of this relation from metadata.yaml + interface_name: Optional definition of the interface protocol. 
+ scope: "global" or "container" scope based on how the relation should be used. + """ + + def __init__(self, role: RelationRole, relation_name: str, raw: dict): + if not isinstance(role, RelationRole): + raise TypeError("role should be a Role, not {!r}".format(role)) + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition.""" + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition.""" + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition.""" + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + """Object containing metadata about an action's definition.""" + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {: } + self.required = raw.get('required', []) # [, ...] diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/framework.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/framework.py new file mode 100755 index 0000000000000000000000000000000000000000..b7c4749ff2b5bfb4f354bf1a8d4cd6ed64cf0da5 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/framework.py @@ -0,0 +1,1067 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import collections.abc +import inspect +import keyword +import logging +import marshal +import os +import pathlib +import pdb +import re +import sys +import types +import weakref + +from ops import charm +from ops.storage import ( + NoSnapshotError, + SQLiteStorage, +) + +logger = logging.getLogger(__name__) + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. 
+ + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. + """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = "{}/{}[{}]".format(parent, kind, key) + else: + self._path = "{}/{}".format(parent, kind) + else: + if key: + self._path = "{}[{}]".format(kind, key) + else: + self._path = "{}".format(kind) + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {}".format(path)) + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. + """ + self.deferred = False + + +class EventSource: + """EventSource wraps an event type with a descriptor to facilitate observing and emitting. + + It is generally used as: + + class SomethingHappened(EventBase): + pass + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + With that, instances of that type will offer the someobj.something_happened + attribute which is a BoundEvent and may be used to emit and observe the event. + """ + + def __init__(self, event_type): + if not isinstance(event_type, type) or not issubclass(event_type, EventBase): + raise RuntimeError( + 'Event requires a subclass of EventBase as an argument, got {}'.format(event_type)) + self.event_type = event_type + self.event_kind = None + self.emitter_type = None + + def _set_name(self, emitter_type, event_kind): + if self.event_kind is not None: + raise RuntimeError( + 'EventSource({}) reused as {}.{} and {}.{}'.format( + self.event_type.__name__, + self.emitter_type.__name__, + self.event_kind, + emitter_type.__name__, + event_kind, + )) + self.event_kind = event_kind + self.emitter_type = emitter_type + + def __get__(self, emitter, emitter_type=None): + if emitter is None: + return self + # Framework might not be available if accessed as CharmClass.on.event + # rather than charm_instance.on.event, but in that case it couldn't be + # emitted anyway, so there's no point to registering it. 
+ framework = getattr(emitter, 'framework', None) + if framework is not None: + framework.register_type(self.event_type, emitter, self.event_kind) + return BoundEvent(emitter, self.event_type, self.event_kind) + + +class BoundEvent: + + def __repr__(self): + return ''.format( + self.event_type.__name__, + type(self.emitter).__name__, + self.event_kind, + hex(id(self)), + ) + + def __init__(self, emitter, event_type, event_kind): + self.emitter = emitter + self.event_type = event_type + self.event_kind = event_kind + + def emit(self, *args, **kwargs): + """Emit event to all registered observers. + + The current storage state is committed before and after each observer is notified. + """ + framework = self.emitter.framework + key = framework._next_event_key() + event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs) + framework._emit(event) + + +class HandleKind: + """Helper descriptor to define the Object.handle_kind field. + + The handle_kind for an object defaults to its type name, but it may + be explicitly overridden if desired. + """ + + def __get__(self, obj, obj_type): + kind = obj_type.__dict__.get("handle_kind") + if kind: + return kind + return obj_type.__name__ + + +class _Metaclass(type): + """Helper class to ensure proper instantiation of Object-derived classes. + + This class currently has a single purpose: events derived from EventSource + that are class attributes of Object-derived classes need to be told what + their name is in that class. For example, in + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + the instance of EventSource needs to know it's called 'something_happened'. + + Starting from python 3.6 we could use __set_name__ on EventSource for this, + but until then this (meta)class does the equivalent work. + + TODO: when we drop support for 3.5 drop this class, and rename _set_name in + EventSource to __set_name__; everything should continue to work. + + """ + + def __new__(typ, *a, **kw): + k = super().__new__(typ, *a, **kw) + # k is now the Object-derived class; loop over its class attributes + for n, v in vars(k).items(): + # we could do duck typing here if we want to support + # non-EventSource-derived shenanigans. We don't. + if isinstance(v, EventSource): + # this is what 3.6+ does automatically for us: + v._set_name(k, n) + return k + + +class Object(metaclass=_Metaclass): + + handle_kind = HandleKind() + + def __init__(self, parent, key): + kind = self.handle_kind + if isinstance(parent, Framework): + self.framework = parent + # Avoid Framework instances having a circular reference to themselves. + if self.framework is self: + self.framework = weakref.proxy(self.framework) + self.handle = Handle(None, kind, key) + else: + self.framework = parent.framework + self.handle = Handle(parent, kind, key) + self.framework._track(self) + + # TODO Detect conflicting handles here. + + @property + def model(self): + return self.framework.model + + +class ObjectEvents(Object): + """Convenience type to allow defining .on attributes at class level.""" + + handle_kind = "on" + + def __init__(self, parent=None, key=None): + if parent is not None: + super().__init__(parent, key) + else: + self._cache = weakref.WeakKeyDictionary() + + def __get__(self, emitter, emitter_type): + if emitter is None: + return self + instance = self._cache.get(emitter) + if instance is None: + # Same type, different instance, more data. 
Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls: a type to define an event on. + + event_kind: an attribute name that will be used to access the + event. Must be a valid python identifier, not be a keyword + or an existing attribute. + + event_type: a type of the event to define. + + """ + prefix = 'unable to define an event with event_kind that ' + if not event_kind.isidentifier(): + raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind) + elif keyword.iskeyword(event_kind): + raise RuntimeError(prefix + 'is a python keyword: ' + event_kind) + try: + getattr(cls, event_kind) + raise RuntimeError( + prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind)) + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor._set_name(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. + event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(ObjectEvents): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return "cannot restore {} since no class was registered for it".format(self.handle_path) + + +# the message to show to the user when a pdb breakpoint goes active +_BREAKPOINT_WELCOME_MESSAGE = """ +Starting pdb to debug charm operator. +Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort. +Future breakpoints may interrupt execution again. +More details at https://discourse.jujucharms.com/t/debugging-charm-hooks + +""" + + +_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$' + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. 
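`define_event` is the hook that charm libraries use to add event kinds to an events class at runtime rather than at class-definition time. A hedged sketch; the event and class names are illustrative:

    from ops.charm import CharmBase, CharmEvents
    from ops.framework import EventBase

    class CertificateReady(EventBase):
        pass

    class MyCharmEvents(CharmEvents):
        """Events for a hypothetical charm; gains the extra kind below."""

    MyCharmEvents.define_event("certificate_ready", CertificateReady)

    class MyCharm(CharmBase):
        on = MyCharmEvents()   # instances now expose charm.on.certificate_ready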
+ model = None + meta = None + charm_dir = None + + def __init__(self, storage, charm_dir, meta, model): + + super().__init__(self, None) + + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + if isinstance(storage, (str, pathlib.Path)): + logger.warning( + "deprecated: Framework now takes a Storage not a path") + storage = SQLiteStorage(storage) + self._storage = storage + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do + # breakpoint(); if Python < 3.7, this doesn't affect anything + sys.breakpointhook = self.breakpoint + + # Flag to indicate that we already presented the welcome message in a debugger breakpoint + self._breakpoint_welcomed = False + + # Parse once the env var, which may be used multiple times later + debug_at = os.environ.get('JUJU_DEBUG_AT') + self._juju_debug_at = debug_at.split(',') if debug_at else () + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError( + 'two objects claiming to be {} have been created'.format(obj.handle.path)) + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. + self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. 
+ """ + if type(value) not in self._type_known: + raise RuntimeError( + 'cannot save {} values before registering that type'.format(type(value).__name__)) + data = value.snapshot() + + # Use marshal as a validator, enforcing the use of simple types, as we later the + # information is really pickled, which is too error prone for future evolution of the + # stored data (e.g. if the developer stores a custom object and later changes its + # class name; when unpickling the original class will not be there and event + # data loading will fail). + try: + marshal.dumps(data) + except ValueError: + msg = "unable to save the data for {}, it must contain only simple types: {!r}" + raise ValueError(msg.format(value.__class__.__name__, data)) + + self._storage.save_snapshot(value.handle.path, data) + + def load_snapshot(self, handle): + parent_path = None + if handle.parent: + parent_path = handle.parent.path + cls = self._type_registry.get((parent_path, handle.kind)) + if not cls: + raise NoTypeError(handle.path) + data = self._storage.load_snapshot(handle.path) + obj = cls.__new__(cls) + obj.framework = self + obj.handle = handle + obj.restore(data) + self._track(obj) + return obj + + def drop_snapshot(self, handle): + self._storage.drop_snapshot(handle.path) + + def observe(self, bound_event: BoundEvent, observer: types.MethodType): + """Register observer to be called when bound_event is emitted. + + The bound_event is generally provided as an attribute of the object that emits + the event, and is created in this style: + + class SomeObject: + something_happened = Event(SomethingHappened) + + That event may be observed as: + + framework.observe(someobj.something_happened, self._on_something_happened) + + Raises: + RuntimeError: if bound_event or observer are the wrong type. + """ + if not isinstance(bound_event, BoundEvent): + raise RuntimeError( + 'Framework.observe requires a BoundEvent as second parameter, got {}'.format( + bound_event)) + if not isinstance(observer, types.MethodType): + # help users of older versions of the framework + if isinstance(observer, charm.CharmBase): + raise TypeError( + 'observer methods must now be explicitly provided;' + ' please replace observe(self.on.{0}, self)' + ' with e.g. observe(self.on.{0}, self._on_{0})'.format( + bound_event.event_kind)) + raise RuntimeError( + 'Framework.observe requires a method as third parameter, got {}'.format(observer)) + + event_type = bound_event.event_type + event_kind = bound_event.event_kind + emitter = bound_event.emitter + + self.register_type(event_type, emitter, event_kind) + + if hasattr(emitter, "handle"): + emitter_path = emitter.handle.path + else: + raise RuntimeError( + 'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__)) + + # Validate that the method has an acceptable call signature. + sig = inspect.signature(observer) + # Self isn't included in the params list, so the first arg will be the event. + extra_params = list(sig.parameters.values())[1:] + + method_name = observer.__name__ + observer = observer.__self__ + if not sig.parameters: + raise TypeError( + '{}.{} must accept event parameter'.format(type(observer).__name__, method_name)) + elif any(param.default is inspect.Parameter.empty for param in extra_params): + # Allow for additional optional params, since there's no reason to exclude them, but + # required params will break. 
+ raise TypeError( + '{}.{} has extra required parameter'.format(type(observer).__name__, method_name)) + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 + # means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + saved = False + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of + # all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + if not saved: + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + saved = True + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + if saved: + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + event_is_from_juju = isinstance(event, charm.HookEvent) + event_is_action = isinstance(event, charm.ActionEvent) + if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at: + # Present the welcome message and run under PDB. + self._show_debug_code_message() + pdb.runcall(custom_handler, event) + else: + # Regular call to the registered method. + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from + # scratch in the next path. + self.framework._forget(event) + + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + + def _show_debug_code_message(self): + """Present the welcome message (only once!) 
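Deferral is the charm-facing side of this machinery: an observer that cannot make progress yet calls `event.defer()`, and the stored notice is replayed to that same observer on a later dispatch via `reemit`. A hedged sketch of the usual pattern inside a handler; the stored flag and helper are hypothetical:

    def _on_database_changed(self, event):     # sketch: method on a CharmBase subclass
        if not self._stored.ready:             # hypothetical StoredState flag
            event.defer()                      # replayed by framework.reemit() next dispatch
            return
        self._configure(event)                 # hypothetical helper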
when using debugger functionality.""" + if not self._breakpoint_welcomed: + self._breakpoint_welcomed = True + print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='') + + def breakpoint(self, name=None): + """Add breakpoint, optionally named, at the place where this method is called. + + For the breakpoint to be activated the JUJU_DEBUG_AT environment variable + must be set to "all" or to the specific name parameter provided, if any. In every + other situation calling this method does nothing. + + The framework also provides a standard breakpoint named "hook", that will + stop execution when a hook event is about to be handled. + + For those reasons, the "all" and "hook" breakpoint names are reserved. + """ + # If given, validate the name comply with all the rules + if name is not None: + if not isinstance(name, str): + raise TypeError('breakpoint names must be strings') + if name in ('hook', 'all'): + raise ValueError('breakpoint names "all" and "hook" are reserved') + if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name): + raise ValueError('breakpoint names must look like "foo" or "foo-bar"') + + indicated_breakpoints = self._juju_debug_at + if not indicated_breakpoints: + return + + if 'all' in indicated_breakpoints or name in indicated_breakpoints: + self._show_debug_code_message() + + # If we call set_trace() directly it will open the debugger *here*, so indicating + # it to use our caller's frame + code_frame = inspect.currentframe().f_back + pdb.Pdb().set_trace(code_frame) + else: + logger.warning( + "Breakpoint %r skipped (not found in the requested breakpoints: %s)", + name, indicated_breakpoints) + + def remove_unreferenced_events(self): + """Remove events from storage that are not referenced. + + In older versions of the framework, events that had no observers would get recorded but + never deleted. This makes a best effort to find these events and remove them from the + database. + """ + event_regex = re.compile(_event_regex) + to_remove = [] + for handle_path in self._storage.list_snapshots(): + if event_regex.match(handle_path): + notices = self._storage.notices(handle_path) + if next(notices, None) is None: + # There are no notices for this handle_path, it is valid to remove it + to_remove.append(handle_path) + for handle_path in to_remove: + self._storage.drop_snapshot(handle_path) + + +class StoredStateData(Object): + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + return self._cache + + def restore(self, snapshot): + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. 
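In charm code a breakpoint is just a named call on the framework, and it only fires when the JUJU_DEBUG_AT environment variable opts in (for example when debugging a unit with `juju debug-code`). A sketch; the breakpoint name is illustrative:

    from ops.charm import CharmBase

    class SimpleCharm(CharmBase):
        def __init__(self, *args):
            super().__init__(*args)
            self.framework.observe(self.on.start, self._on_start)

        def _on_start(self, event):
            # Drops into pdb only when JUJU_DEBUG_AT contains "all" or
            # "install-checks"; it is a no-op when the variable is unset.
            self.framework.breakpoint("install-checks")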
+ self.__dict__["_data"] = data + self.__dict__["_attr_name"] = attr_name + + parent.framework.observe(parent.framework.on.commit, self._data.on_commit) + + def __getattr__(self, key): + # "on" is the only reserved key that can't be used in the data map. + if key == "on": + return self._data.on + if key not in self._data: + raise AttributeError("attribute '{}' is not stored".format(key)) + return _wrap_stored(self._data, self._data[key]) + + def __setattr__(self, key, value): + if key == "on": + raise AttributeError("attribute 'on' is reserved and cannot be set") + + value = _unwrap_stored(self._data, value) + + if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)): + raise AttributeError( + 'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format( + key, type(value).__name__)) + + self._data[key] = _unwrap_stored(self._data, value) + + def set_default(self, **kwargs): + """"Set the value of any given key if it has not already been set""" + for k, v in kwargs.items(): + if k not in self._data: + self._data[k] = v + + +class StoredState: + """A class used to store data the charm needs persisted across invocations. + + Example:: + + class MyClass(Object): + _stored = StoredState() + + Instances of `MyClass` can transparently save state between invocations by + setting attributes on `_stored`. Initial state should be set with + `set_default` on the bound object, that is:: + + class MyClass(Object): + _stored = StoredState() + + def __init__(self, parent, key): + super().__init__(parent, key) + self._stored.set_default(seen=set()) + self.framework.observe(self.on.seen, self._on_seen) + + def _on_seen(self, event): + self._stored.seen.add(event.uuid) + + """ + + def __init__(self): + self.parent_type = None + self.attr_name = None + + def __get__(self, parent, parent_type=None): + if self.parent_type is not None and self.parent_type not in parent_type.mro(): + # the StoredState instance is being shared between two unrelated classes + # -> unclear what is exepcted of us -> bail out + raise RuntimeError( + 'StoredState shared by {} and {}'.format( + self.parent_type.__name__, parent_type.__name__)) + + if parent is None: + # accessing via the class directly (e.g. MyClass.stored) + return self + + bound = None + if self.attr_name is not None: + bound = parent.__dict__.get(self.attr_name) + if bound is not None: + # we already have the thing from a previous pass, huzzah + return bound + + # need to find ourselves amongst the parent's bases + for cls in parent_type.mro(): + for attr_name, attr_value in cls.__dict__.items(): + if attr_value is not self: + continue + # we've found ourselves! is it the first time? 
+ if bound is not None: + # the StoredState instance is being stored in two different + # attributes -> unclear what is expected of us -> bail out + raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format( + cls.__name__, self.attr_name, attr_name)) + # we've found ourselves for the first time; save where, and bind the object + self.attr_name = attr_name + self.parent_type = cls + bound = BoundStoredState(parent, attr_name) + + if bound is not None: + # cache the bound object to avoid the expensive lookup the next time + # (don't use setattr, to keep things symmetric with the fast-path lookup above) + parent.__dict__[self.attr_name] = bound + return bound + + raise AttributeError( + 'cannot find {} attribute in type {}'.format( + self.__class__.__name__, parent_type.__name__)) + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def _unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + 
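The wrapper types above exist so that in-place mutation of stored containers is noticed and persisted on commit. A sketch of typical `StoredState` use in a charm; the stored keys and port value are illustrative:

    from ops.charm import CharmBase
    from ops.framework import StoredState

    class SimpleCharm(CharmBase):
        _stored = StoredState()

        def __init__(self, *args):
            super().__init__(*args)
            self._stored.set_default(started=False, ports=[])
            self.framework.observe(self.on.config_changed, self._on_config_changed)

        def _on_config_changed(self, event):
            self._stored.ports.append(8080)   # StoredList marks the data dirty
            self._stored.started = True       # persisted when the framework commits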
return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredList): + return self._under >= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under >= other + else: + return NotImplemented + + +class StoredSet(collections.abc.MutableSet): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def add(self, key): + self._under.add(key) + self._stored_data.dirty = True + + def discard(self, key): + self._under.discard(key) + self._stored_data.dirty = True + + def __contains__(self, key): + return key in self._under + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + @classmethod + def _from_iterable(cls, it): + """Construct an instance of the class from any iterable input. + + Per https://docs.python.org/3/library/collections.abc.html + if the Set mixin is being used in a class with a different constructor signature, + you will need to override _from_iterable() with a classmethod that can construct + new instances from an iterable argument. + """ + return set(it) + + def __le__(self, other): + if isinstance(other, StoredSet): + return self._under <= other._under + elif isinstance(other, collections.abc.Set): + return self._under <= other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredSet): + return self._under >= other._under + elif isinstance(other, collections.abc.Set): + return self._under >= other + else: + return NotImplemented + + def __eq__(self, other): + if isinstance(other, StoredSet): + return self._under == other._under + elif isinstance(other, collections.abc.Set): + return self._under == other + else: + return NotImplemented diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/jujuversion.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/jujuversion.py new file mode 100755 index 0000000000000000000000000000000000000000..b2b8177dbe396f0d8c46b86e26af6b4e54ea046d --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/jujuversion.py @@ -0,0 +1,98 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from functools import total_ordering + + +@total_ordering +class JujuVersion: + + PATTERN = r'''^ + (?P\d{1,9})\.(?P\d{1,9}) # and numbers are always there + ((?:\.|-(?P[a-z]+))(?P\d{1,9}))? # sometimes with . or - + (\.(?P\d{1,9}))?$ # and sometimes with a number. 
+ ''' + + def __init__(self, version): + m = re.match(self.PATTERN, version, re.VERBOSE) + if not m: + raise RuntimeError('"{}" is not a valid Juju version string'.format(version)) + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch) + else: + s = '{}.{}.{}'.format(self.major, self.minor, self.patch) + if self.build > 0: + s += '.{}'.format(self.build) + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + return ( + self.major == other.major + and self.minor == other.minor + and self.tag == other.tag + and self.build == other.build + and self.patch == other.patch) + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False + + @classmethod + def from_environ(cls) -> 'JujuVersion': + """Build a JujuVersion from JUJU_VERSION.""" + v = os.environ.get('JUJU_VERSION') + if not v: + raise RuntimeError('environ has no JUJU_VERSION') + return cls(v) + + def has_app_data(self) -> bool: + """Determine whether this juju version knows about app data.""" + return (self.major, self.minor, self.patch) >= (2, 7, 0) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/lib/__init__.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..edb9fcacea6f0173aed9f07ca8a683cfead989cc --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/lib/__init__.py @@ -0,0 +1,194 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import os +import re + +from ast import literal_eval +from importlib.util import module_from_spec +from importlib.machinery import ModuleSpec +from pkgutil import get_importer +from types import ModuleType + + +_libraries = None + +_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''') +_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''') + +# Not perfect, but should do for now. 
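A quick sketch of how versions parse and order under the comparison rules above (assuming `ops.jujuversion` is importable):

    from ops.jujuversion import JujuVersion

    v = JujuVersion('2.8.1')
    assert (v.major, v.minor, v.patch, v.build) == (2, 8, 1, 0)
    assert v == '2.8.1'                                      # strings are coerced before comparing
    assert JujuVersion('2.7.6') < v
    assert JujuVersion('2.8-rc1') < JujuVersion('2.8.0')     # a tagged build sorts before the release
    assert v.has_app_data()                                  # relation app data arrived in Juju 2.7.0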
+_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''') + + +def use(name: str, api: int, author: str) -> ModuleType: + """Use a library from the ops libraries. + + Args: + name: the name of the library requested. + api: the API version of the library. + author: the author of the library. If not given, requests the + one in the standard library. + Raises: + ImportError: if the library cannot be found. + TypeError: if the name, api, or author are the wrong type. + ValueError: if the name, api, or author are invalid. + """ + if not isinstance(name, str): + raise TypeError("invalid library name: {!r} (must be a str)".format(name)) + if not isinstance(author, str): + raise TypeError("invalid library author: {!r} (must be a str)".format(author)) + if not isinstance(api, int): + raise TypeError("invalid library API: {!r} (must be an int)".format(api)) + if api < 0: + raise ValueError('invalid library api: {} (must be ≥0)'.format(api)) + if not _libname_re.match(name): + raise ValueError("invalid library name: {!r} (chars and digits only)".format(name)) + if not _libauthor_re.match(author): + raise ValueError("invalid library author email: {!r}".format(author)) + + if _libraries is None: + autoimport() + + versions = _libraries.get((name, author), ()) + for lib in versions: + if lib.api == api: + return lib.import_module() + + others = ', '.join(str(lib.api) for lib in versions) + if others: + msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format( + name, author, api, others) + else: + msg = 'cannot find library "{}" from "{}"'.format(name, author) + + raise ImportError(msg, name=name) + + +def autoimport(): + """Find all libs in the path and enable use of them. + + You only need to call this if you've installed a package or + otherwise changed sys.path in the current run, and need to see the + changes. Otherwise libraries are found on first call of `use`. + """ + global _libraries + _libraries = {} + for spec in _find_all_specs(sys.path): + lib = _parse_lib(spec) + if lib is None: + continue + + versions = _libraries.setdefault((lib.name, lib.author), []) + versions.append(lib) + versions.sort(reverse=True) + + +def _find_all_specs(path): + for sys_dir in path: + if sys_dir == "": + sys_dir = "." 
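From the consumer side, `use()` is the whole API: ask for a library by name, API version, and author, and get back the imported module. A hedged sketch; `proxylib` and its author address are hypothetical and the library would have to be installed somewhere on `sys.path`:

    import ops.lib

    # Hypothetical shared library, published inside some package as opslib/proxylib/.
    proxylib = ops.lib.use('proxylib', 1, 'charmers@example.com')
    proxylib.do_something()   # whatever API that opslib module exports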
+ try: + top_dirs = os.listdir(sys_dir) + except OSError: + continue + for top_dir in top_dirs: + opslib = os.path.join(sys_dir, top_dir, 'opslib') + try: + lib_dirs = os.listdir(opslib) + except OSError: + continue + finder = get_importer(opslib) + if finder is None or not hasattr(finder, 'find_spec'): + continue + for lib_dir in lib_dirs: + spec = finder.find_spec(lib_dir) + if spec is None: + continue + if spec.loader is None: + # a namespace package; not supported + continue + yield spec + + +# only the first this many lines of a file are looked at for the LIB* constants +_MAX_LIB_LINES = 99 + + +def _parse_lib(spec): + if spec.origin is None: + return None + + _expected = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int} + + try: + with open(spec.origin, 'rt', encoding='utf-8') as f: + libinfo = {} + for n, line in enumerate(f): + if len(libinfo) == len(_expected): + break + if n > _MAX_LIB_LINES: + return None + m = _libline_re.match(line) + if m is None: + continue + key, value = m.groups() + if key in _expected: + value = literal_eval(value) + if not isinstance(value, _expected[key]): + return None + libinfo[key] = value + else: + if len(libinfo) != len(_expected): + return None + except Exception: + return None + + return _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH']) + + +class _Lib: + + def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int): + self.spec = spec + self.name = name + self.author = author + self.api = api + self.patch = patch + + self._module = None + + def __repr__(self): + return "<_Lib {0.name} by {0.author}, API {0.api}, patch {0.patch}>".format(self) + + def import_module(self) -> ModuleType: + if self._module is None: + module = module_from_spec(self.spec) + self.spec.loader.exec_module(module) + self._module = module + return self._module + + def __eq__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a == b + + def __lt__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a < b diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/log.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/log.py new file mode 100644 index 0000000000000000000000000000000000000000..4aac5543aec4d84dc393e79b772a30284712d6d4 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/log.py @@ -0,0 +1,51 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
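`_parse_lib` only scans the first hundred or so lines of each candidate module for the four `LIB*` constants, so a discoverable library declares them at the top. A sketch of the provider side of the hypothetical `proxylib` used above:

    # Hypothetical file: <some-package>/opslib/proxylib/__init__.py
    LIBNAME = 'proxylib'
    LIBAUTHOR = 'charmers@example.com'
    LIBAPI = 1
    LIBPATCH = 4

    def do_something():
        """Whatever API the library chooses to expose."""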
+ +import sys +import logging + + +class JujuLogHandler(logging.Handler): + """A handler for sending logs to Juju via juju-log.""" + + def __init__(self, model_backend, level=logging.DEBUG): + super().__init__(level) + self.model_backend = model_backend + + def emit(self, record): + self.model_backend.juju_log(record.levelname, self.format(record)) + + +def setup_root_logging(model_backend, debug=False): + """Setup python logging to forward messages to juju-log. + + By default, logging is set to DEBUG level, and messages will be filtered by Juju. + Charmers can also set their own default log level with:: + + logging.getLogger().setLevel(logging.INFO) + + model_backend -- a ModelBackend to use for juju-log + debug -- if True, write logs to stderr as well as to juju-log. + """ + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.addHandler(JujuLogHandler(model_backend)) + if debug: + handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + + sys.excepthook = lambda etype, value, tb: logger.error( + "Uncaught exception while in charm code:", exc_info=(etype, value, tb)) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/main.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/main.py new file mode 100755 index 0000000000000000000000000000000000000000..6dc31c3575044796e8fe1f61b8415395689d6339 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/main.py @@ -0,0 +1,348 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import logging +import os +import subprocess +import sys +import warnings +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model +import ops.storage + +from ops.log import setup_root_logging + +CHARM_STATE_FILE = '.unit-state.db' + + +logger = logging.getLogger() + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/op/main.py structure. + charm_dir = Path('{}/../../..'.format(__file__)).resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _create_event_link(charm, bound_event): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. + """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError( + 'action event name {} needs _action suffix'.format(bound_event.event_kind)) + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. 
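Once `setup_root_logging` has run (which `ops.main.main` does before dispatching), charm code just uses the standard logging module and the records end up in `juju debug-log`. A sketch:

    import logging

    logger = logging.getLogger(__name__)   # forwarded to juju-log by JujuLogHandler

    logger.info('starting the workload')
    logger.debug('extra detail, filtered by Juju unless debug logging is enabled')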
+ event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError( + 'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type)) + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + # CPython has different implementations for populating sys.argv[0] for Linux and Windows. + # For Windows it is always an absolute path (any symlinks are resolved) + # while for Linux it can be a relative path. + target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir)) + + # Ignore the non-symlink files or directories + # assuming the charm author knows what they are doing. + logger.debug( + 'Creating a new relative symlink at %s pointing to %s', + event_path, target_path) + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file + which is determined by inspecting symlinks provided by the charm + author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + + """ + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + logger.debug("Event %s not defined for %s.", event_name, charm) + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. + if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + logger.debug('Emitting Juju event %s.', event_name) + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name)) + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +class _Dispatcher: + """Encapsulate how to figure out what event Juju wants us to run. + + Also knows how to run “legacy” hooks when Juju called us via a top-level + ``dispatch`` binary. 
+ + Args: + charm_dir: the toplevel directory of the charm + + Attributes: + event_name: the name of the event to run + is_dispatch_aware: are we running under a Juju that knows about the + dispatch binary? + + """ + + def __init__(self, charm_dir: Path): + self._charm_dir = charm_dir + self._exec_path = Path(sys.argv[0]) + + if 'JUJU_DISPATCH_PATH' in os.environ and (charm_dir / 'dispatch').exists(): + self._init_dispatch() + else: + self._init_legacy() + + def ensure_event_links(self, charm): + """Make sure necessary symlinks are present on disk""" + + if self.is_dispatch_aware: + # links aren't needed + return + + # When a charm is force-upgraded and a unit is in an error state Juju + # does not run upgrade-charm and instead runs the failed hook followed + # by config-changed. Given the nature of force-upgrading the hook setup + # code is not triggered on config-changed. + # + # 'start' event is included as Juju does not fire the install event for + # K8s charms (see LP: #1854635). + if (self.event_name in ('install', 'start', 'upgrade_charm') + or self.event_name.endswith('_storage_attached')): + _setup_event_links(self._charm_dir, charm) + + def run_any_legacy_hook(self): + """Run any extant legacy hook. + + If there is both a dispatch file and a legacy hook for the + current event, run the wanted legacy hook. + """ + + if not self.is_dispatch_aware: + # we *are* the legacy hook + return + + dispatch_path = self._charm_dir / self._dispatch_path + if not dispatch_path.exists(): + logger.debug("Legacy %s does not exist.", self._dispatch_path) + return + + # super strange that there isn't an is_executable + if not os.access(str(dispatch_path), os.X_OK): + logger.warning("Legacy %s exists but is not executable.", self._dispatch_path) + return + + if dispatch_path.resolve() == self._exec_path.resolve(): + logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path) + return + + argv = sys.argv.copy() + argv[0] = str(dispatch_path) + logger.info("Running legacy %s.", self._dispatch_path) + try: + subprocess.run(argv, check=True) + except subprocess.CalledProcessError as e: + logger.warning( + "Legacy %s exited with status %d.", + self._dispatch_path, e.returncode) + sys.exit(e.returncode) + else: + logger.debug("Legacy %s exited with status 0.", self._dispatch_path) + + def _set_name_from_path(self, path: Path): + """Sets the name attribute to that which can be inferred from the given path.""" + name = path.name.replace('-', '_') + if path.parent.name == 'actions': + name = '{}_action'.format(name) + self.event_name = name + + def _init_legacy(self): + """Set up the 'legacy' dispatcher. + + The current Juju doesn't know about 'dispatch' and calls hooks + explicitly. + """ + self.is_dispatch_aware = False + self._set_name_from_path(self._exec_path) + + def _init_dispatch(self): + """Set up the new 'dispatch' dispatcher. + + The current Juju will run 'dispatch' if it exists, and otherwise fall + back to the old behaviour. + + JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install, + in both cases. + """ + self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH']) + + if 'OPERATOR_DISPATCH' in os.environ: + logger.debug("Charm called itself via %s.", self._dispatch_path) + sys.exit(0) + os.environ['OPERATOR_DISPATCH'] = '1' + + self.is_dispatch_aware = True + self._set_name_from_path(self._dispatch_path) + + def is_restricted_context(self): + """"Return True if we are running in a restricted Juju context. 
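The event kind is derived purely from the hook or action path the charm was invoked through. A standalone sketch that mirrors `_Dispatcher._set_name_from_path`; the paths are illustrative:

    from pathlib import Path

    def event_name_for(dispatch_path):
        """Mirror of _Dispatcher._set_name_from_path, for illustration only."""
        path = Path(dispatch_path)
        name = path.name.replace('-', '_')
        if path.parent.name == 'actions':
            name = '{}_action'.format(name)
        return name

    assert event_name_for('hooks/config-changed') == 'config_changed'
    assert event_name_for('actions/do-backup') == 'do_backup_action'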
+ + When in a restricted context, most commands (relation-get, config-get, + state-get) are not available. As such, we change how we interact with + Juju. + """ + return self.event_name in ('collect_metrics',) + + +def main(charm_class, use_juju_for_storage=False): + """Setup the charm and dispatch the observed event. + + The event name is based on the way this executable was called (argv[0]). + """ + charm_dir = _get_charm_dir() + + model_backend = ops.model._ModelBackend() + debug = ('JUJU_DEBUG' in os.environ) + setup_root_logging(model_backend, debug=debug) + logger.debug("Operator Framework %s up and running.", ops.__version__) + + dispatcher = _Dispatcher(charm_dir) + dispatcher.run_any_legacy_hook() + + metadata = (charm_dir / 'metadata.yaml').read_text() + actions_meta = charm_dir / 'actions.yaml' + if actions_meta.exists(): + actions_metadata = actions_meta.read_text() + else: + actions_metadata = None + + if not yaml.__with_libyaml__: + logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader') + meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata) + model = ops.model.Model(meta, model_backend) + + # TODO: If Juju unit agent crashes after exit(0) from the charm code + # the framework will commit the snapshot but Juju will not commit its + # operation. + charm_state_path = charm_dir / CHARM_STATE_FILE + if use_juju_for_storage: + if dispatcher.is_restricted_context(): + # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event + # Though we eventually expect that juju will run collect-metrics in a + # non-restricted context. Once we can determine that we are running collect-metrics + # in a non-restricted context, we should fire the event as normal. + logger.debug('"%s" is not supported when using Juju for storage\n' + 'see: https://github.com/canonical/operator/issues/348', + dispatcher.event_name) + # Note that we don't exit nonzero, because that would cause Juju to rerun the hook + return + store = ops.storage.JujuStorage() + else: + store = ops.storage.SQLiteStorage(charm_state_path) + framework = ops.framework.Framework(store, charm_dir, meta, model) + try: + sig = inspect.signature(charm_class) + try: + sig.bind(framework) + except TypeError: + msg = ( + "the second argument, 'key', has been deprecated and will be " + "removed after the 0.7 release") + warnings.warn(msg, DeprecationWarning) + charm = charm_class(framework, None) + else: + charm = charm_class(framework) + dispatcher.ensure_event_links(charm) + + # TODO: Remove the collect_metrics check below as soon as the relevant + # Juju changes are made. + # + # Skip reemission of deferred events for collect-metrics events because + # they do not have the full access to all hook tools. + if not dispatcher.is_restricted_context(): + framework.reemit() + + _emit_charm_event(charm, dispatcher.event_name) + + framework.commit() + finally: + framework.close() diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/model.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b96e89154ea9cec2b62a4fab4649412e115c304e --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/model.py @@ -0,0 +1,1237 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
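Tying this together, the executable that the `dispatch` script and the hook symlinks run is just a CharmBase subclass handed to `main()`. A minimal sketch of such an entry point; the handler body is illustrative:

    #!/usr/bin/env python3
    from ops.charm import CharmBase
    from ops.main import main
    from ops.model import ActiveStatus

    class SimpleCharm(CharmBase):
        def __init__(self, *args):
            super().__init__(*args)
            self.framework.observe(self.on.start, self._on_start)

        def _on_start(self, event):
            self.model.unit.status = ActiveStatus()

    if __name__ == '__main__':
        main(SimpleCharm)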
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import decimal +import ipaddress +import json +import os +import re +import shutil +import tempfile +import time +import typing +import weakref + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + +import ops +from ops.jujuversion import JujuVersion + + +class Model: + """Represents the Juju Model as seen from this unit. + + This should not be instantiated directly by Charmers, but can be accessed as `self.model` + from any class that derives from Object. + + Attributes: + unit: A :class:`Unit` that represents the unit that is running this code (eg yourself) + app: A :class:`Application` that represents the application this unit is a part of. + relations: Mapping of endpoint to list of :class:`Relation` answering the question + "what am I currently related to". See also :meth:`.get_relation` + config: A dict of the config for the current application. + resources: Access to resources for this charm. Use ``model.resources.fetch(resource_name)`` + to get the path on disk where the resource can be found. + storages: Mapping of storage_name to :class:`Storage` for the storage points defined in + metadata.yaml + pod: Used to get access to ``model.pod.set_spec`` to set the container specification + for Kubernetes charms. + """ + + def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'): + self._cache = _ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(self._backend.unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + self._bindings = BindingMapping(self._backend) + + @property + def name(self) -> str: + """Return the name of the Model that this unit is running in. + + This is read from the environment variable ``JUJU_MODEL_NAME``. + """ + return self._backend.model_name + + def get_unit(self, unit_name: str) -> 'Unit': + """Get an arbitrary unit by name. + + Internally this uses a cache, so asking for the same unit two times will + return the same object. + """ + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name: str) -> 'Application': + """Get an application by name. + + Internally this uses a cache, so asking for the same application two times will + return the same object. + """ + return self._cache.get(Application, app_name) + + def get_relation( + self, relation_name: str, + relation_id: typing.Optional[int] = None) -> 'Relation': + """Get a specific Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. 
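From charm code the model is reached as `self.model`. A sketch of typical read access, written as a helper that takes the model so it stays self-contained; the endpoint and option names are illustrative:

    def describe_model(model):
        """Sketch: read-only access to configuration, relations and bindings."""
        hostname = model.config.get('external-hostname')   # options defined in config.yaml
        relation = model.get_relation('proxypeer')         # None when the endpoint is not related
        binding = model.get_binding('proxypeer')           # network-space binding for that endpoint
        return hostname, relation, binding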
+ + Args: + relation_name: The name of the endpoint for this charm + relation_id: An identifier for a specific relation. Used to disambiguate when a + given application has more than one relation on a given endpoint. + Raises: + TooManyRelatedAppsError: is raised if there is more than one relation to the + supplied relation_name and no relation_id was supplied + """ + return self.relations._get_unique(relation_name, relation_id) + + def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a network space binding. + + Args: + binding_key: The relation name or instance to obtain bindings for. + Returns: + If ``binding_key`` is a relation name, the method returns the default binding + for that relation. If a relation instance is provided, the method first looks + up a more specific binding for that specific relation ID, and if none is found + falls back to the default binding for the relation name. + """ + return self._bindings.get(binding_key) + + +class _ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + """Represents a named application in the model. + + This might be your application, or might be an application that you are related to. + Charmers should not instantiate Application objects directly, but should use + :meth:`Model.get_app` if they need a reference to a given application. + + Attributes: + name: The name of this application (eg, 'mysql'). This name may differ from the name of + the charm, if the user has deployed it to a different name. + """ + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of the overall application. + + Can only be read and set by the lead unit of the application. + + The status of remote units is always Unknown. + + Raises: + RuntimeError: if you try to set the status of another application, or if you try to + set the status of this application as a unit that is not the leader. 
+ InvalidStatusError: if you try to set the status to something that is not a + :class:`StatusBase` + + Example:: + + self.model.app.status = BlockedStatus('I need a human to come help me') + """ + if not self._is_our_app: + return UnknownStatus() + + if not self._backend.is_leader(): + raise RuntimeError('cannot get application status as a non-leader unit') + + if self._status: + return self._status + + s = self._backend.status_get(is_app=True) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for application {} status: {}'.format(self, value) + ) + + if not self._is_our_app: + raise RuntimeError('cannot to set status for a remote application {}'.format(self)) + + if not self._backend.is_leader(): + raise RuntimeError('cannot set application status as a non-leader unit') + + self._backend.status_set(value.name, value.message, is_app=True) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + +class Unit: + """Represents a named unit in the model. + + This might be your unit, another unit of your application, or a unit of another application + that you are related to. + + Attributes: + name: The name of the unit (eg, 'mysql/0') + app: The Application the unit is a part of. + """ + + def __init__(self, name, backend, cache): + self.name = name + + app_name = name.split('/')[0] + self.app = cache.get(Application, app_name) + + self._backend = backend + self._cache = cache + self._is_our_unit = self.name == self._backend.unit_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of a specific unit. + + The status of any unit other than yourself is always Unknown. + + Raises: + RuntimeError: if you try to set the status of a unit other than yourself. + InvalidStatusError: if you try to set the status to something other than + a :class:`StatusBase` + Example:: + + self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators') + """ + if not self._is_our_unit: + return UnknownStatus() + + if self._status: + return self._status + + s = self._backend.status_get(is_app=False) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for unit {} status: {}'.format(self, value) + ) + + if not self._is_our_unit: + raise RuntimeError('cannot set status for a remote unit {}'.format(self)) + + self._backend.status_set(value.name, value.message, is_app=False) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + def is_leader(self) -> bool: + """Return whether this unit is the leader of its application. + + This can only be called for your own unit. + Returns: + True if you are the leader, False otherwise + Raises: + RuntimeError: if called for a unit that is not yourself + """ + if self._is_our_unit: + # This value is not cached as it is not guaranteed to persist for the whole duration + # of a hook execution. 
+ return self._backend.is_leader() + else: + raise RuntimeError( + 'leadership status of remote units ({}) is not visible to other' + ' applications'.format(self) + ) + + def set_workload_version(self, version: str) -> None: + """Record the version of the software running as the workload. + + This shouldn't be confused with the revision of the charm. This is informative only; + shown in the output of 'juju status'. + """ + if not isinstance(version, str): + raise TypeError("workload version must be a str, not {}: {!r}".format( + type(version).__name__, version)) + self._backend.application_version_set(version) + + +class LazyMapping(Mapping, ABC): + """Represents a dict that isn't populated until it is accessed. + + Charm authors should generally never need to use this directly, but it forms + the basis for many of the dicts that the framework tracks. + """ + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def _invalidate(self): + self._lazy_data = None + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of :class:`Relation` instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role.is_peer(): + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, + self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _invalidate(self, relation_name): + """Used to wipe the cache of a given relation_name. + + Not meant to be used by Charm authors. The content of relation data is + static for the lifetime of a hook, so it is safe to cache in memory once + accessed. + """ + self._data[relation_name] = None + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError('relation id {} must be int or None not {}'.format( + relation_id, + type(relation_id).__name__)) + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, + self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. 
+ raise TooManyRelatedAppsError(relation_name, num_related, 1) + + +class BindingMapping: + """Mapping of endpoints to network bindings. + + Charm authors should not instantiate this directly, but access it via + :meth:`Model.get_binding` + """ + + def __init__(self, backend): + self._backend = backend + self._data = {} + + def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a specific Binding for an endpoint/relation. + + Not used directly by Charm authors. See :meth:`Model.get_binding` + """ + if isinstance(binding_key, Relation): + binding_name = binding_key.name + relation_id = binding_key.id + elif isinstance(binding_key, str): + binding_name = binding_key + relation_id = None + else: + raise ModelError('binding key must be str or relation instance, not {}' + ''.format(type(binding_key).__name__)) + binding = self._data.get(binding_key) + if binding is None: + binding = Binding(binding_name, relation_id, self._backend) + self._data[binding_key] = binding + return binding + + +class Binding: + """Binding to a network space. + + Attributes: + name: The name of the endpoint this binding represents (eg, 'db') + """ + + def __init__(self, name, relation_id, backend): + self.name = name + self._relation_id = relation_id + self._backend = backend + self._network = None + + @property + def network(self) -> 'Network': + """The network information for this binding.""" + if self._network is None: + try: + self._network = Network(self._backend.network_get(self.name, self._relation_id)) + except RelationNotFoundError: + if self._relation_id is None: + raise + # If a relation is dead, we can still get network info associated with an + # endpoint itself + self._network = Network(self._backend.network_get(self.name)) + return self._network + + +class Network: + """Network space details. + + Charm authors should not instantiate this directly, but should get access to the Network + definition from :meth:`Model.get_binding` and its ``network`` attribute. + + Attributes: + interfaces: A list of :class:`NetworkInterface` details. This includes the + information about how your application should be configured (eg, what + IP addresses should you bind to.) + Note that multiple addresses for a single interface are represented as multiple + interfaces. (eg, ``[NetworKInfo('ens1', '10.1.1.1/32'), + NetworkInfo('ens1', '10.1.2.1/32'])``) + ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP + addresses that other units should use to get in touch with you. + egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that + other units will see you connecting from. Due to things like NAT it isn't always + possible to narrow it down to a single address, but when it is clear, the CIDRs + will be constrained to a single address. (eg, 10.0.0.1/32) + Args: + network_info: A dict of network information as returned by ``network-get``. + """ + + def __init__(self, network_info: dict): + self.interfaces = [] + # Treat multiple addresses on an interface as multiple logical + # interfaces with the same name. 
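+        # Illustrative shape of the network_info dict parsed below (addresses are
+        # examples only, not taken from a real deployment):
+        #   {'bind-addresses': [{'interface-name': 'ens1',
+        #                        'addresses': [{'value': '10.1.1.1', 'cidr': '10.1.1.0/24'}]}],
+        #    'ingress-addresses': ['10.1.1.1'],
+        #    'egress-subnets': ['10.1.1.0/24']}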
+ for interface_info in network_info['bind-addresses']: + interface_name = interface_info['interface-name'] + for address_info in interface_info['addresses']: + self.interfaces.append(NetworkInterface(interface_name, address_info)) + self.ingress_addresses = [] + for address in network_info['ingress-addresses']: + self.ingress_addresses.append(ipaddress.ip_address(address)) + self.egress_subnets = [] + for subnet in network_info['egress-subnets']: + self.egress_subnets.append(ipaddress.ip_network(subnet)) + + @property + def bind_address(self): + """A single address that your application should bind() to. + + For the common case where there is a single answer. This represents a single + address from :attr:`.interfaces` that can be used to configure where your + application should bind() and listen(). + """ + return self.interfaces[0].address + + @property + def ingress_address(self): + """The address other applications should use to connect to your unit. + + Due to things like public/private addresses, NAT and tunneling, the address you bind() + to is not always the address other people can use to connect() to you. + This is just the first address from :attr:`.ingress_addresses`. + """ + return self.ingress_addresses[0] + + +class NetworkInterface: + """Represents a single network interface that the charm needs to know about. + + Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding` + to get the network information for a given endpoint. + + Attributes: + name: The name of the interface (eg. 'eth0', or 'ens1') + subnet: An :class:`ipaddress.ip_network` representation of the IP for the network + interface. This may be a single address (eg '10.0.1.2/32') + """ + + def __init__(self, name: str, address_info: dict): + self.name = name + # TODO: expose a hardware address here, see LP: #1864070. + self.address = ipaddress.ip_address(address_info['value']) + cidr = address_info['cidr'] + if not cidr: + # The cidr field may be empty, see LP: #1864102. + # In this case, make it a /32 or /128 IP network. + self.subnet = ipaddress.ip_network(address_info['value']) + else: + self.subnet = ipaddress.ip_network(cidr) + # TODO: expose a hostname/canonical name for the address here, see LP: #1864086. + + +class Relation: + """Represents an established relation between this application and another application. + + This class should not be instantiated directly, instead use :meth:`Model.get_relation` + or :attr:`RelationEvent.relation`. + + Attributes: + name: The name of the local endpoint of the relation (eg 'db') + id: The identifier for a particular relation (integer) + app: An :class:`Application` representing the remote application of this relation. + For peer relations this will be the local application. + units: A set of :class:`Unit` for units that have started and joined this relation. + data: A :class:`RelationData` holding the data buckets for each entity + of a relation. Accessed via eg Relation.data[unit]['foo'] + """ + + def __init__( + self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit, + backend: '_ModelBackend', cache: '_ModelCache'): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. 
+ if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. + pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return '<{}.{} {}:{}>'.format(type(self).__module__, + type(self).__name__, + self.name, + self.id) + + +class RelationData(Mapping): + """Represents the various data buckets of a given relation. + + Each unit and application involved in a relation has their own data bucket. + Eg: ``{entity: RelationDataContent}`` + where entity can be either a :class:`Unit` or a :class:`Application`. + + Units can read and write their own data, and if they are the leader, + they can read and write their application data. They are allowed to read + remote unit and application data. + + This class should not be created directly. It should be accessed via + :attr:`Relation.data` + """ + + def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'): + self.relation = weakref.proxy(relation) + self._data = { + our_unit: RelationDataContent(self.relation, our_unit, backend), + our_unit.app: RelationDataContent(self.relation, our_unit.app, backend), + } + self._data.update({ + unit: RelationDataContent(self.relation, unit, backend) + for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app is not None: + self._data.update({ + self.relation.app: RelationDataContent(self.relation, self.relation.app, backend), + }) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on + # whether this unit is a leader or not, but this is not guaranteed + # to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError('cannot set relation data for {}'.format(self._entity.name)) + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an + # empty string will remove the key entirely from the relation data. 
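+                # Illustrative effect: setting relation.data[unit]['foo'] = '' behaves the
+                # same as deleting the 'foo' key (see __delitem__ below).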
+ del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty + # string will remove the key entirely from the relation data. + self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units. + + To access a status by name, see :meth:`StatusBase.from_name`, most use cases will just + directly use the child class to indicate their status. + """ + + _statuses = {} + name = None + + def __init__(self, message: str): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + return super().__new__(cls) + + def __eq__(self, other): + if not isinstance(self, type(other)): + return False + return self.message == other.message + + def __repr__(self): + return "{.__class__.__name__}({!r})".format(self, self.message) + + @classmethod + def from_name(cls, name: str, message: str): + if name == 'unknown': + # unknown is special + return UnknownStatus() + else: + return cls._statuses[name](message) + + @classmethod + def register(cls, child): + if child.name is None: + raise AttributeError('cannot register a Status which has no name') + cls._statuses[child.name] = child + return child + + +@StatusBase.register +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the + charm has not called status-set yet. + + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + def __repr__(self): + return "UnknownStatus()" + + +@StatusBase.register +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message: str = ''): + super().__init__(message) + + +@StatusBase.register +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +@StatusBase.register +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation + for providing those services. This is a "spinning" state, not an error state. It + reflects activity on the unit itself, not on peers or related units. + + """ + name = 'maintenance' + + +@StatusBase.register +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which + it is related is not running. + + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name: str) -> Path: + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. 
+ """ + if name not in self._paths: + raise RuntimeError('invalid resource name: {}'.format(name)) + if self._paths[name] is None: + self._paths[name] = Path(self._backend.resource_get(name)) + return self._paths[name] + + +class Pod: + """Represents the definition of a pod spec in Kubernetes models. + + Currently only supports simple access to setting the Juju pod spec via :attr:`.set_spec`. + """ + + def __init__(self, backend: '_ModelBackend'): + self._backend = backend + + def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None): + """Set the specification for pods that Juju should start in kubernetes. + + See `juju help-tool pod-spec-set` for details of what should be passed. + Args: + spec: The mapping defining the pod specification + k8s_resources: Additional kubernetes specific specification. + + Returns: + """ + if not self._backend.is_leader(): + raise ModelError('cannot set a pod spec as this unit is not a leader') + self._backend.pod_spec_set(spec, k8s_resources) + + +class StorageMapping(Mapping): + """Map of storage names to lists of Storage instances.""" + + def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._storage_map = {storage_name: None for storage_name in storage_names} + + def __contains__(self, key: str): + return key in self._storage_map + + def __len__(self): + return len(self._storage_map) + + def __iter__(self): + return iter(self._storage_map) + + def __getitem__(self, storage_name: str) -> typing.List['Storage']: + storage_list = self._storage_map[storage_name] + if storage_list is None: + storage_list = self._storage_map[storage_name] = [] + for storage_id in self._backend.storage_list(storage_name): + storage_list.append(Storage(storage_name, storage_id, self._backend)) + return storage_list + + def request(self, storage_name: str, count: int = 1): + """Requests new storage instances of a given name. + + Uses storage-add tool to request additional storage. Juju will notify the unit + via -storage-attached events when it becomes available. + """ + if storage_name not in self._storage_map: + raise ModelError(('cannot add storage {!r}:' + ' it is not present in the charm metadata').format(storage_name)) + self._backend.storage_add(storage_name, count) + + +class Storage: + """"Represents a storage as defined in metadata.yaml + + Attributes: + name: Simple string name of the storage + id: The provider id for storage + """ + + def __init__(self, storage_name, storage_id, backend): + self.name = storage_name + self.id = storage_id + self._backend = backend + self._location = None + + @property + def location(self): + if self._location is None: + raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location") + self._location = Path(raw) + return self._location + + +class ModelError(Exception): + """Base class for exceptions raised when interacting with the Model.""" + pass + + +class TooManyRelatedAppsError(ModelError): + """Raised by :meth:`Model.get_relation` if there is more than one related application.""" + + def __init__(self, relation_name, num_related, max_supported): + super().__init__('Too many remote applications on {} ({} > {})'.format( + relation_name, num_related, max_supported)) + self.relation_name = relation_name + self.num_related = num_related + self.max_supported = max_supported + + +class RelationDataError(ModelError): + """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid. 
+ + This is raised if you're either trying to set a value to something that isn't a string, + or if you are trying to set a value in a bucket that you don't have access to. (eg, + another application/unit or setting your application data but you aren't the leader.) + """ + + +class RelationNotFoundError(ModelError): + """Backend error when querying juju for a given relation and that relation doesn't exist.""" + + +class InvalidStatusError(ModelError): + """Raised if trying to set an Application or Unit status to something invalid.""" + + +class _ModelBackend: + """Represents the connection between the Model representation and talking to Juju. + + Charm authors should not directly interact with the ModelBackend, it is a private + implementation of Model. + """ + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self, unit_name=None, model_name=None): + if unit_name is None: + self.unit_name = os.environ['JUJU_UNIT_NAME'] + else: + self.unit_name = unit_name + if model_name is None: + model_name = os.environ.get('JUJU_MODEL_NAME') + self.model_name = model_name + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = None + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', '-r', str(relation_id), + return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'getting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-get', '-r', str(relation_id), '-', member_name] + if is_app: + args.append('--app') + + try: + return self._run(*args, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'setting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)] + if is_app: + args.append('--app') + + try: + return self._run(*args) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm 
+        code is executing on.
+
+        The value is cached for the duration of a lease which is 30s in Juju.
+        """
+        now = time.monotonic()
+        if self._leader_check_time is None:
+            check = True
+        else:
+            time_since_check = datetime.timedelta(seconds=now - self._leader_check_time)
+            check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None)
+        if check:
+            # Current time MUST be saved before running is-leader to ensure the cache
+            # is only used inside the window that is-leader itself asserts.
+            self._leader_check_time = now
+            self._is_leader = self._run('is-leader', return_output=True, use_json=True)
+
+        return self._is_leader
+
+    def resource_get(self, resource_name):
+        return self._run('resource-get', resource_name, return_output=True).strip()
+
+    def pod_spec_set(self, spec, k8s_resources):
+        tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
+        try:
+            spec_path = tmpdir / 'spec.json'
+            spec_path.write_text(json.dumps(spec))
+            args = ['--file', str(spec_path)]
+            if k8s_resources:
+                k8s_res_path = tmpdir / 'k8s-resources.json'
+                k8s_res_path.write_text(json.dumps(k8s_resources))
+                args.extend(['--k8s-resources', str(k8s_res_path)])
+            self._run('pod-spec-set', *args)
+        finally:
+            shutil.rmtree(str(tmpdir))
+
+    def status_get(self, *, is_app=False):
+        """Get a status of a unit or an application.
+
+        Args:
+            is_app: A boolean indicating whether the status should be retrieved for a unit
+                or an application.
+        """
+        content = self._run(
+            'status-get', '--include-data', '--application={}'.format(is_app),
+            use_json=True,
+            return_output=True)
+        # Unit status looks like (in YAML):
+        # message: 'load: 0.28 0.26 0.26'
+        # status: active
+        # status-data: {}
+        # Application status looks like (in YAML):
+        # application-status:
+        #   message: 'load: 0.28 0.26 0.26'
+        #   status: active
+        #   status-data: {}
+        #   units:
+        #     uo/0:
+        #       message: 'load: 0.28 0.26 0.26'
+        #       status: active
+        #       status-data: {}
+
+        if is_app:
+            return {'status': content['application-status']['status'],
+                    'message': content['application-status']['message']}
+        else:
+            return content
+
+    def status_set(self, status, message='', *, is_app=False):
+        """Set a status of a unit or an application.
+
+        Args:
+            is_app: A boolean indicating whether the status should be set for a unit or an
+                application.
+ """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', '--application={}'.format(is_app), status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, + return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, + return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError('storage count must be integer, got: {} ({})'.format(count, + type(count))) + self._run('storage-add', '{}={}'.format(name, count)) + + def action_get(self): + return self._run('action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()]) + + def action_log(self, message): + self._run('action-log', message) + + def action_fail(self, message=''): + self._run('action-fail', message) + + def application_version_set(self, version): + self._run('application-version-set', '--', version) + + def juju_log(self, level, message): + self._run('juju-log', '--log-level', level, message) + + def network_get(self, binding_name, relation_id=None): + """Return network info provided by network-get for a given binding. + + Args: + binding_name: A name of a binding (relation name or extra-binding name). + relation_id: An optional relation id to get network info for. + """ + cmd = ['network-get', binding_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def add_metrics(self, metrics, labels=None): + cmd = ['add-metric'] + + if labels: + label_args = [] + for k, v in labels.items(): + _ModelBackendValidator.validate_metric_label(k) + _ModelBackendValidator.validate_label_value(k, v) + label_args.append('{}={}'.format(k, v)) + cmd.extend(['--labels', ','.join(label_args)]) + + metric_args = [] + for k, v in metrics.items(): + _ModelBackendValidator.validate_metric_key(k) + metric_value = _ModelBackendValidator.format_metric_value(v) + metric_args.append('{}={}'.format(k, metric_value)) + cmd.extend(metric_args) + self._run(*cmd) + + +class _ModelBackendValidator: + """Provides facilities for validating inputs and formatting them for model backends.""" + + METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$') + + @classmethod + def validate_metric_key(cls, key): + if cls.METRIC_KEY_REGEX.match(key) is None: + raise ModelError( + 'invalid metric key {!r}: must match {}'.format( + key, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def validate_metric_label(cls, label_name): + if cls.METRIC_KEY_REGEX.match(label_name) is None: + raise ModelError( + 'invalid metric label name {!r}: must match {}'.format( + label_name, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def format_metric_value(cls, value): + try: + decimal_value = decimal.Decimal.from_float(value) + except TypeError as e: + e2 = ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + raise e2 from e + if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0: + raise ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + 
return str(decimal_value) + + @classmethod + def validate_label_value(cls, label, value): + # Label values cannot be empty, contain commas or equal signs as those are + # used by add-metric as separators. + if not value: + raise ModelError( + 'metric label {} has an empty value, which is not allowed'.format(label)) + v = str(value) + if re.search('[,=]', v) is not None: + raise ModelError( + 'metric label values must not contain "," or "=": {}={!r}'.format(label, value)) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/storage.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/storage.py new file mode 100755 index 0000000000000000000000000000000000000000..d4310ce1cfbb707c6278b70f84a9751da3ce07af --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/storage.py @@ -0,0 +1,318 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from datetime import timedelta +import pickle +import shutil +import subprocess +import sqlite3 +import typing + +import yaml + + +class SQLiteStorage: + + DB_LOCK_TIMEOUT = timedelta(hours=1) + + def __init__(self, filename): + # The isolation_level argument is set to None such that the implicit + # transaction management behavior of the sqlite3 module is disabled. + self._db = sqlite3.connect(str(filename), + isolation_level=None, + timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._setup() + + def _setup(self): + # Make sure that the database is locked until the connection is closed, + # not until the transaction ends. + self._db.execute("PRAGMA locking_mode=EXCLUSIVE") + c = self._db.execute("BEGIN") + c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") + if c.fetchone()[0] == 0: + # Keep in mind what might happen if the process dies somewhere below. + # The system must not be rendered permanently broken by that. + self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") + self._db.execute(''' + CREATE TABLE notice ( + sequence INTEGER PRIMARY KEY AUTOINCREMENT, + event_path TEXT, + observer_path TEXT, + method_name TEXT) + ''') + self._db.commit() + + def close(self): + self._db.close() + + def commit(self): + self._db.commit() + + # There's commit but no rollback. For abort to be supported, we'll need logic that + # can rollback decisions made by third-party code in terms of the internal state + # of objects that have been snapshotted, and hooks to let them know about it and + # take the needed actions to undo their logic until the last snapshot. + # This is doable but will increase significantly the chances for mistakes. + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + """Part of the Storage API, persist a snapshot data under the given handle. + + Args: + handle_path: The string identifying the snapshot. + snapshot_data: The data to be persisted. (as returned by Object.snapshot()). 
+                This might be a dict/tuple/int, but must only contain 'simple' python types.
+        """
+        # Use pickle for serialization, so the value remains portable.
+        raw_data = pickle.dumps(snapshot_data)
+        self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data))
+
+    def load_snapshot(self, handle_path: str) -> typing.Any:
+        """Part of the Storage API, retrieve a snapshot that was previously saved.
+
+        Args:
+            handle_path: The string identifying the snapshot.
+        Raises:
+            NoSnapshotError: if there is no snapshot for the given handle_path.
+        """
+        c = self._db.cursor()
+        c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,))
+        row = c.fetchone()
+        if row:
+            return pickle.loads(row[0])
+        raise NoSnapshotError(handle_path)
+
+    def drop_snapshot(self, handle_path: str):
+        """Part of the Storage API, remove a snapshot that was previously saved.
+
+        Dropping a snapshot that doesn't exist is treated as a no-op.
+        """
+        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))
+
+    def list_snapshots(self) -> typing.Generator[str, None, None]:
+        """Return the names of all snapshots that are currently saved."""
+        c = self._db.cursor()
+        c.execute("SELECT handle FROM snapshot")
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield row[0]
+
+    def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, record a notice (event and observer)"""
+        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
+                         (event_path, observer_path, method_name))
+
+    def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, remove a notice that was previously recorded."""
+        self._db.execute('''
+            DELETE FROM notice
+            WHERE event_path=?
+            AND observer_path=?
+            AND method_name=?
+            ''', (event_path, observer_path, method_name))
+
+    def notices(self, event_path: typing.Optional[str]) ->\
+            typing.Generator[typing.Tuple[str, str, str], None, None]:
+        """Part of the Storage API, return all notices that begin with event_path.
+
+        Args:
+            event_path: If supplied, will only yield events that match event_path. If not
+                supplied (or None/'') will return all events.
+        Returns:
+            Iterable of (event_path, observer_path, method_name) tuples
+        """
+        if event_path:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                WHERE event_path=?
+                ORDER BY sequence
+                ''', (event_path,))
+        else:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                ORDER BY sequence
+                ''')
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield tuple(row)
+
+
+class JujuStorage:
+    """Storing the content tracked by the Framework in Juju.
+
+    This uses :class:`_JujuStorageBackend` to interact with state-get/state-set
+    as the way to store state for the framework and for components.
+ """ + + NOTICE_KEY = "#notices#" + + def __init__(self, backend: '_JujuStorageBackend' = None): + self._backend = backend + if backend is None: + self._backend = _JujuStorageBackend() + + def close(self): + return + + def commit(self): + return + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + self._backend.set(handle_path, snapshot_data) + + def load_snapshot(self, handle_path): + try: + content = self._backend.get(handle_path) + except KeyError: + raise NoSnapshotError(handle_path) + return content + + def drop_snapshot(self, handle_path): + self._backend.delete(handle_path) + + def save_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.append([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.remove([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def notices(self, event_path: str): + notice_list = self._load_notice_list() + for row in notice_list: + if row[0] != event_path: + continue + yield tuple(row) + + def _load_notice_list(self) -> typing.List[typing.Tuple[str]]: + try: + notice_list = self._backend.get(self.NOTICE_KEY) + except KeyError: + return [] + if notice_list is None: + return [] + return notice_list + + def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None: + self._backend.set(self.NOTICE_KEY, notices) + + +class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)): + """Handle a couple basic python types. + + yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one + that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just + subclass SafeLoader and add tuples back in. + """ + # Taken from the example at: + # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml + + construct_python_tuple = yaml.Loader.construct_python_tuple + + +_SimpleLoader.add_constructor( + u'tag:yaml.org,2002:python/tuple', + _SimpleLoader.construct_python_tuple) + + +class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)): + """Add types supported by 'marshal' + + YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So + we want to only support dumping out types that are safe to load. + """ + + +_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple +_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple) + + +class _JujuStorageBackend: + """Implements the interface from the Operator framework to Juju's state-get/set/etc.""" + + @staticmethod + def is_available() -> bool: + """Check if Juju state storage is available. + + This checks if there is a 'state-get' executable in PATH. + """ + p = shutil.which('state-get') + return p is not None + + def set(self, key: str, value: typing.Any) -> None: + """Set a key to a given value. + + Args: + key: The string key that will be used to find the value later + value: Arbitrary content that will be returned by get(). + Raises: + CalledProcessError: if 'state-set' returns an error code. + """ + # default_flow_style=None means that it can use Block for + # complex types (types that have nested types) but use flow + # for simple types (like an array). Not all versions of PyYAML + # have the same default style. 
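+        # Note the double encoding below: the value is first dumped to a YAML string,
+        # then wrapped in a {key: <that string>} mapping that is piped to 'state-set --file -'.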
+        encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None)
+        content = yaml.dump(
+            {key: encoded_value}, encoding='utf-8', default_style='|',
+            default_flow_style=False,
+            Dumper=_SimpleDumper)
+        subprocess.run(["state-set", "--file", "-"], input=content, check=True)
+
+    def get(self, key: str) -> typing.Any:
+        """Get the value associated with a given key.
+
+        Args:
+            key: The string key that will be used to find the value
+        Raises:
+            CalledProcessError: if 'state-get' returns an error code.
+        """
+        # We don't capture stderr here so it can end up in debug logs.
+        p = subprocess.run(
+            ["state-get", key],
+            stdout=subprocess.PIPE,
+            check=True,
+        )
+        if p.stdout == b'' or p.stdout == b'\n':
+            raise KeyError(key)
+        return yaml.load(p.stdout, Loader=_SimpleLoader)
+
+    def delete(self, key: str) -> None:
+        """Remove a key from being tracked.
+
+        Args:
+            key: The key to stop storing
+        Raises:
+            CalledProcessError: if 'state-delete' returns an error code.
+        """
+        subprocess.run(["state-delete", key], check=True)
+
+
+class NoSnapshotError(Exception):
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return 'no snapshot data found for {} object'.format(self.handle_path)
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/testing.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/testing.py
new file mode 100755
index 0000000000000000000000000000000000000000..b4b3fe071216238007c9f3847ca9556be626bf6b
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/testing.py
@@ -0,0 +1,586 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import pathlib
+from textwrap import dedent
+import tempfile
+import typing
+import yaml
+import weakref
+
+from ops import (
+    charm,
+    framework,
+    model,
+    storage,
+)
+
+
+# OptionalYAML is something like metadata.yaml or actions.yaml. You can
+# pass in a file-like object or the string directly.
+OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+    """This class represents a way to build up the model that will drive a test suite.
+
+    The model that is created is from the viewpoint of the charm that you are testing.
+
+    Example::
+
+        harness = Harness(MyCharm)
+        # Do initial setup here
+        relation_id = harness.add_relation('db', 'postgresql')
+        # Now instantiate the charm to see events as the model changes
+        harness.begin()
+        harness.add_relation_unit(relation_id, 'postgresql/0')
+        harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+        # Check that charm has properly handled the relation_joined event for postgresql/0
+        self.assertEqual(harness.charm. ...)
+
+    Args:
+        charm_cls: The Charm class that you'll be testing.
+        meta: A string or file-like object containing the contents of
+            metadata.yaml.
If not supplied, we will look for a 'metadata.yaml' file in the + parent directory of the Charm, and if not found fall back to a trivial + 'name: test-charm' metadata. + actions: A string or file-like object containing the contents of + actions.yaml. If not supplied, we will look for a 'actions.yaml' file in the + parent directory of the Charm. + """ + + def __init__( + self, + charm_cls: typing.Type[charm.CharmBase], + *, + meta: OptionalYAML = None, + actions: OptionalYAML = None): + # TODO: jam 2020-03-05 We probably want to take config as a parameter as well, since + # it would define the default values of config that the charm would see. + self._charm_cls = charm_cls + self._charm = None + self._charm_dir = 'no-disk-path' # this may be updated by _create_meta + self._lazy_resource_dir = None + self._meta = self._create_meta(meta, actions) + self._unit_name = self._meta.name + '/0' + self._framework = None + self._hooks_enabled = True + self._relation_id_counter = 0 + self._backend = _TestingModelBackend(self._unit_name, self._meta) + self._model = model.Model(self._meta, self._backend) + self._storage = storage.SQLiteStorage(':memory:') + self._framework = framework.Framework( + self._storage, self._charm_dir, self._meta, self._model) + + @property + def charm(self) -> charm.CharmBase: + """Return the instance of the charm class that was passed to __init__. + + Note that the Charm is not instantiated until you have called + :meth:`.begin()`. + """ + return self._charm + + @property + def model(self) -> model.Model: + """Return the :class:`~ops.model.Model` that is being driven by this Harness.""" + return self._model + + @property + def framework(self) -> framework.Framework: + """Return the Framework that is being driven by this Harness.""" + return self._framework + + @property + def _resource_dir(self) -> pathlib.Path: + if self._lazy_resource_dir is not None: + return self._lazy_resource_dir + + self.__resource_dir = tempfile.TemporaryDirectory() + self._lazy_resource_dir = pathlib.Path(self.__resource_dir.name) + self._finalizer = weakref.finalize(self, self.__resource_dir.cleanup) + return self._lazy_resource_dir + + def begin(self) -> None: + """Instantiate the Charm and start handling events. + + Before calling begin(), there is no Charm instance, so changes to the Model won't emit + events. You must call begin before :attr:`.charm` is valid. + """ + if self._charm is not None: + raise RuntimeError('cannot call the begin method on the harness more than once') + + # The Framework adds attributes to class objects for events, etc. As such, we can't re-use + # the original class against multiple Frameworks. So create a locally defined class + # and register it. + # TODO: jam 2020-03-16 We are looking to changes this to Instance attributes instead of + # Class attributes which should clean up this ugliness. The API can stay the same + class TestEvents(self._charm_cls.on.__class__): + pass + + TestEvents.__name__ = self._charm_cls.on.__class__.__name__ + + class TestCharm(self._charm_cls): + on = TestEvents() + + # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo, + # rather than TestCharm has no attribute foo. + TestCharm.__name__ = self._charm_cls.__name__ + self._charm = TestCharm(self._framework) + + def _create_meta(self, charm_metadata, action_metadata): + """Create a CharmMeta object. + + Handle the cases where a user doesn't supply explicit metadata snippets. 
+ """ + filename = inspect.getfile(self._charm_cls) + charm_dir = pathlib.Path(filename).parents[1] + + if charm_metadata is None: + metadata_path = charm_dir / 'metadata.yaml' + if metadata_path.is_file(): + charm_metadata = metadata_path.read_text() + self._charm_dir = charm_dir + else: + # The simplest of metadata that the framework can support + charm_metadata = 'name: test-charm' + elif isinstance(charm_metadata, str): + charm_metadata = dedent(charm_metadata) + + if action_metadata is None: + actions_path = charm_dir / 'actions.yaml' + if actions_path.is_file(): + action_metadata = actions_path.read_text() + self._charm_dir = charm_dir + elif isinstance(action_metadata, str): + action_metadata = dedent(action_metadata) + + return charm.CharmMeta.from_yaml(charm_metadata, action_metadata) + + def add_oci_resource(self, resource_name: str, + contents: typing.Mapping[str, str] = None) -> None: + """Add oci resources to the backend. + + This will register an oci resource and create a temporary file for processing metadata + about the resource. A default set of values will be used for all the file contents + unless a specific contents dict is provided. + + Args: + resource_name: Name of the resource to add custom contents to. + contents: Optional custom dict to write for the named resource. + """ + if not contents: + contents = {'registrypath': 'registrypath', + 'username': 'username', + 'password': 'password', + } + if resource_name not in self._meta.resources.keys(): + raise RuntimeError('Resource {} is not a defined resources'.format(resource_name)) + if self._meta.resources[resource_name].type != "oci-image": + raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name)) + resource_dir = self._resource_dir / resource_name + resource_dir.mkdir(exist_ok=True) + resource_file = resource_dir / "contents.yaml" + with resource_file.open('wt', encoding='utf8') as resource_yaml: + yaml.dump(contents, resource_yaml) + self._backend._resources_map[resource_name] = resource_file + + def populate_oci_resources(self) -> None: + """Populate all OCI resources.""" + for name, data in self._meta.resources.items(): + if data.type == "oci-image": + self.add_oci_resource(name) + + def disable_hooks(self) -> None: + """Stop emitting hook events when the model changes. + + This can be used by developers to stop changes to the model from emitting events that + the charm will react to. Call :meth:`.enable_hooks` + to re-enable them. + """ + self._hooks_enabled = False + + def enable_hooks(self) -> None: + """Re-enable hook events from charm.on when the model is changed. + + By default hook events are enabled once you call :meth:`.begin`, + but if you have used :meth:`.disable_hooks`, this can be used to + enable them again. + """ + self._hooks_enabled = True + + def _next_relation_id(self): + rel_id = self._relation_id_counter + self._relation_id_counter += 1 + return rel_id + + def add_relation(self, relation_name: str, remote_app: str) -> int: + """Declare that there is a new relation between this app and `remote_app`. + + Args: + relation_name: The relation on Charm that is being related to + remote_app: The name of the application that is being related to + + Return: + The relation_id created by this add_relation. 
+ """ + rel_id = self._next_relation_id() + self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id) + self._backend._relation_names[rel_id] = relation_name + self._backend._relation_list_map[rel_id] = [] + self._backend._relation_data[rel_id] = { + remote_app: {}, + self._backend.unit_name: {}, + self._backend.app_name: {}, + } + # Reload the relation_ids list + if self._model is not None: + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return rel_id + relation = self._model.get_relation(relation_name, rel_id) + app = self._model.get_app(remote_app) + self._charm.on[relation_name].relation_created.emit( + relation, app) + return rel_id + + def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: + """Add a new unit to a relation. + + Example:: + + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + + This will trigger a `relation_joined` event and a `relation_changed` event. + + Args: + relation_id: The integer relation identifier (as returned by add_relation). + remote_unit_name: A string representing the remote unit that is being added. + Return: + None + """ + self._backend._relation_list_map[relation_id].append(remote_unit_name) + self._backend._relation_data[relation_id][remote_unit_name] = {} + relation_name = self._backend._relation_names[relation_id] + # Make sure that the Model reloads the relation_list for this relation_id, as well as + # reloading the relation data for this unit. + if self._model is not None: + remote_unit = self._model.get_unit(remote_unit_name) + relation = self._model.get_relation(relation_name, relation_id) + unit_cache = relation.data.get(remote_unit, None) + if unit_cache is not None: + unit_cache._invalidate() + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return + self._charm.on[relation_name].relation_joined.emit( + relation, remote_unit.app, remote_unit) + + def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping: + """Get the relation data bucket for a single app or unit in a given relation. + + This ignores all of the safety checks of who can and can't see data in relations (eg, + non-leaders can't read their own application's relation data because there are no events + that keep that data up-to-date for the unit). + + Args: + relation_id: The relation whose content we want to look at. + app_or_unit: The name of the application or unit whose data we want to read + Return: + a dict containing the relation data for `app_or_unit` or None. + Raises: + KeyError: if relation_id doesn't exist + """ + return self._backend._relation_data[relation_id].get(app_or_unit, None) + + def get_workload_version(self) -> str: + """Read the workload version that was set by the unit.""" + return self._backend._workload_version + + def set_model_name(self, name: str) -> None: + """Set the name of the Model that this is representing. + + This cannot be called once begin() has been called. But it lets you set the value that + will be returned by Model.name. + """ + if self._charm is not None: + raise RuntimeError('cannot set the Model name after begin()') + self._backend.model_name = name + + def update_relation_data( + self, + relation_id: int, + app_or_unit: str, + key_values: typing.Mapping, + ) -> None: + """Update the relation data for a given unit or application in a given relation. 
+ + This also triggers the `relation_changed` event for this relation_id. + + Args: + relation_id: The integer relation_id representing this relation. + app_or_unit: The unit or application name that is being updated. + This can be the local or remote application. + key_values: Each key/value will be updated in the relation data. + """ + relation_name = self._backend._relation_names[relation_id] + relation = self._model.get_relation(relation_name, relation_id) + if '/' in app_or_unit: + entity = self._model.get_unit(app_or_unit) + else: + entity = self._model.get_app(app_or_unit) + rel_data = relation.data.get(entity, None) + if rel_data is not None: + # rel_data may have cached now-stale data, so _invalidate() it. + # Note, this won't cause the data to be loaded if it wasn't already. + rel_data._invalidate() + + new_values = self._backend._relation_data[relation_id][app_or_unit].copy() + for k, v in key_values.items(): + if v == '': + new_values.pop(k, None) + else: + new_values[k] = v + self._backend._relation_data[relation_id][app_or_unit] = new_values + + if app_or_unit == self._model.unit.name: + # No events for our own unit + return + if app_or_unit == self._model.app.name: + # updating our own app only generates an event if it is a peer relation and we + # aren't the leader + is_peer = self._meta.relations[relation_name].role.is_peer() + if not is_peer: + return + if self._model.unit.is_leader(): + return + self._emit_relation_changed(relation_id, app_or_unit) + + def _emit_relation_changed(self, relation_id, app_or_unit): + if self._charm is None or not self._hooks_enabled: + return + rel_name = self._backend._relation_names[relation_id] + relation = self.model.get_relation(rel_name, relation_id) + if '/' in app_or_unit: + app_name = app_or_unit.split('/')[0] + unit_name = app_or_unit + app = self.model.get_app(app_name) + unit = self.model.get_unit(unit_name) + args = (relation, app, unit) + else: + app_name = app_or_unit + app = self.model.get_app(app_name) + args = (relation, app) + self._charm.on[rel_name].relation_changed.emit(*args) + + def update_config( + self, + key_values: typing.Mapping[str, str] = None, + unset: typing.Iterable[str] = (), + ) -> None: + """Update the config as seen by the charm. + + This will trigger a `config_changed` event. + + Args: + key_values: A Mapping of key:value pairs to update in config. + unset: An iterable of keys to remove from Config. (Note that this does + not currently reset the config values to the default defined in config.yaml.) + """ + config = self._backend._config + if key_values is not None: + for key, value in key_values.items(): + config[key] = value + for key in unset: + config.pop(key, None) + # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config + # is a LazyMapping, but its _load returns a dict and this method mutates + # the dict that Config is caching. Arguably we should be doing some sort + # of charm.framework.model.config._invalidate() + if self._charm is None or not self._hooks_enabled: + return + self._charm.on.config_changed.emit() + + def set_leader(self, is_leader: bool = True) -> None: + """Set whether this unit is the leader or not. + + If this charm becomes a leader then `leader_elected` will be triggered. + + Args: + is_leader: True/False as to whether this unit is the leader. 
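+
+        Example (illustrative sketch; assumes a Harness on which ``begin()``
+        has already been called)::
+
+            harness.set_leader(True)  # fires leader_elected if the unit was not already leader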
+ """ + was_leader = self._backend._is_leader + self._backend._is_leader = is_leader + # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in + # the Model objects, so this automatically gets noticed. + if is_leader and not was_leader and self._charm is not None and self._hooks_enabled: + self._charm.on.leader_elected.emit() + + def _get_backend_calls(self, reset: bool = True) -> list: + """Return the calls that we have made to the TestingModelBackend. + + This is useful mostly for testing the framework itself, so that we can assert that we + do/don't trigger extra calls. + + Args: + reset: If True, reset the calls list back to empty, if false, the call list is + preserved. + Return: + ``[(call1, args...), (call2, args...)]`` + """ + calls = self._backend._calls.copy() + if reset: + self._backend._calls.clear() + return calls + + +def _record_calls(cls): + """Replace methods on cls with methods that record that they have been called. + + Iterate all attributes of cls, and for public methods, replace them with a wrapped method + that records the method called along with the arguments and keyword arguments. + """ + for meth_name, orig_method in cls.__dict__.items(): + if meth_name.startswith('_'): + continue + + def decorator(orig_method): + def wrapped(self, *args, **kwargs): + full_args = (orig_method.__name__,) + args + if kwargs: + full_args = full_args + (kwargs,) + self._calls.append(full_args) + return orig_method(self, *args, **kwargs) + return wrapped + + setattr(cls, meth_name, decorator(orig_method)) + return cls + + +@_record_calls +class _TestingModelBackend: + """This conforms to the interface for ModelBackend but provides canned data. + + DO NOT use this class directly, it is used by `Harness`_ to drive the model. + `Harness`_ is responsible for maintaining the internal consistency of the values here, + as the only public methods of this type are for implementing ModelBackend. + """ + + def __init__(self, unit_name, meta): + self.unit_name = unit_name + self.app_name = self.unit_name.split('/')[0] + self.model_name = None + self._calls = [] + self._meta = meta + self._is_leader = None + self._relation_ids_map = {} # relation name to [relation_ids,...] + self._relation_names = {} # reverse map from relation_id to relation_name + self._relation_list_map = {} # relation_id: [unit_name,...] 
+ self._relation_data = {} # {relation_id: {name: data}} + self._config = {} + self._is_leader = False + self._resources_map = {} + self._pod_spec = None + self._app_status = {'status': 'unknown', 'message': ''} + self._unit_status = {'status': 'maintenance', 'message': ''} + self._workload_version = None + + def relation_ids(self, relation_name): + try: + return self._relation_ids_map[relation_name] + except KeyError as e: + if relation_name not in self._meta.relations: + raise model.ModelError('{} is not a known relation'.format(relation_name)) from e + return [] + + def relation_list(self, relation_id): + try: + return self._relation_list_map[relation_id] + except KeyError as e: + raise model.RelationNotFoundError from e + + def relation_get(self, relation_id, member_name, is_app): + if is_app and '/' in member_name: + member_name = member_name.split('/')[0] + if relation_id not in self._relation_data: + raise model.RelationNotFoundError() + return self._relation_data[relation_id][member_name].copy() + + def relation_set(self, relation_id, key, value, is_app): + relation = self._relation_data[relation_id] + if is_app: + bucket_key = self.app_name + else: + bucket_key = self.unit_name + if bucket_key not in relation: + relation[bucket_key] = {} + bucket = relation[bucket_key] + if value == '': + bucket.pop(key, None) + else: + bucket[key] = value + + def config_get(self): + return self._config + + def is_leader(self): + return self._is_leader + + def application_version_set(self, version): + self._workload_version = version + + def resource_get(self, resource_name): + return self._resources_map[resource_name] + + def pod_spec_set(self, spec, k8s_resources): + self._pod_spec = (spec, k8s_resources) + + def status_get(self, *, is_app=False): + if is_app: + return self._app_status + else: + return self._unit_status + + def status_set(self, status, message='', *, is_app=False): + if is_app: + self._app_status = {'status': status, 'message': message} + else: + self._unit_status = {'status': status, 'message': message} + + def storage_list(self, name): + raise NotImplementedError(self.storage_list) + + def storage_get(self, storage_name_id, attribute): + raise NotImplementedError(self.storage_get) + + def storage_add(self, name, count=1): + raise NotImplementedError(self.storage_add) + + def action_get(self): + raise NotImplementedError(self.action_get) + + def action_set(self, results): + raise NotImplementedError(self.action_set) + + def action_log(self, message): + raise NotImplementedError(self.action_log) + + def action_fail(self, message=''): + raise NotImplementedError(self.action_fail) + + def network_get(self, endpoint_name, relation_id=None): + raise NotImplementedError(self.network_get) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/version.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/version.py new file mode 100644 index 0000000000000000000000000000000000000000..15e5478555ee0fa948bfb0ad57cc79ba7cef3721 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/lib/ops/version.py @@ -0,0 +1,50 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+from pathlib import Path
+
+__all__ = ('version',)
+
+_FALLBACK = '0.8'  # this gets bumped after release
+
+
+def _get_version():
+    version = _FALLBACK + ".dev0+unknown"
+
+    p = Path(__file__).parent
+    if (p.parent / '.git').exists():
+        try:
+            proc = subprocess.run(
+                ['git', 'describe', '--tags', '--dirty'],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.DEVNULL,
+                cwd=p,
+                check=True)
+        except Exception:
+            pass
+        else:
+            version = proc.stdout.strip().decode('utf8')
+            if '-' in version:
+                # version will look like <tag>-<#commits>-g<sha>[-dirty]
+                # in terms of PEP 440, the tag we'll make sure is a 'public version identifier';
+                # everything after the first - needs to be a 'local version'
+                public, local = version.split('-', 1)
+                version = public + '+' + local.replace('-', '.')
+                # version now <tag>+<#commits>.g<sha>[.dirty]
+                # which is PEP440-compliant (as long as <tag> is :-)
+    return version
+
+
+version = _get_version()
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/manifest.yaml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/manifest.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..444913c54e28556b47af3b9fe616414b9743362b
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/manifest.yaml
@@ -0,0 +1,13 @@
+analysis:
+  attributes:
+  - name: language
+    result: python
+  - name: framework
+    result: unknown
+bases:
+- architectures:
+  - amd64
+  channel: '18.04'
+  name: ubuntu
+charmcraft-started-at: '2022-05-10T20:29:56.920071Z'
+charmcraft-version: 1.6.0
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/metadata.yaml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..207b086e008b6228068fdda6674b358b63ef9950
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/metadata.yaml
@@ -0,0 +1,26 @@
+##
+# Copyright 2020 Canonical Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+## + +name: simple-ha-proxy +summary: A simple example proxy charm +description: | + Simple proxy charm is an example charm used in OSM Hackfests +series: + - bionic +peers: + proxypeer: + interface: proxypeer diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/LICENSE b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/README.md b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a98a55253f32b50a17989c1fcd7df6e5f82454af --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/README.md @@ -0,0 +1,201 @@ +# charms.osm + +A Python library to aid the development of charms for Open Source Mano (OSM) + +## How to install + +```bash +git submodule add https://github.com/canonical/operator mod/operator +git submodule add https://github.com/charmed-osm/charms.osm mod/charms.osm +git submodule add https://github.com/juju/charm-helpers.git mod/charm-helpers # Only for libansible +``` + +## SSHProxyCharm + +In this section, we show the class you should inherit from in order to develop your SSH Proxy charms. + +Example: + +```python +from charms.osm.sshproxy import SSHProxyCharm + +class MySSHProxyCharm(SSHProxyCharm): + + def __init__(self, framework, key): + super().__init__(framework, key) + + # Listen to charm events + self.framework.observe(self.on.config_changed, self.on_config_changed) + self.framework.observe(self.on.install, self.on_install) + self.framework.observe(self.on.start, self.on_start) + + # Listen to the touch action event + self.framework.observe(self.on.touch_action, self.on_touch_action) + + def on_config_changed(self, event): + """Handle changes in configuration""" + super().on_config_changed(event) + + def on_install(self, event): + """Called when the charm is being installed""" + super().on_install(event) + + def on_start(self, event): + """Called when the charm is being started""" + super().on_start(event) + + def on_touch_action(self, event): + """Touch a file.""" + + if self.model.unit.is_leader(): + filename = event.params["filename"] + proxy = self.get_ssh_proxy() + stdout, stderr = proxy.run("touch {}".format(filename)) + event.set_results({"output": stdout}) + else: + event.fail("Unit is not leader") + return +``` + +### Attributes and methods available + +- Atttributes: + - state: StoredState object. 
It can be used to store state data to be shared within a charm across different hooks
+- SSH-related methods:
+  - get_ssh_proxy(): Return an SSHProxy object with which you can then execute scp and ssh commands on the remote machine.
+  - verify_credentials(): Return True if it has the right credentials to SSH into the remote machine. It also updates the status of the unit.
+- Charm-related methods: Methods that should be run in specific hooks/events.
+  - on_install(): Install dependencies for enabling SSH functionality.
+  - on_start(): Generate needed SSH keys.
+  - on_config_changed(): Check if the SSH credentials are valid and update the unit status accordingly.
+
+### config.yaml
+
+You need to add this to the config.yaml of your charm.
+
+```yaml
+options:
+  ssh-hostname:
+    type: string
+    default: ""
+    description: "The hostname or IP address of the machine to connect to."
+  ssh-username:
+    type: string
+    default: ""
+    description: "The username to login as."
+  ssh-password:
+    type: string
+    default: ""
+    description: "The password used to authenticate."
+  ssh-public-key:
+    type: string
+    default: ""
+    description: "The public key of this unit."
+  ssh-key-type:
+    type: string
+    default: "rsa"
+    description: "The type of encryption to use for the SSH key."
+  ssh-key-bits:
+    type: int
+    default: 4096
+    description: "The number of bits to use for the SSH key."
+
+```
+
+### metadata.yaml
+
+You need to add this to the metadata.yaml of your charm.
+
+```yaml
+peers:
+  proxypeer:
+    interface: proxypeer
+```
+
+### actions.yaml
+
+You need to add this to the actions.yaml of your charm.
+
+```yaml
+# Required by charms.osm.sshproxy
+run:
+  description: "Run an arbitrary command"
+  params:
+    command:
+      description: "The command to execute."
+      type: string
+      default: ""
+  required:
+    - command
+generate-ssh-key:
+  description: "Generate a new SSH keypair for this unit. This will replace any existing previously generated keypair."
+verify-ssh-credentials:
+  description: "Verify that this unit can authenticate with the server specified by ssh-hostname and ssh-username."
+get-ssh-public-key:
+  description: "Get the public SSH key for this unit."
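+# Illustrative only: charm-specific actions can be appended alongside the ones
+# above, e.g. a hypothetical "add-user" primitive:
+#
+# add-user:
+#   description: "Create a user on the VNF."
+#   params:
+#     username:
+#       description: "The name of the user to create."
+#       type: string
+#       default: ""
+#   required:
+#     - username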
+``` + +## SSHProxy + +Example: + +```python +from charms.osm.sshproxy import SSHProxy + +# Check if SSH Proxy has key +if not SSHProxy.has_ssh_key(): + # Generate SSH Key + SSHProxy.generate_ssh_key() + +# Get generated public and private keys +SSHProxy.get_ssh_public_key() +SSHProxy.get_ssh_private_key() + +# Get Proxy +proxy = SSHProxy( + hostname=config["ssh-hostname"], + username=config["ssh-username"], + password=config["ssh-password"], +) + +# Verify credentials +verified = proxy.verify_credentials() + +if verified: + # Run commands in remote machine + proxy.run("touch /home/ubuntu/touch") +``` + +## Libansible + +```python +from charms.osm import libansible + +# Install ansible packages in the charm +libansible.install_ansible_support() + +result = libansible.execute_playbook( + "configure-remote.yaml", # Name of the playbook <-- Put the playbook in playbooks/ folder + config["ssh-hostname"], + config["ssh-username"], + config["ssh-password"], + dict_vars, # Dictionary with variables to populate in the playbook +) +``` + +## Usage + +Import submodules: + +```bash +git submodule add https://github.com/charmed-osm/charms.osm mod/charms.osm +git submodule add https://github.com/juju/charm-helpers.git mod/charm-helpers # Only for libansible +``` + +Add symlinks: + +```bash +mkdir -p lib/charms +ln -s ../mod/charms.osm/charms/osm lib/charms/osm +ln -s ../mod/charm-helpers/charmhelpers lib/charmhelpers # Only for libansible +``` diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/libansible.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/libansible.py new file mode 100644 index 0000000000000000000000000000000000000000..32fd26ae7d63d42edef33982d5438669b191a361 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/libansible.py @@ -0,0 +1,108 @@ +## +# Copyright 2020 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +## + +import fnmatch +import os +import yaml +import subprocess +import sys + +sys.path.append("lib") +import charmhelpers.fetch + + +ansible_hosts_path = "/etc/ansible/hosts" + + +def install_ansible_support(from_ppa=True, ppa_location="ppa:ansible/ansible"): + """Installs the ansible package. + + By default it is installed from the `PPA`_ linked from + the ansible `website`_ or from a ppa specified by a charm config.. + + .. _PPA: https://launchpad.net/~rquillo/+archive/ansible + .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu + + If from_ppa is empty, you must ensure that the package is available + from a configured repository. 
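+
+    Example (illustrative)::
+
+        install_ansible_support()                # install from the default PPA
+        install_ansible_support(from_ppa=False)  # use an already-configured repo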
+ """ + if from_ppa: + charmhelpers.fetch.add_source(ppa_location) + charmhelpers.fetch.apt_update(fatal=True) + charmhelpers.fetch.apt_install("ansible") + with open(ansible_hosts_path, "w+") as hosts_file: + hosts_file.write("localhost ansible_connection=local") + + +def create_hosts(hostname, username, password, hosts): + inventory_path = "/etc/ansible/hosts" + + with open(inventory_path, "w") as f: + f.write("[{}]\n".format(hosts)) + h1 = "host ansible_host={0} ansible_user={1} ansible_password={2}\n".format( + hostname, username, password + ) + f.write(h1) + + +def create_ansible_cfg(): + ansible_config_path = "/etc/ansible/ansible.cfg" + + with open(ansible_config_path, "w") as f: + f.write("[defaults]\n") + f.write("host_key_checking = False\n") + + +# Function to find the playbook path +def find(pattern, path): + result = "" + for root, dirs, files in os.walk(path): + for name in files: + if fnmatch.fnmatch(name, pattern): + result = os.path.join(root, name) + return result + + +def execute_playbook(playbook_file, hostname, user, password, vars_dict=None): + playbook_path = find(playbook_file, "/var/lib/juju/agents/") + + with open(playbook_path, "r") as f: + playbook_data = yaml.load(f) + + hosts = "all" + if "hosts" in playbook_data[0].keys() and playbook_data[0]["hosts"]: + hosts = playbook_data[0]["hosts"] + + create_ansible_cfg() + create_hosts(hostname, user, password, hosts) + + call = "ansible-playbook {} ".format(playbook_path) + + if vars_dict and isinstance(vars_dict, dict) and len(vars_dict) > 0: + call += "--extra-vars " + + string_var = "" + for k,v in vars_dict.items(): + string_var += "{}={} ".format(k, v) + + string_var = string_var.strip() + call += '"{}"'.format(string_var) + + call = call.strip() + result = subprocess.check_output(call, shell=True) + + return result diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/ns.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/ns.py new file mode 100644 index 0000000000000000000000000000000000000000..25be4056282e48ce946632025be8b466557e3171 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/ns.py @@ -0,0 +1,301 @@ +# A prototype of a library to aid in the development and operation of +# OSM Network Service charms + +import asyncio +import logging +import os +import os.path +import re +import subprocess +import sys +import time +import yaml + +try: + import juju +except ImportError: + # Not all cloud images are created equal + if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"): + # Update the apt cache + subprocess.check_call(["apt-get", "update"]) + + # Install the Python3 package + subprocess.check_call(["apt-get", "install", "-y", "python3", "python3-pip"],) + + + # Install the libjuju build dependencies + subprocess.check_call(["apt-get", "install", "-y", "libffi-dev", "libssl-dev"],) + + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "juju"], + ) + +from juju.controller import Controller + +# Quiet the debug logging +logging.getLogger('websockets.protocol').setLevel(logging.INFO) +logging.getLogger('juju.client.connection').setLevel(logging.WARN) +logging.getLogger('juju.model').setLevel(logging.WARN) +logging.getLogger('juju.machine').setLevel(logging.WARN) + + +class NetworkService: + """A lightweight interface to the Juju controller. 
+ + This NetworkService client is specifically designed to allow a higher-level + "NS" charm to interoperate with "VNF" charms, allowing for the execution of + Primitives across other charms within the same model. + """ + endpoint = None + user = 'admin' + secret = None + port = 17070 + loop = None + client = None + model = None + cacert = None + + def __init__(self, user, secret, endpoint=None): + + self.user = user + self.secret = secret + if endpoint is None: + addresses = os.environ['JUJU_API_ADDRESSES'] + for address in addresses.split(' '): + self.endpoint = address + else: + self.endpoint = endpoint + + # Stash the name of the model + self.model = os.environ['JUJU_MODEL_NAME'] + + # Load the ca-cert from agent.conf + AGENT_PATH = os.path.dirname(os.environ['JUJU_CHARM_DIR']) + with open("{}/agent.conf".format(AGENT_PATH), "r") as f: + try: + y = yaml.safe_load(f) + self.cacert = y['cacert'] + except yaml.YAMLError as exc: + print("Unable to find Juju ca-cert.") + raise exc + + # Create our event loop + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + async def connect(self): + """Connect to the Juju controller.""" + controller = Controller() + + print( + "Connecting to controller... ws://{}:{} as {}/{}".format( + self.endpoint, + self.port, + self.user, + self.secret[-4:].rjust(len(self.secret), "*"), + ) + ) + await controller.connect( + endpoint=self.endpoint, + username=self.user, + password=self.secret, + cacert=self.cacert, + ) + + return controller + + def __del__(self): + self.logout() + + async def disconnect(self): + """Disconnect from the Juju controller.""" + if self.client: + print("Disconnecting Juju controller") + await self.client.disconnect() + + def login(self): + """Login to the Juju controller.""" + if not self.client: + # Connect to the Juju API server + self.client = self.loop.run_until_complete(self.connect()) + return self.client + + def logout(self): + """Logout of the Juju controller.""" + + if self.loop: + print("Disconnecting from API") + self.loop.run_until_complete(self.disconnect()) + + def FormatApplicationName(self, *args): + """ + Generate a Juju-compatible Application name + + :param args tuple: Positional arguments to be used to construct the + application name. + + Limitations:: + - Only accepts characters a-z and non-consequitive dashes (-) + - Application name should not exceed 50 characters + + Examples:: + + FormatApplicationName("ping_pong_ns", "ping_vnf", "a") + """ + appname = "" + for c in "-".join(list(args)): + if c.isdigit(): + c = chr(97 + int(c)) + elif not c.isalpha(): + c = "-" + appname += c + + return re.sub('-+', '-', appname.lower()) + + def GetApplicationName(self, nsr_name, vnf_name, vnf_member_index): + """Get the runtime application name of a VNF/VDU. + + This will generate an application name matching the name of the deployed charm, + given the right parameters. + + :param nsr_name str: The name of the running Network Service, as specified at instantiation. 
+ :param vnf_name str: The name of the VNF or VDU + :param vnf_member_index: The vnf-member-index as specified in the descriptor + """ + + application_name = self.FormatApplicationName(nsr_name, vnf_member_index, vnf_name) + + # This matches the logic used by the LCM + application_name = application_name[0:48] + vca_index = int(vnf_member_index) - 1 + application_name += '-' + chr(97 + vca_index // 26) + chr(97 + vca_index % 26) + + return application_name + + def ExecutePrimitiveGetOutput(self, application, primitive, params={}, timeout=600): + """Execute a single primitive and return it's output. + + This is a blocking method that will execute a single primitive and wait + for its completion before return it's output. + + :param application str: The application name provided by `GetApplicationName`. + :param primitive str: The name of the primitive to execute. + :param params list: A list of parameters. + :param timeout int: A timeout, in seconds, to wait for the primitive to finish. Defaults to 600 seconds. + """ + uuid = self.ExecutePrimitive(application, primitive, params) + + status = None + output = None + + starttime = time.time() + while(time.time() < starttime + timeout): + status = self.GetPrimitiveStatus(uuid) + if status in ['completed', 'failed']: + break + time.sleep(10) + + # When the primitive is done, get the output + if status in ['completed', 'failed']: + output = self.GetPrimitiveOutput(uuid) + + return output + + def ExecutePrimitive(self, application, primitive, params={}): + """Execute a primitive. + + This is a non-blocking method to execute a primitive. It will return + the UUID of the queued primitive execution, which you can use + for subsequent calls to `GetPrimitiveStatus` and `GetPrimitiveOutput`. + + :param application string: The name of the application + :param primitive string: The name of the Primitive. + :param params list: A list of parameters. + + :returns uuid string: The UUID of the executed Primitive + """ + uuid = None + + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + # Get the application + if application in model.applications: + app = model.applications[application] + + # Execute the primitive + unit = app.units[0] + if unit: + action = self.loop.run_until_complete( + unit.run_action(primitive, **params) + ) + uuid = action.id + print("Executing action: {}".format(uuid)) + self.loop.run_until_complete( + model.disconnect() + ) + else: + # Invalid mapping: application not found. Raise exception + raise Exception("Application not found: {}".format(application)) + + return uuid + + def GetPrimitiveStatus(self, uuid): + """Get the status of a Primitive execution. + + This will return one of the following strings: + - pending + - running + - completed + - failed + + :param uuid string: The UUID of the executed Primitive. + :returns: The status of the executed Primitive + """ + status = None + + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + status = self.loop.run_until_complete( + model.get_action_status(uuid) + ) + + self.loop.run_until_complete( + model.disconnect() + ) + + return status[uuid] + + def GetPrimitiveOutput(self, uuid): + """Get the output of a completed Primitive execution. + + + :param uuid string: The UUID of the executed Primitive. + :returns: The output of the execution, or None if it's still running. 
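+
+        Example (illustrative sketch; ``ns`` is a NetworkService instance and
+        the application and primitive names are placeholders)::
+
+            uuid = ns.ExecutePrimitive("my-app-aa", "configure", {"target": "vnf1"})
+            if ns.GetPrimitiveStatus(uuid) in ("completed", "failed"):
+                output = ns.GetPrimitiveOutput(uuid)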
+ """ + result = None + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + result = self.loop.run_until_complete( + model.get_action_output(uuid) + ) + + self.loop.run_until_complete( + model.disconnect() + ) + + return result diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/proxy_cluster.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/proxy_cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..f323a3af88f3011ef9fde794cdfe343be8665abb --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/proxy_cluster.py @@ -0,0 +1,59 @@ +import socket + +from ops.framework import Object, StoredState + + +class ProxyCluster(Object): + + state = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self._relation_name = relation_name + self._relation = self.framework.model.get_relation(self._relation_name) + + self.framework.observe(charm.on.ssh_keys_initialized, self.on_ssh_keys_initialized) + + self.state.set_default(ssh_public_key=None) + self.state.set_default(ssh_private_key=None) + + def on_ssh_keys_initialized(self, event): + if not self.framework.model.unit.is_leader(): + raise RuntimeError("The initial unit of a cluster must also be a leader.") + + self.state.ssh_public_key = event.ssh_public_key + self.state.ssh_private_key = event.ssh_private_key + if not self.is_joined: + event.defer() + return + + self._relation.data[self.model.app][ + "ssh_public_key" + ] = self.state.ssh_public_key + self._relation.data[self.model.app][ + "ssh_private_key" + ] = self.state.ssh_private_key + + @property + def is_joined(self): + return self._relation is not None + + @property + def ssh_public_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_public_key") + + @property + def ssh_private_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_private_key") + + @property + def is_cluster_initialized(self): + return ( + True + if self.is_joined + and self._relation.data[self.model.app].get("ssh_public_key") + and self._relation.data[self.model.app].get("ssh_private_key") + else False + ) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/sshproxy.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/sshproxy.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c311e5be8515a7fe19c05dfed9f042ded0576b --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/charms.osm/charms/osm/sshproxy.py @@ -0,0 +1,375 @@ +"""Module to help with executing commands over SSH.""" +## +# Copyright 2016 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+## + +# from charmhelpers.core import unitdata +# from charmhelpers.core.hookenv import log + +import io +import ipaddress +import subprocess +import os +import socket +import shlex +import traceback +import sys + +from subprocess import ( + check_call, + Popen, + CalledProcessError, + PIPE, +) + +from ops.charm import CharmBase, CharmEvents +from ops.framework import StoredState, EventBase, EventSource +from ops.main import main +from ops.model import ( + ActiveStatus, + BlockedStatus, + MaintenanceStatus, + WaitingStatus, + ModelError, +) +import os +import subprocess +from .proxy_cluster import ProxyCluster + +import logging + + +logger = logging.getLogger(__name__) + +class SSHKeysInitialized(EventBase): + def __init__(self, handle, ssh_public_key, ssh_private_key): + super().__init__(handle) + self.ssh_public_key = ssh_public_key + self.ssh_private_key = ssh_private_key + + def snapshot(self): + return { + "ssh_public_key": self.ssh_public_key, + "ssh_private_key": self.ssh_private_key, + } + + def restore(self, snapshot): + self.ssh_public_key = snapshot["ssh_public_key"] + self.ssh_private_key = snapshot["ssh_private_key"] + + +class ProxyClusterEvents(CharmEvents): + ssh_keys_initialized = EventSource(SSHKeysInitialized) + + +class SSHProxyCharm(CharmBase): + + state = StoredState() + on = ProxyClusterEvents() + + def __init__(self, framework, key): + super().__init__(framework, key) + + self.peers = ProxyCluster(self, "proxypeer") + + # SSH Proxy actions (primitives) + self.framework.observe(self.on.generate_ssh_key_action, self.on_generate_ssh_key_action) + self.framework.observe(self.on.get_ssh_public_key_action, self.on_get_ssh_public_key_action) + self.framework.observe(self.on.run_action, self.on_run_action) + self.framework.observe(self.on.verify_ssh_credentials_action, self.on_verify_ssh_credentials_action) + + self.framework.observe(self.on.proxypeer_relation_changed, self.on_proxypeer_relation_changed) + + def get_ssh_proxy(self): + """Get the SSHProxy instance""" + proxy = SSHProxy( + hostname=self.model.config["ssh-hostname"], + username=self.model.config["ssh-username"], + password=self.model.config["ssh-password"], + ) + return proxy + + def on_proxypeer_relation_changed(self, event): + if self.peers.is_cluster_initialized and not SSHProxy.has_ssh_key(): + pubkey = self.peers.ssh_public_key + privkey = self.peers.ssh_private_key + SSHProxy.write_ssh_keys(public=pubkey, private=privkey) + self.verify_credentials() + else: + event.defer() + + def on_config_changed(self, event): + """Handle changes in configuration""" + self.verify_credentials() + + def on_install(self, event): + SSHProxy.install() + + def on_start(self, event): + """Called when the charm is being installed""" + if not self.peers.is_joined: + event.defer() + return + + unit = self.model.unit + + if not SSHProxy.has_ssh_key(): + unit.status = MaintenanceStatus("Generating SSH keys...") + pubkey = None + privkey = None + if self.model.unit.is_leader(): + if self.peers.is_cluster_initialized: + SSHProxy.write_ssh_keys( + public=self.peers.ssh_public_key, + private=self.peers.ssh_private_key, + ) + else: + SSHProxy.generate_ssh_key() + self.on.ssh_keys_initialized.emit( + SSHProxy.get_ssh_public_key(), SSHProxy.get_ssh_private_key() + ) + self.verify_credentials() + + def verify_credentials(self): + unit = self.model.unit + + # Unit should go into a waiting state until verify_ssh_credentials is successful + unit.status = WaitingStatus("Waiting for SSH credentials") + proxy = self.get_ssh_proxy() + verified, 
_ = proxy.verify_credentials() + if verified: + unit.status = ActiveStatus() + else: + unit.status = BlockedStatus("Invalid SSH credentials.") + return verified + + ##################### + # SSH Proxy methods # + ##################### + def on_generate_ssh_key_action(self, event): + """Generate a new SSH keypair for this unit.""" + if self.model.unit.is_leader(): + if not SSHProxy.generate_ssh_key(): + event.fail("Unable to generate ssh key") + else: + event.fail("Unit is not leader") + return + + def on_get_ssh_public_key_action(self, event): + """Get the SSH public key for this unit.""" + if self.model.unit.is_leader(): + pubkey = SSHProxy.get_ssh_public_key() + event.set_results({"pubkey": SSHProxy.get_ssh_public_key()}) + else: + event.fail("Unit is not leader") + return + + def on_run_action(self, event): + """Run an arbitrary command on the remote host.""" + if self.model.unit.is_leader(): + cmd = event.params["command"] + proxy = self.get_ssh_proxy() + stdout, stderr = proxy.run(cmd) + event.set_results({"output": stdout}) + if len(stderr): + event.fail(stderr) + else: + event.fail("Unit is not leader") + return + + def on_verify_ssh_credentials_action(self, event): + """Verify the SSH credentials for this unit.""" + unit = self.model.unit + if unit.is_leader(): + proxy = self.get_ssh_proxy() + verified, stderr = proxy.verify_credentials() + if verified: + event.set_results({"verified": True}) + unit.status = ActiveStatus() + else: + event.set_results({"verified": False, "stderr": stderr}) + event.fail("Not verified") + unit.status = BlockedStatus("Invalid SSH credentials.") + + else: + event.fail("Unit is not leader") + return + + +class LeadershipError(ModelError): + def __init__(self): + super().__init__("not leader") + +class SSHProxy: + private_key_path = "/root/.ssh/id_sshproxy" + public_key_path = "/root/.ssh/id_sshproxy.pub" + key_type = "rsa" + key_bits = 4096 + + def __init__(self, hostname: str, username: str, password: str = ""): + self.hostname = hostname + self.username = username + self.password = password + + @staticmethod + def install(): + check_call("apt update && apt install -y openssh-client sshpass", shell=True) + + @staticmethod + def generate_ssh_key(): + """Generate a 4096-bit rsa keypair.""" + if not os.path.exists(SSHProxy.private_key_path): + cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format( + SSHProxy.key_type, SSHProxy.key_bits, SSHProxy.private_key_path, + ) + + try: + check_call(cmd, shell=True) + except CalledProcessError: + return False + + return True + + @staticmethod + def write_ssh_keys(public, private): + """Write a 4096-bit rsa keypair.""" + with open(SSHProxy.public_key_path, "w") as f: + f.write(public) + f.close() + with open(SSHProxy.private_key_path, "w") as f: + f.write(private) + f.close() + + @staticmethod + def get_ssh_public_key(): + publickey = "" + if os.path.exists(SSHProxy.private_key_path): + with open(SSHProxy.public_key_path, "r") as f: + publickey = f.read() + return publickey + + @staticmethod + def get_ssh_private_key(): + privatekey = "" + if os.path.exists(SSHProxy.private_key_path): + with open(SSHProxy.private_key_path, "r") as f: + privatekey = f.read() + return privatekey + + @staticmethod + def has_ssh_key(): + return True if os.path.exists(SSHProxy.private_key_path) else False + + def run(self, cmd: str) -> (str, str): + """Run a command remotely via SSH. 
+ + Note: The previous behavior was to run the command locally if SSH wasn't + configured, but that can lead to cases where execution succeeds when you'd + expect it not to. + """ + if isinstance(cmd, str): + cmd = shlex.split(cmd) + + host = self._get_hostname() + user = self.username + passwd = self.password + key = self.private_key_path + + # Make sure we have everything we need to connect + if host and user: + return self.ssh(cmd) + + raise Exception("Invalid SSH credentials.") + + def scp(self, source_file, destination_file): + """Execute an scp command. Requires a fully qualified source and + destination. + + :param str source_file: Path to the source file + :param str destination_file: Path to the destination file + :raises: :class:`CalledProcessError` if the command fails + """ + cmd = [ + "sshpass", + "-p", + self.password, + "scp", + "-i", + os.path.expanduser(self.private_key_path), + "-o", + "StrictHostKeyChecking=no", + "-q", + "-B", + ] + destination = "{}@{}:{}".format(self.username, self.hostname, destination_file) + cmd.extend([source_file, destination]) + subprocess.run(cmd, check=True) + + def ssh(self, command): + """Run a command remotely via SSH. + + :param list(str) command: The command to execute + :return: tuple: The stdout and stderr of the command execution + :raises: :class:`CalledProcessError` if the command fails + """ + + destination = "{}@{}".format(self.username, self.hostname) + cmd = [ + "sshpass", + "-p", + self.password, + "ssh", + "-i", + os.path.expanduser(self.private_key_path), + "-o", + "StrictHostKeyChecking=no", + "-q", + destination, + ] + cmd.extend(command) + output = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return (output.stdout.decode("utf-8").strip(), output.stderr.decode("utf-8").strip()) + + def verify_credentials(self): + """Verify the SSH credentials. + + :return (bool, str): Verified, Stderr + """ + verified = False + try: + (stdout, stderr) = self.run("hostname") + verified = True + except CalledProcessError as e: + stderr = "Command failed: {} ({})".format(" ".join(e.cmd), str(e.output)) + except (TimeoutError, socket.timeout): + stderr = "Timeout attempting to reach {}".format(self._get_hostname()) + except Exception as error: + tb = traceback.format_exc() + stderr = "Unhandled exception: {}".format(tb) + return verified, stderr + + ################### + # Private methods # + ################### + def _get_hostname(self): + """Get the hostname for the ssh target. + + HACK: This function was added to work around an issue where the + ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first + is the floating ip, and the second the non-floating ip, for an Openstack + instance. 
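+
+        Example (illustrative addresses)::
+
+            SSHProxy("10.0.0.5;192.168.0.5", "ubuntu")._get_hostname()  # -> "10.0.0.5"
+            SSHProxy("10.0.0.5", "ubuntu")._get_hostname()              # -> "10.0.0.5"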
+ """ + return self.hostname.split(";")[0] diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.flake8 b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.flake8 new file mode 100644 index 0000000000000000000000000000000000000000..61d908155588a7968dd25c90cff349377305a789 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 99 diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.gitignore b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5596c502618d6428de0199c4cec069c151c0edd3 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.gitignore @@ -0,0 +1,4 @@ +__pycache__ +/sandbox +.idea +docs/_build diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.readthedocs.yaml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.readthedocs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6989fdb6a173e6f75343fd5d2953acb9c24b0a2 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.readthedocs.yaml @@ -0,0 +1,6 @@ +version: 2 # required +formats: [] # i.e. no extra formats (for now) +python: + version: "3.5" + install: + - requirements: docs/requirements.txt diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.travis.yml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.travis.yml new file mode 100644 index 0000000000000000000000000000000000000000..5aa83238027ae5dccb4a797e6c7ed456f1cfe5de --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/.travis.yml @@ -0,0 +1,24 @@ +dist: bionic + +language: python + +arch: + - amd64 + - arm64 + +python: + - "3.5" + - "3.6" + - "3.7" + - "3.8" + +matrix: + include: + - os: osx + language: generic + +install: + - pip3 install -r requirements-dev.txt + +script: + - ./run_tests diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/CODE_OF_CONDUCT.md b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..b5bc1b1f8f266e3c7e15e8b8db490f688a1e5230 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/CODE_OF_CONDUCT.md @@ -0,0 +1,78 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by contacting the project team at +charmcrafters@lists.launchpad.net. All complaints will be reviewed and +investigated and will result in a response that is deemed necessary +and appropriate to the circumstances. The project team is obligated to +maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted +separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/LICENSE.txt b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/README.md b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3b4e63f67906e9b9cbaf4fe2055e35005b1380a7 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/README.md @@ -0,0 +1,138 @@ +# The Operator Framework + +The Operator Framework provides a simple, lightweight, and powerful way of +writing Juju charms, the best way to encapsulate operational experience in code. + +The framework will help you to: + +* model the integration of your services +* manage the lifecycle of your application +* create reusable and scalable components +* keep your code simple and readable + +## Getting Started + +Charms written using the operator framework are just Python code. The intention +is for it to feel very natural for somebody used to coding in Python, and +reasonably easy to pick up for somebody who might be a domain expert but not +necessarily a pythonista themselves. + +The dependencies of the operator framework are kept as minimal as possible; +currently that's Python 3.5 or greater, and `PyYAML` (both are included by +default in Ubuntu's cloud images from 16.04 on). + + +## A Quick Introduction + +Operator framework charms are just Python code. The entry point to your charm is +a particular Python file. It could be anything that makes sense to your project, +but let's assume this is `src/charm.py`. This file must be executable (and it +must have the appropriate shebang line). + +You need the usual `metadata.yaml` and (probably) `config.yaml` files, and a +`requirements.txt` for any Python dependencies. In other words, your project +might look like this: + +``` +my-charm +├── config.yaml +├── metadata.yaml +├── requirements.txt +└── src/ + └── charm.py +``` + +`src/charm.py` here is the entry point to your charm code. At a minimum, it +needs to define a subclass of `CharmBase` and pass that into the framework's +`main` function: + +```python +from ops.charm import CharmBase +from ops.main import main + +class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.framework.observe(self.on.start, self.on_start) + + def on_start(self, event): + # Handle the start event here. + +if __name__ == "__main__": + main(MyCharm) +``` + +That should be enough for you to be able to run + +``` +$ charmcraft build +Done, charm left in 'my-charm.charm' +$ juju deploy my-charm.charm +``` + +> 🛈 More information on [`charmcraft`](https://pypi.org/project/charmcraft/) can +> also be found on its [github page](https://github.com/canonical/charmcraft). + +Happy charming! + +## Testing your charms + +The operator framework provides a testing harness, so that you can test that +your charm does the right thing when presented with different scenarios, without +having to have a full deployment to do so. `pydoc3 ops.testing` has the details +for that, including this example: + +```python +harness = Harness(MyCharm) +# Do initial setup here +relation_id = harness.add_relation('db', 'postgresql') +# Now instantiate the charm to see events as the model changes +harness.begin() +harness.add_relation_unit(relation_id, 'postgresql/0') +harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'}) +# Check that charm has properly handled the relation_joined event for postgresql/0 +self.assertEqual(harness.charm. ...) 
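+
+# (Illustrative continuation, not part of the upstream snippet.) To check what
+# the charm wrote back to its own side of the relation, an assertion on
+#     harness.get_relation_data(relation_id, harness.charm.app.name)
+# can be used, assuming that helper is available in your version of ops.testing.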
+``` + +## Talk to us + +If you need help, have ideas, or would just like to chat with us, reach out on +IRC: we're in [#smooth-operator] on freenode (or try the [webchat]). + +We also pay attention to Juju's [discourse], but currently we don't actively +post there outside of our little corner of the [docs]; most discussion at this +stage is on IRC. + +[webchat]: https://webchat.freenode.net/#smooth-operator +[#smooth-operator]: irc://chat.freenode.net/%23smooth-operator +[discourse]: https://discourse.juju.is/c/charming +[docs]: https://discourse.juju.is/c/docs/operator-framework + +## Operator Framework development + +If you want to work in the framework *itself* you will need Python >= 3.5 and +the dependencies declared in `requirements-dev.txt` installed in your system. +Or you can use a virtualenv: + + virtualenv --python=python3 env + source env/bin/activate + pip install -r requirements-dev.txt + +Then you can try `./run_tests`, it should all go green. + +If you want to build the documentation you'll need the requirements from +`docs/requirements.txt`, or in your virtualenv + + pip install -r docs/requirements.txt + +and then you can run `./build_docs`. diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/build_docs b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/build_docs new file mode 100755 index 0000000000000000000000000000000000000000..af8b892f7568bdda5ac892beaeafad5696e607d3 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/build_docs @@ -0,0 +1,18 @@ +#!/bin/sh + +set -e + +flavour=html + +if [ "$1" ]; then + if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then + flavour=help + else + flavour="$1" + fi + shift +fi + +cd docs + +sphinx-build -M "$flavour" . _build "$@" diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/docs/conf.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/docs/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..c8d3e3d57b11cbd9ae6bb144336728e433ae1d28 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/docs/conf.py @@ -0,0 +1,97 @@ +# Configuration file for the Sphinx documentation builder. +# +# For a full list of options see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + + +# -- Path setup -------------------------------------------------------------- + +from pathlib import Path +import sys +sys.path.insert(0, str(Path(__file__).parent.parent)) + + +# -- Project information ----------------------------------------------------- + +project = 'The Operator Framework' +copyright = '2019-2020, Canonical Ltd.' +author = 'Canonical Ltd' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.todo', + 'sphinx.ext.viewcode', + 'sphinx.ext.intersphinx', +] + +# The document name of the “master” document, that is, the document +# that contains the root toctree directive. +master_doc = 'index' + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. 
+# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' # 'alabaster' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + + +# -- Options for sphinx.ext.todo --------------------------------------------- + +# If this is True, todo and todolist produce output, else they +# produce nothing. The default is False. +todo_include_todos = False + + +# -- Options for sphinx.ext.autodoc ------------------------------------------ + +# This value controls how to represents typehints. The setting takes the +# following values: +# 'signature' – Show typehints as its signature (default) +# 'description' – Show typehints as content of function or method +# 'none' – Do not show typehints +autodoc_typehints = 'description' + +# This value selects what content will be inserted into the main body of an +# autoclass directive. The possible values are: +# 'class' - Only the class’ docstring is inserted. This is the +# default. You can still document __init__ as a separate method +# using automethod or the members option to autoclass. +# 'both' - Both the class’ and the __init__ method’s docstring are +# concatenated and inserted. +# 'init' - Only the __init__ method’s docstring is inserted. +autoclass_content = 'both' + +autodoc_default_options = { + 'members': None, # None here means "yes" + 'undoc-members': None, + 'show-inheritance': None, +} + + +# -- Options for sphinx.ext.intersphinx -------------------------------------- + +# This config value contains the locations and names of other projects +# that should be linked to in this documentation. +intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/docs/index.rst b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/docs/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..424d78d423e1e7dd5d4049c9f88ffaec994c1714 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/docs/index.rst @@ -0,0 +1,58 @@ + +Welcome to The Operator Framework's documentation! +================================================== + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + +ops package +=========== + +.. automodule:: ops + +Submodules +---------- + +ops.charm module +---------------- + +.. automodule:: ops.charm + +ops.framework module +-------------------- + +.. automodule:: ops.framework + +ops.jujuversion module +---------------------- + +.. automodule:: ops.jujuversion + +ops.log module +-------------- + +.. automodule:: ops.log + +ops.main module +--------------- + +.. automodule:: ops.main + +ops.model module +---------------- + +.. automodule:: ops.model + +ops.testing module +------------------ + +.. 
automodule:: ops.testing + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/docs/requirements.txt b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..217e49dd6e684b07eaac18aec4c2f717e5f9f5f0 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/docs/requirements.txt @@ -0,0 +1,5 @@ +-r ../requirements.txt + +sphinx==3.* +sphinx_rtd_theme + diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/__init__.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f17b2969db298b21bc47bbe1d3614ccff93e9c6e --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The Operator Framework.""" + +from .version import version as __version__ # noqa: F401 (imported but unused) + +# Import here the bare minimum to break the circular import between modules +from . import charm # noqa: F401 (imported but unused) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/charm.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..d898de859fc444814bc19a7f8f0caaaec6f7e5f4 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/charm.py @@ -0,0 +1,575 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +import os +import pathlib +import typing + +import yaml + +from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents +from ops import model + + +def _loadYaml(source): + if yaml.__with_libyaml__: + return yaml.load(source, Loader=yaml.CSafeLoader) + return yaml.load(source, Loader=yaml.SafeLoader) + + +class HookEvent(EventBase): + """A base class for events that trigger because of a Juju hook firing.""" + + +class ActionEvent(EventBase): + """A base class for events that trigger when a user asks for an Action to be run. 
+ + To read the parameters for the action, see the instance variable `params`. + To respond with the result of the action, call `set_results`. To add progress + messages that are visible as the action is progressing use `log`. + + :ivar params: The parameters passed to the action (read by action-get) + """ + + def defer(self): + """Action events are not deferable like other events. + + This is because an action runs synchronously and the user is waiting for the result. + """ + raise RuntimeError('cannot defer action events') + + def restore(self, snapshot: dict) -> None: + """Used by the operator framework to record the action. + + Not meant to be called directly by Charm code. + """ + env_action_name = os.environ.get('JUJU_ACTION_NAME') + event_action_name = self.handle.kind[:-len('_action')].replace('_', '-') + if event_action_name != env_action_name: + # This could only happen if the dev manually emits the action, or from a bug. + raise RuntimeError('action event kind does not match current action') + # Params are loaded at restore rather than __init__ because + # the model is not available in __init__. + self.params = self.framework.model._backend.action_get() + + def set_results(self, results: typing.Mapping) -> None: + """Report the result of the action. + + Args: + results: The result of the action as a Dict + """ + self.framework.model._backend.action_set(results) + + def log(self, message: str) -> None: + """Send a message that a user will see while the action is running. + + Args: + message: The message for the user. + """ + self.framework.model._backend.action_log(message) + + def fail(self, message: str = '') -> None: + """Report that this action has failed. + + Args: + message: Optional message to record why it has failed. + """ + self.framework.model._backend.action_fail(message) + + +class InstallEvent(HookEvent): + """Represents the `install` hook from Juju.""" + + +class StartEvent(HookEvent): + """Represents the `start` hook from Juju.""" + + +class StopEvent(HookEvent): + """Represents the `stop` hook from Juju.""" + + +class RemoveEvent(HookEvent): + """Represents the `remove` hook from Juju. """ + + +class ConfigChangedEvent(HookEvent): + """Represents the `config-changed` hook from Juju.""" + + +class UpdateStatusEvent(HookEvent): + """Represents the `update-status` hook from Juju.""" + + +class UpgradeCharmEvent(HookEvent): + """Represents the `upgrade-charm` hook from Juju. + + This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju + has unpacked the upgraded charm code, and so this event will be handled with new code. + """ + + +class PreSeriesUpgradeEvent(HookEvent): + """Represents the `pre-series-upgrade` hook from Juju. + + This happens when a user has run `juju upgrade-series MACHINE prepare` and + will fire for each unit that is running on the machine, telling them that + the user is preparing to upgrade the Machine's series (eg trusty->bionic). + The charm should take actions to prepare for the upgrade (a database charm + would want to write out a version-independent dump of the database, so that + when a new version of the database is available in a new series, it can be + used.) + Once all units on a machine have run `pre-series-upgrade`, the user will + initiate the steps to actually upgrade the machine (eg `do-release-upgrade`). + When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire. + """ + + +class PostSeriesUpgradeEvent(HookEvent): + """Represents the `post-series-upgrade` hook from Juju. 
+ + This is run after the user has done a distribution upgrade (or rolled back + and kept the same series). It is called in response to + `juju upgrade-series MACHINE complete`. Charms are expected to do whatever + steps are necessary to reconfigure their applications for the new series. + """ + + +class LeaderElectedEvent(HookEvent): + """Represents the `leader-elected` hook from Juju. + + Juju will trigger this when a new lead unit is chosen for a given application. + This represents the leader of the charm information (not necessarily the primary + of a running application). The main utility is that charm authors can know + that only one unit will be a leader at any given time, so they can do + configuration, etc, that would otherwise require coordination between units. + (eg, selecting a password for a new relation) + """ + + +class LeaderSettingsChangedEvent(HookEvent): + """Represents the `leader-settings-changed` hook from Juju. + + Deprecated. This represents when a lead unit would call `leader-set` to inform + the other units of an application that they have new information to handle. + This has been deprecated in favor of using a Peer relation, and having the + leader set a value in the Application data bag for that peer relation. + (see :class:`RelationChangedEvent`). + """ + + +class CollectMetricsEvent(HookEvent): + """Represents the `collect-metrics` hook from Juju. + + Note that events firing during a CollectMetricsEvent are currently + sandboxed in how they can interact with Juju. To report metrics + use :meth:`.add_metrics`. + """ + + def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None: + """Record metrics that have been gathered by the charm for this unit. + + Args: + metrics: A collection of {key: float} pairs that contains the + metrics that have been gathered + labels: {key:value} strings that can be applied to the + metrics that are being gathered + """ + self.framework.model._backend.add_metrics(metrics, labels) + + +class RelationEvent(HookEvent): + """A base class representing the various relation lifecycle events. + + Charmers should not be creating RelationEvents directly. The events will be + generated by the framework from Juju related events. Users can observe them + from the various `CharmBase.on[relation_name].relation_*` events. + + Attributes: + relation: The Relation involved in this event + app: The remote application that has triggered this event + unit: The remote unit that has triggered this event. This may be None + if the relation event was triggered as an Application level event + """ + + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit is not None and unit.app != app: + raise RuntimeError( + 'cannot create RelationEvent with application {} and unit {}'.format(app, unit)) + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self) -> dict: + """Used by the framework to serialize the event to disk. + + Not meant to be called by Charm code. + """ + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot: dict) -> None: + """Used by the framework to deserialize the event from disk. + + Not meant to be called by Charm code. 
+ """ + self.relation = self.framework.model.get_relation( + snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationCreatedEvent(RelationEvent): + """Represents the `relation-created` hook from Juju. + + This is triggered when a new relation to another app is added in Juju. This + can occur before units for those applications have started. All existing + relations should be established before start. + """ + + +class RelationJoinedEvent(RelationEvent): + """Represents the `relation-joined` hook from Juju. + + This is triggered whenever a new unit of a related application joins the relation. + (eg, a unit was added to an existing related app, or a new relation was established + with an application that already had units.) + """ + + +class RelationChangedEvent(RelationEvent): + """Represents the `relation-changed` hook from Juju. + + This is triggered whenever there is a change to the data bucket for a related + application or unit. Look at `event.relation.data[event.unit/app]` to see the + new information. + """ + + +class RelationDepartedEvent(RelationEvent): + """Represents the `relation-departed` hook from Juju. + + This is the inverse of the RelationJoinedEvent, representing when a unit + is leaving the relation (the unit is being removed, the app is being removed, + the relation is being removed). It is fired once for each unit that is + going away. + """ + + +class RelationBrokenEvent(RelationEvent): + """Represents the `relation-broken` hook from Juju. + + If a relation is being removed (`juju remove-relation` or `juju remove-application`), + once all the units have been removed, RelationBrokenEvent will fire to signal + that the relationship has been fully terminated. + """ + + +class StorageEvent(HookEvent): + """Base class representing Storage related events.""" + + +class StorageAttachedEvent(StorageEvent): + """Represents the `storage-attached` hook from Juju. + + Called when new storage is available for the charm to use. + """ + + +class StorageDetachingEvent(StorageEvent): + """Represents the `storage-detaching` hook from Juju. + + Called when storage a charm has been using is going away. + """ + + +class CharmEvents(ObjectEvents): + """The events that are generated by Juju in response to the lifecycle of an application.""" + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + remove = EventSource(RemoveEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + collect_metrics = EventSource(CollectMetricsEvent) + + +class CharmBase(Object): + """Base class that represents the Charm overall. + + Usually this initialization is done by ops.main.main() rather than Charm authors + directly instantiating a Charm. + + Args: + framework: The framework responsible for managing the Model and events for this + Charm. + key: Ignored; will remove after deprecation period of the signature change. 
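+
+    A typical charm subclasses CharmBase and wires up its observers in
+    __init__; a minimal sketch (the handler name below is illustrative)::
+
+        class MyCharm(CharmBase):
+            def __init__(self, *args):
+                super().__init__(*args)
+                self.framework.observe(self.on.config_changed, self._on_config_changed)
+
+            def _on_config_changed(self, event):
+                pass  # react to the new configuration here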
+ """ + + on = CharmEvents() + + def __init__(self, framework: Framework, key: typing.Optional = None): + super().__init__(framework, None) + + for relation_name in self.framework.meta.relations: + relation_name = relation_name.replace('-', '_') + self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent) + self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent) + self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent) + self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent) + self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent) + self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name = action_name.replace('-', '_') + self.on.define_event(action_name + '_action', ActionEvent) + + @property + def app(self) -> model.Application: + """Application that this unit is part of.""" + return self.framework.model.app + + @property + def unit(self) -> model.Unit: + """Unit that this execution is responsible for.""" + return self.framework.model.unit + + @property + def meta(self) -> 'CharmMeta': + """CharmMeta of this charm. + """ + return self.framework.meta + + @property + def charm_dir(self) -> pathlib.Path: + """Root directory of the Charm as it is running. + """ + return self.framework.charm_dir + + +class CharmMeta: + """Object containing the metadata for the charm. + + This is read from metadata.yaml and/or actions.yaml. Generally charms will + define this information, rather than reading it at runtime. This class is + mostly for the framework to understand what the charm has defined. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. + + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. + + Attributes: + name: The name of this charm + summary: Short description of what this charm does + description: Long description for this charm + maintainers: A list of strings of the email addresses of the maintainers + of this charm. + tags: Charm store tag metadata for categories associated with this charm. + terms: Charm store terms that should be agreed to before this charm can + be deployed. (Used for things like licensing issues.) + series: The list of supported OS series that this charm can support. + The first entry in the list is the default series that will be + used by deploy if no other series is requested by the user. + subordinate: True/False whether this charm is intended to be used as a + subordinate charm. + min_juju_version: If supplied, indicates this charm needs features that + are not available in older versions of Juju. + requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation. + provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation. + peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation. 
+ relations: A dict containing all :class:`RelationMeta` attributes (merged from other + sections) + storages: A dict of {name: :class:`StorageMeta`} for each defined storage. + resources: A dict of {name: :class:`ResourceMeta`} for each defined resource. + payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload. + extra_bindings: A dict of additional named bindings that a charm can use + for network configuration. + actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined. + Args: + raw: a mapping containing the contents of metadata.yaml + actions_raw: a mapping containing the contents of actions.yaml + """ + + def __init__(self, raw: dict = {}, actions_raw: dict = {}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta(RelationRole.requires, name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta(RelationRole.provides, name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta(RelationRole.peer, name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', {}) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml( + cls, metadata: typing.Union[str, typing.TextIO], + actions: typing.Optional[typing.Union[str, typing.TextIO]] = None): + """Instantiate a CharmMeta from a YAML description of metadata.yaml. + + Args: + metadata: A YAML description of charm metadata (name, relations, etc.) + This can be a simple string, or a file-like object. (passed to `yaml.safe_load`). + actions: YAML description of Actions for this charm (eg actions.yaml) + """ + meta = _loadYaml(metadata) + raw_actions = {} + if actions is not None: + raw_actions = _loadYaml(actions) + return cls(meta, raw_actions) + + +class RelationRole(enum.Enum): + peer = 'peer' + requires = 'requires' + provides = 'provides' + + def is_peer(self) -> bool: + """Return whether the current role is peer. + + A convenience to avoid having to import charm. + """ + return self is RelationRole.peer + + +class RelationMeta: + """Object containing metadata about a relation definition. + + Should not be constructed directly by Charm code. Is gotten from one of + :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`, + or :attr:`CharmMeta.relations`. + + Attributes: + role: This is one of peer/requires/provides + relation_name: Name of this relation from metadata.yaml + interface_name: Optional definition of the interface protocol. 
+ scope: "global" or "container" scope based on how the relation should be used. + """ + + def __init__(self, role: RelationRole, relation_name: str, raw: dict): + if not isinstance(role, RelationRole): + raise TypeError("role should be a Role, not {!r}".format(role)) + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition.""" + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition.""" + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition.""" + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + """Object containing metadata about an action's definition.""" + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {: } + self.required = raw.get('required', []) # [, ...] diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/framework.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/framework.py new file mode 100755 index 0000000000000000000000000000000000000000..b7c4749ff2b5bfb4f354bf1a8d4cd6ed64cf0da5 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/framework.py @@ -0,0 +1,1067 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import collections.abc +import inspect +import keyword +import logging +import marshal +import os +import pathlib +import pdb +import re +import sys +import types +import weakref + +from ops import charm +from ops.storage import ( + NoSnapshotError, + SQLiteStorage, +) + +logger = logging.getLogger(__name__) + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. 
+ + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. + """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = "{}/{}[{}]".format(parent, kind, key) + else: + self._path = "{}/{}".format(parent, kind) + else: + if key: + self._path = "{}[{}]".format(kind, key) + else: + self._path = "{}".format(kind) + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {}".format(path)) + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. + """ + self.deferred = False + + +class EventSource: + """EventSource wraps an event type with a descriptor to facilitate observing and emitting. + + It is generally used as: + + class SomethingHappened(EventBase): + pass + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + With that, instances of that type will offer the someobj.something_happened + attribute which is a BoundEvent and may be used to emit and observe the event. + """ + + def __init__(self, event_type): + if not isinstance(event_type, type) or not issubclass(event_type, EventBase): + raise RuntimeError( + 'Event requires a subclass of EventBase as an argument, got {}'.format(event_type)) + self.event_type = event_type + self.event_kind = None + self.emitter_type = None + + def _set_name(self, emitter_type, event_kind): + if self.event_kind is not None: + raise RuntimeError( + 'EventSource({}) reused as {}.{} and {}.{}'.format( + self.event_type.__name__, + self.emitter_type.__name__, + self.event_kind, + emitter_type.__name__, + event_kind, + )) + self.event_kind = event_kind + self.emitter_type = emitter_type + + def __get__(self, emitter, emitter_type=None): + if emitter is None: + return self + # Framework might not be available if accessed as CharmClass.on.event + # rather than charm_instance.on.event, but in that case it couldn't be + # emitted anyway, so there's no point to registering it. 
+ framework = getattr(emitter, 'framework', None) + if framework is not None: + framework.register_type(self.event_type, emitter, self.event_kind) + return BoundEvent(emitter, self.event_type, self.event_kind) + + +class BoundEvent: + + def __repr__(self): + return ''.format( + self.event_type.__name__, + type(self.emitter).__name__, + self.event_kind, + hex(id(self)), + ) + + def __init__(self, emitter, event_type, event_kind): + self.emitter = emitter + self.event_type = event_type + self.event_kind = event_kind + + def emit(self, *args, **kwargs): + """Emit event to all registered observers. + + The current storage state is committed before and after each observer is notified. + """ + framework = self.emitter.framework + key = framework._next_event_key() + event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs) + framework._emit(event) + + +class HandleKind: + """Helper descriptor to define the Object.handle_kind field. + + The handle_kind for an object defaults to its type name, but it may + be explicitly overridden if desired. + """ + + def __get__(self, obj, obj_type): + kind = obj_type.__dict__.get("handle_kind") + if kind: + return kind + return obj_type.__name__ + + +class _Metaclass(type): + """Helper class to ensure proper instantiation of Object-derived classes. + + This class currently has a single purpose: events derived from EventSource + that are class attributes of Object-derived classes need to be told what + their name is in that class. For example, in + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + the instance of EventSource needs to know it's called 'something_happened'. + + Starting from python 3.6 we could use __set_name__ on EventSource for this, + but until then this (meta)class does the equivalent work. + + TODO: when we drop support for 3.5 drop this class, and rename _set_name in + EventSource to __set_name__; everything should continue to work. + + """ + + def __new__(typ, *a, **kw): + k = super().__new__(typ, *a, **kw) + # k is now the Object-derived class; loop over its class attributes + for n, v in vars(k).items(): + # we could do duck typing here if we want to support + # non-EventSource-derived shenanigans. We don't. + if isinstance(v, EventSource): + # this is what 3.6+ does automatically for us: + v._set_name(k, n) + return k + + +class Object(metaclass=_Metaclass): + + handle_kind = HandleKind() + + def __init__(self, parent, key): + kind = self.handle_kind + if isinstance(parent, Framework): + self.framework = parent + # Avoid Framework instances having a circular reference to themselves. + if self.framework is self: + self.framework = weakref.proxy(self.framework) + self.handle = Handle(None, kind, key) + else: + self.framework = parent.framework + self.handle = Handle(parent, kind, key) + self.framework._track(self) + + # TODO Detect conflicting handles here. + + @property + def model(self): + return self.framework.model + + +class ObjectEvents(Object): + """Convenience type to allow defining .on attributes at class level.""" + + handle_kind = "on" + + def __init__(self, parent=None, key=None): + if parent is not None: + super().__init__(parent, key) + else: + self._cache = weakref.WeakKeyDictionary() + + def __get__(self, emitter, emitter_type): + if emitter is None: + return self + instance = self._cache.get(emitter) + if instance is None: + # Same type, different instance, more data. 
Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls: a type to define an event on. + + event_kind: an attribute name that will be used to access the + event. Must be a valid python identifier, not be a keyword + or an existing attribute. + + event_type: a type of the event to define. + + """ + prefix = 'unable to define an event with event_kind that ' + if not event_kind.isidentifier(): + raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind) + elif keyword.iskeyword(event_kind): + raise RuntimeError(prefix + 'is a python keyword: ' + event_kind) + try: + getattr(cls, event_kind) + raise RuntimeError( + prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind)) + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor._set_name(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. + event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(ObjectEvents): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return "cannot restore {} since no class was registered for it".format(self.handle_path) + + +# the message to show to the user when a pdb breakpoint goes active +_BREAKPOINT_WELCOME_MESSAGE = """ +Starting pdb to debug charm operator. +Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort. +Future breakpoints may interrupt execution again. +More details at https://discourse.jujucharms.com/t/debugging-charm-hooks + +""" + + +_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$' + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. 
+ model = None + meta = None + charm_dir = None + + def __init__(self, storage, charm_dir, meta, model): + + super().__init__(self, None) + + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + if isinstance(storage, (str, pathlib.Path)): + logger.warning( + "deprecated: Framework now takes a Storage not a path") + storage = SQLiteStorage(storage) + self._storage = storage + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do + # breakpoint(); if Python < 3.7, this doesn't affect anything + sys.breakpointhook = self.breakpoint + + # Flag to indicate that we already presented the welcome message in a debugger breakpoint + self._breakpoint_welcomed = False + + # Parse once the env var, which may be used multiple times later + debug_at = os.environ.get('JUJU_DEBUG_AT') + self._juju_debug_at = debug_at.split(',') if debug_at else () + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError( + 'two objects claiming to be {} have been created'.format(obj.handle.path)) + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. + self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. 
+ """ + if type(value) not in self._type_known: + raise RuntimeError( + 'cannot save {} values before registering that type'.format(type(value).__name__)) + data = value.snapshot() + + # Use marshal as a validator, enforcing the use of simple types, as we later the + # information is really pickled, which is too error prone for future evolution of the + # stored data (e.g. if the developer stores a custom object and later changes its + # class name; when unpickling the original class will not be there and event + # data loading will fail). + try: + marshal.dumps(data) + except ValueError: + msg = "unable to save the data for {}, it must contain only simple types: {!r}" + raise ValueError(msg.format(value.__class__.__name__, data)) + + self._storage.save_snapshot(value.handle.path, data) + + def load_snapshot(self, handle): + parent_path = None + if handle.parent: + parent_path = handle.parent.path + cls = self._type_registry.get((parent_path, handle.kind)) + if not cls: + raise NoTypeError(handle.path) + data = self._storage.load_snapshot(handle.path) + obj = cls.__new__(cls) + obj.framework = self + obj.handle = handle + obj.restore(data) + self._track(obj) + return obj + + def drop_snapshot(self, handle): + self._storage.drop_snapshot(handle.path) + + def observe(self, bound_event: BoundEvent, observer: types.MethodType): + """Register observer to be called when bound_event is emitted. + + The bound_event is generally provided as an attribute of the object that emits + the event, and is created in this style: + + class SomeObject: + something_happened = Event(SomethingHappened) + + That event may be observed as: + + framework.observe(someobj.something_happened, self._on_something_happened) + + Raises: + RuntimeError: if bound_event or observer are the wrong type. + """ + if not isinstance(bound_event, BoundEvent): + raise RuntimeError( + 'Framework.observe requires a BoundEvent as second parameter, got {}'.format( + bound_event)) + if not isinstance(observer, types.MethodType): + # help users of older versions of the framework + if isinstance(observer, charm.CharmBase): + raise TypeError( + 'observer methods must now be explicitly provided;' + ' please replace observe(self.on.{0}, self)' + ' with e.g. observe(self.on.{0}, self._on_{0})'.format( + bound_event.event_kind)) + raise RuntimeError( + 'Framework.observe requires a method as third parameter, got {}'.format(observer)) + + event_type = bound_event.event_type + event_kind = bound_event.event_kind + emitter = bound_event.emitter + + self.register_type(event_type, emitter, event_kind) + + if hasattr(emitter, "handle"): + emitter_path = emitter.handle.path + else: + raise RuntimeError( + 'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__)) + + # Validate that the method has an acceptable call signature. + sig = inspect.signature(observer) + # Self isn't included in the params list, so the first arg will be the event. + extra_params = list(sig.parameters.values())[1:] + + method_name = observer.__name__ + observer = observer.__self__ + if not sig.parameters: + raise TypeError( + '{}.{} must accept event parameter'.format(type(observer).__name__, method_name)) + elif any(param.default is inspect.Parameter.empty for param in extra_params): + # Allow for additional optional params, since there's no reason to exclude them, but + # required params will break. 
+ raise TypeError( + '{}.{} has extra required parameter'.format(type(observer).__name__, method_name)) + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 + # means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + saved = False + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of + # all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + if not saved: + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + saved = True + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + if saved: + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + event_is_from_juju = isinstance(event, charm.HookEvent) + event_is_action = isinstance(event, charm.ActionEvent) + if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at: + # Present the welcome message and run under PDB. + self._show_debug_code_message() + pdb.runcall(custom_handler, event) + else: + # Regular call to the registered method. + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from + # scratch in the next path. + self.framework._forget(event) + + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + + def _show_debug_code_message(self): + """Present the welcome message (only once!) 
when using debugger functionality.""" + if not self._breakpoint_welcomed: + self._breakpoint_welcomed = True + print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='') + + def breakpoint(self, name=None): + """Add breakpoint, optionally named, at the place where this method is called. + + For the breakpoint to be activated the JUJU_DEBUG_AT environment variable + must be set to "all" or to the specific name parameter provided, if any. In every + other situation calling this method does nothing. + + The framework also provides a standard breakpoint named "hook", that will + stop execution when a hook event is about to be handled. + + For those reasons, the "all" and "hook" breakpoint names are reserved. + """ + # If given, validate the name comply with all the rules + if name is not None: + if not isinstance(name, str): + raise TypeError('breakpoint names must be strings') + if name in ('hook', 'all'): + raise ValueError('breakpoint names "all" and "hook" are reserved') + if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name): + raise ValueError('breakpoint names must look like "foo" or "foo-bar"') + + indicated_breakpoints = self._juju_debug_at + if not indicated_breakpoints: + return + + if 'all' in indicated_breakpoints or name in indicated_breakpoints: + self._show_debug_code_message() + + # If we call set_trace() directly it will open the debugger *here*, so indicating + # it to use our caller's frame + code_frame = inspect.currentframe().f_back + pdb.Pdb().set_trace(code_frame) + else: + logger.warning( + "Breakpoint %r skipped (not found in the requested breakpoints: %s)", + name, indicated_breakpoints) + + def remove_unreferenced_events(self): + """Remove events from storage that are not referenced. + + In older versions of the framework, events that had no observers would get recorded but + never deleted. This makes a best effort to find these events and remove them from the + database. + """ + event_regex = re.compile(_event_regex) + to_remove = [] + for handle_path in self._storage.list_snapshots(): + if event_regex.match(handle_path): + notices = self._storage.notices(handle_path) + if next(notices, None) is None: + # There are no notices for this handle_path, it is valid to remove it + to_remove.append(handle_path) + for handle_path in to_remove: + self._storage.drop_snapshot(handle_path) + + +class StoredStateData(Object): + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + return self._cache + + def restore(self, snapshot): + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. 
+ self.__dict__["_data"] = data + self.__dict__["_attr_name"] = attr_name + + parent.framework.observe(parent.framework.on.commit, self._data.on_commit) + + def __getattr__(self, key): + # "on" is the only reserved key that can't be used in the data map. + if key == "on": + return self._data.on + if key not in self._data: + raise AttributeError("attribute '{}' is not stored".format(key)) + return _wrap_stored(self._data, self._data[key]) + + def __setattr__(self, key, value): + if key == "on": + raise AttributeError("attribute 'on' is reserved and cannot be set") + + value = _unwrap_stored(self._data, value) + + if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)): + raise AttributeError( + 'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format( + key, type(value).__name__)) + + self._data[key] = _unwrap_stored(self._data, value) + + def set_default(self, **kwargs): + """"Set the value of any given key if it has not already been set""" + for k, v in kwargs.items(): + if k not in self._data: + self._data[k] = v + + +class StoredState: + """A class used to store data the charm needs persisted across invocations. + + Example:: + + class MyClass(Object): + _stored = StoredState() + + Instances of `MyClass` can transparently save state between invocations by + setting attributes on `_stored`. Initial state should be set with + `set_default` on the bound object, that is:: + + class MyClass(Object): + _stored = StoredState() + + def __init__(self, parent, key): + super().__init__(parent, key) + self._stored.set_default(seen=set()) + self.framework.observe(self.on.seen, self._on_seen) + + def _on_seen(self, event): + self._stored.seen.add(event.uuid) + + """ + + def __init__(self): + self.parent_type = None + self.attr_name = None + + def __get__(self, parent, parent_type=None): + if self.parent_type is not None and self.parent_type not in parent_type.mro(): + # the StoredState instance is being shared between two unrelated classes + # -> unclear what is exepcted of us -> bail out + raise RuntimeError( + 'StoredState shared by {} and {}'.format( + self.parent_type.__name__, parent_type.__name__)) + + if parent is None: + # accessing via the class directly (e.g. MyClass.stored) + return self + + bound = None + if self.attr_name is not None: + bound = parent.__dict__.get(self.attr_name) + if bound is not None: + # we already have the thing from a previous pass, huzzah + return bound + + # need to find ourselves amongst the parent's bases + for cls in parent_type.mro(): + for attr_name, attr_value in cls.__dict__.items(): + if attr_value is not self: + continue + # we've found ourselves! is it the first time? 
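# --- Illustrative sketch (not part of the vendored operator code) -----------
# Complementing the StoredState docstring example above: values must be simple
# builtin types (save_snapshot validates them with marshal), and stored dicts,
# lists and sets come back wrapped as StoredDict/StoredList/StoredSet so that
# in-place mutation still marks the backing StoredStateData dirty and gets
# committed. MyCharm and _note_unit are hypothetical names.
from ops.charm import CharmBase
from ops.framework import StoredState

class MyCharm(CharmBase):
    _stored = StoredState()

    def __init__(self, *args):
        super().__init__(*args)
        # set_default() only fills keys that have never been stored before.
        self._stored.set_default(counters={}, seen_units=set())

    def _note_unit(self, unit_name):
        # In-place mutation of the wrapped set/dict is persisted on commit.
        self._stored.seen_units.add(unit_name)
        self._stored.counters[unit_name] = self._stored.counters.get(unit_name, 0) + 1
# --- end sketch --------------------------------------------------------------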
+ if bound is not None: + # the StoredState instance is being stored in two different + # attributes -> unclear what is expected of us -> bail out + raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format( + cls.__name__, self.attr_name, attr_name)) + # we've found ourselves for the first time; save where, and bind the object + self.attr_name = attr_name + self.parent_type = cls + bound = BoundStoredState(parent, attr_name) + + if bound is not None: + # cache the bound object to avoid the expensive lookup the next time + # (don't use setattr, to keep things symmetric with the fast-path lookup above) + parent.__dict__[self.attr_name] = bound + return bound + + raise AttributeError( + 'cannot find {} attribute in type {}'.format( + self.__class__.__name__, parent_type.__name__)) + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def _unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + 
return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredList): + return self._under >= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under >= other + else: + return NotImplemented + + +class StoredSet(collections.abc.MutableSet): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def add(self, key): + self._under.add(key) + self._stored_data.dirty = True + + def discard(self, key): + self._under.discard(key) + self._stored_data.dirty = True + + def __contains__(self, key): + return key in self._under + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + @classmethod + def _from_iterable(cls, it): + """Construct an instance of the class from any iterable input. + + Per https://docs.python.org/3/library/collections.abc.html + if the Set mixin is being used in a class with a different constructor signature, + you will need to override _from_iterable() with a classmethod that can construct + new instances from an iterable argument. + """ + return set(it) + + def __le__(self, other): + if isinstance(other, StoredSet): + return self._under <= other._under + elif isinstance(other, collections.abc.Set): + return self._under <= other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredSet): + return self._under >= other._under + elif isinstance(other, collections.abc.Set): + return self._under >= other + else: + return NotImplemented + + def __eq__(self, other): + if isinstance(other, StoredSet): + return self._under == other._under + elif isinstance(other, collections.abc.Set): + return self._under == other + else: + return NotImplemented diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/jujuversion.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/jujuversion.py new file mode 100755 index 0000000000000000000000000000000000000000..b2b8177dbe396f0d8c46b86e26af6b4e54ea046d --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/jujuversion.py @@ -0,0 +1,98 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from functools import total_ordering + + +@total_ordering +class JujuVersion: + + PATTERN = r'''^ + (?P\d{1,9})\.(?P\d{1,9}) # and numbers are always there + ((?:\.|-(?P[a-z]+))(?P\d{1,9}))? # sometimes with . or - + (\.(?P\d{1,9}))?$ # and sometimes with a number. 
+ ''' + + def __init__(self, version): + m = re.match(self.PATTERN, version, re.VERBOSE) + if not m: + raise RuntimeError('"{}" is not a valid Juju version string'.format(version)) + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch) + else: + s = '{}.{}.{}'.format(self.major, self.minor, self.patch) + if self.build > 0: + s += '.{}'.format(self.build) + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + return ( + self.major == other.major + and self.minor == other.minor + and self.tag == other.tag + and self.build == other.build + and self.patch == other.patch) + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False + + @classmethod + def from_environ(cls) -> 'JujuVersion': + """Build a JujuVersion from JUJU_VERSION.""" + v = os.environ.get('JUJU_VERSION') + if not v: + raise RuntimeError('environ has no JUJU_VERSION') + return cls(v) + + def has_app_data(self) -> bool: + """Determine whether this juju version knows about app data.""" + return (self.major, self.minor, self.patch) >= (2, 7, 0) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/lib/__init__.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..edb9fcacea6f0173aed9f07ca8a683cfead989cc --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/lib/__init__.py @@ -0,0 +1,194 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import os +import re + +from ast import literal_eval +from importlib.util import module_from_spec +from importlib.machinery import ModuleSpec +from pkgutil import get_importer +from types import ModuleType + + +_libraries = None + +_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''') +_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''') + +# Not perfect, but should do for now. 
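# --- Illustrative sketch (not part of the vendored operator code) -----------
# Typical use of the JujuVersion class from jujuversion.py above, assuming the
# upstream PATTERN with its named groups (?P<major>, (?P<minor>, (?P<tag>,
# (?P<patch>, (?P<build>), which __init__ reads via m.group()/groupdict().
# It parses strings like "2.8.1", "2.8-rc1" or "2.8.1.2" and orders them via
# functools.total_ordering; plain strings are accepted on the right-hand side.
from ops.jujuversion import JujuVersion

v = JujuVersion('2.8.1')
assert (v.major, v.minor, v.patch) == (2, 8, 1)
assert v > '2.7.0'          # the string is parsed before comparison
assert v.has_app_data()     # (2, 8, 1) >= (2, 7, 0)
# JujuVersion.from_environ() builds the same object from $JUJU_VERSION, which
# Juju exports for every hook and action invocation.
# --- end sketch --------------------------------------------------------------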
+_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''') + + +def use(name: str, api: int, author: str) -> ModuleType: + """Use a library from the ops libraries. + + Args: + name: the name of the library requested. + api: the API version of the library. + author: the author of the library. If not given, requests the + one in the standard library. + Raises: + ImportError: if the library cannot be found. + TypeError: if the name, api, or author are the wrong type. + ValueError: if the name, api, or author are invalid. + """ + if not isinstance(name, str): + raise TypeError("invalid library name: {!r} (must be a str)".format(name)) + if not isinstance(author, str): + raise TypeError("invalid library author: {!r} (must be a str)".format(author)) + if not isinstance(api, int): + raise TypeError("invalid library API: {!r} (must be an int)".format(api)) + if api < 0: + raise ValueError('invalid library api: {} (must be ≥0)'.format(api)) + if not _libname_re.match(name): + raise ValueError("invalid library name: {!r} (chars and digits only)".format(name)) + if not _libauthor_re.match(author): + raise ValueError("invalid library author email: {!r}".format(author)) + + if _libraries is None: + autoimport() + + versions = _libraries.get((name, author), ()) + for lib in versions: + if lib.api == api: + return lib.import_module() + + others = ', '.join(str(lib.api) for lib in versions) + if others: + msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format( + name, author, api, others) + else: + msg = 'cannot find library "{}" from "{}"'.format(name, author) + + raise ImportError(msg, name=name) + + +def autoimport(): + """Find all libs in the path and enable use of them. + + You only need to call this if you've installed a package or + otherwise changed sys.path in the current run, and need to see the + changes. Otherwise libraries are found on first call of `use`. + """ + global _libraries + _libraries = {} + for spec in _find_all_specs(sys.path): + lib = _parse_lib(spec) + if lib is None: + continue + + versions = _libraries.setdefault((lib.name, lib.author), []) + versions.append(lib) + versions.sort(reverse=True) + + +def _find_all_specs(path): + for sys_dir in path: + if sys_dir == "": + sys_dir = "." 
+ try: + top_dirs = os.listdir(sys_dir) + except OSError: + continue + for top_dir in top_dirs: + opslib = os.path.join(sys_dir, top_dir, 'opslib') + try: + lib_dirs = os.listdir(opslib) + except OSError: + continue + finder = get_importer(opslib) + if finder is None or not hasattr(finder, 'find_spec'): + continue + for lib_dir in lib_dirs: + spec = finder.find_spec(lib_dir) + if spec is None: + continue + if spec.loader is None: + # a namespace package; not supported + continue + yield spec + + +# only the first this many lines of a file are looked at for the LIB* constants +_MAX_LIB_LINES = 99 + + +def _parse_lib(spec): + if spec.origin is None: + return None + + _expected = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int} + + try: + with open(spec.origin, 'rt', encoding='utf-8') as f: + libinfo = {} + for n, line in enumerate(f): + if len(libinfo) == len(_expected): + break + if n > _MAX_LIB_LINES: + return None + m = _libline_re.match(line) + if m is None: + continue + key, value = m.groups() + if key in _expected: + value = literal_eval(value) + if not isinstance(value, _expected[key]): + return None + libinfo[key] = value + else: + if len(libinfo) != len(_expected): + return None + except Exception: + return None + + return _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH']) + + +class _Lib: + + def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int): + self.spec = spec + self.name = name + self.author = author + self.api = api + self.patch = patch + + self._module = None + + def __repr__(self): + return "<_Lib {0.name} by {0.author}, API {0.api}, patch {0.patch}>".format(self) + + def import_module(self) -> ModuleType: + if self._module is None: + module = module_from_spec(self.spec) + self.spec.loader.exec_module(module) + self._module = module + return self._module + + def __eq__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a == b + + def __lt__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a < b diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/log.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/log.py new file mode 100644 index 0000000000000000000000000000000000000000..4aac5543aec4d84dc393e79b772a30284712d6d4 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/log.py @@ -0,0 +1,51 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
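# --- Illustrative sketch (not part of the vendored operator code) -----------
# What the library loader in ops/lib/__init__.py above expects: a shared
# library lives at <pkg>/opslib/<libname>/__init__.py under some sys.path entry
# and declares the LIB* constants within its first 99 lines, for example
# (hypothetical names and values):
#
#     LIBNAME = "mycharmlib"
#     LIBAUTHOR = "someone@example.com"
#     LIBAPI = 1
#     LIBPATCH = 3
#
# A charm then imports the newest patch of a pinned API version with:
import ops.lib

mycharmlib = ops.lib.use("mycharmlib", 1, "someone@example.com")
# --- end sketch --------------------------------------------------------------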
+ +import sys +import logging + + +class JujuLogHandler(logging.Handler): + """A handler for sending logs to Juju via juju-log.""" + + def __init__(self, model_backend, level=logging.DEBUG): + super().__init__(level) + self.model_backend = model_backend + + def emit(self, record): + self.model_backend.juju_log(record.levelname, self.format(record)) + + +def setup_root_logging(model_backend, debug=False): + """Setup python logging to forward messages to juju-log. + + By default, logging is set to DEBUG level, and messages will be filtered by Juju. + Charmers can also set their own default log level with:: + + logging.getLogger().setLevel(logging.INFO) + + model_backend -- a ModelBackend to use for juju-log + debug -- if True, write logs to stderr as well as to juju-log. + """ + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.addHandler(JujuLogHandler(model_backend)) + if debug: + handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + + sys.excepthook = lambda etype, value, tb: logger.error( + "Uncaught exception while in charm code:", exc_info=(etype, value, tb)) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/main.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/main.py new file mode 100755 index 0000000000000000000000000000000000000000..6dc31c3575044796e8fe1f61b8415395689d6339 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/main.py @@ -0,0 +1,348 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import logging +import os +import subprocess +import sys +import warnings +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model +import ops.storage + +from ops.log import setup_root_logging + +CHARM_STATE_FILE = '.unit-state.db' + + +logger = logging.getLogger() + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/op/main.py structure. + charm_dir = Path('{}/../../..'.format(__file__)).resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _create_event_link(charm, bound_event): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. 
+ """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError( + 'action event name {} needs _action suffix'.format(bound_event.event_kind)) + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. + event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError( + 'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type)) + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + # CPython has different implementations for populating sys.argv[0] for Linux and Windows. + # For Windows it is always an absolute path (any symlinks are resolved) + # while for Linux it can be a relative path. + target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir)) + + # Ignore the non-symlink files or directories + # assuming the charm author knows what they are doing. + logger.debug( + 'Creating a new relative symlink at %s pointing to %s', + event_path, target_path) + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file + which is determined by inspecting symlinks provided by the charm + author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + + """ + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + logger.debug("Event %s not defined for %s.", event_name, charm) + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. 
+ if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + logger.debug('Emitting Juju event %s.', event_name) + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name)) + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +class _Dispatcher: + """Encapsulate how to figure out what event Juju wants us to run. + + Also knows how to run “legacy” hooks when Juju called us via a top-level + ``dispatch`` binary. + + Args: + charm_dir: the toplevel directory of the charm + + Attributes: + event_name: the name of the event to run + is_dispatch_aware: are we running under a Juju that knows about the + dispatch binary? + + """ + + def __init__(self, charm_dir: Path): + self._charm_dir = charm_dir + self._exec_path = Path(sys.argv[0]) + + if 'JUJU_DISPATCH_PATH' in os.environ and (charm_dir / 'dispatch').exists(): + self._init_dispatch() + else: + self._init_legacy() + + def ensure_event_links(self, charm): + """Make sure necessary symlinks are present on disk""" + + if self.is_dispatch_aware: + # links aren't needed + return + + # When a charm is force-upgraded and a unit is in an error state Juju + # does not run upgrade-charm and instead runs the failed hook followed + # by config-changed. Given the nature of force-upgrading the hook setup + # code is not triggered on config-changed. + # + # 'start' event is included as Juju does not fire the install event for + # K8s charms (see LP: #1854635). + if (self.event_name in ('install', 'start', 'upgrade_charm') + or self.event_name.endswith('_storage_attached')): + _setup_event_links(self._charm_dir, charm) + + def run_any_legacy_hook(self): + """Run any extant legacy hook. + + If there is both a dispatch file and a legacy hook for the + current event, run the wanted legacy hook. 
+ """ + + if not self.is_dispatch_aware: + # we *are* the legacy hook + return + + dispatch_path = self._charm_dir / self._dispatch_path + if not dispatch_path.exists(): + logger.debug("Legacy %s does not exist.", self._dispatch_path) + return + + # super strange that there isn't an is_executable + if not os.access(str(dispatch_path), os.X_OK): + logger.warning("Legacy %s exists but is not executable.", self._dispatch_path) + return + + if dispatch_path.resolve() == self._exec_path.resolve(): + logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path) + return + + argv = sys.argv.copy() + argv[0] = str(dispatch_path) + logger.info("Running legacy %s.", self._dispatch_path) + try: + subprocess.run(argv, check=True) + except subprocess.CalledProcessError as e: + logger.warning( + "Legacy %s exited with status %d.", + self._dispatch_path, e.returncode) + sys.exit(e.returncode) + else: + logger.debug("Legacy %s exited with status 0.", self._dispatch_path) + + def _set_name_from_path(self, path: Path): + """Sets the name attribute to that which can be inferred from the given path.""" + name = path.name.replace('-', '_') + if path.parent.name == 'actions': + name = '{}_action'.format(name) + self.event_name = name + + def _init_legacy(self): + """Set up the 'legacy' dispatcher. + + The current Juju doesn't know about 'dispatch' and calls hooks + explicitly. + """ + self.is_dispatch_aware = False + self._set_name_from_path(self._exec_path) + + def _init_dispatch(self): + """Set up the new 'dispatch' dispatcher. + + The current Juju will run 'dispatch' if it exists, and otherwise fall + back to the old behaviour. + + JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install, + in both cases. + """ + self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH']) + + if 'OPERATOR_DISPATCH' in os.environ: + logger.debug("Charm called itself via %s.", self._dispatch_path) + sys.exit(0) + os.environ['OPERATOR_DISPATCH'] = '1' + + self.is_dispatch_aware = True + self._set_name_from_path(self._dispatch_path) + + def is_restricted_context(self): + """"Return True if we are running in a restricted Juju context. + + When in a restricted context, most commands (relation-get, config-get, + state-get) are not available. As such, we change how we interact with + Juju. + """ + return self.event_name in ('collect_metrics',) + + +def main(charm_class, use_juju_for_storage=False): + """Setup the charm and dispatch the observed event. + + The event name is based on the way this executable was called (argv[0]). + """ + charm_dir = _get_charm_dir() + + model_backend = ops.model._ModelBackend() + debug = ('JUJU_DEBUG' in os.environ) + setup_root_logging(model_backend, debug=debug) + logger.debug("Operator Framework %s up and running.", ops.__version__) + + dispatcher = _Dispatcher(charm_dir) + dispatcher.run_any_legacy_hook() + + metadata = (charm_dir / 'metadata.yaml').read_text() + actions_meta = charm_dir / 'actions.yaml' + if actions_meta.exists(): + actions_metadata = actions_meta.read_text() + else: + actions_metadata = None + + if not yaml.__with_libyaml__: + logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader') + meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata) + model = ops.model.Model(meta, model_backend) + + # TODO: If Juju unit agent crashes after exit(0) from the charm code + # the framework will commit the snapshot but Juju will not commit its + # operation. 
+ charm_state_path = charm_dir / CHARM_STATE_FILE + if use_juju_for_storage: + if dispatcher.is_restricted_context(): + # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event + # Though we eventually expect that juju will run collect-metrics in a + # non-restricted context. Once we can determine that we are running collect-metrics + # in a non-restricted context, we should fire the event as normal. + logger.debug('"%s" is not supported when using Juju for storage\n' + 'see: https://github.com/canonical/operator/issues/348', + dispatcher.event_name) + # Note that we don't exit nonzero, because that would cause Juju to rerun the hook + return + store = ops.storage.JujuStorage() + else: + store = ops.storage.SQLiteStorage(charm_state_path) + framework = ops.framework.Framework(store, charm_dir, meta, model) + try: + sig = inspect.signature(charm_class) + try: + sig.bind(framework) + except TypeError: + msg = ( + "the second argument, 'key', has been deprecated and will be " + "removed after the 0.7 release") + warnings.warn(msg, DeprecationWarning) + charm = charm_class(framework, None) + else: + charm = charm_class(framework) + dispatcher.ensure_event_links(charm) + + # TODO: Remove the collect_metrics check below as soon as the relevant + # Juju changes are made. + # + # Skip reemission of deferred events for collect-metrics events because + # they do not have the full access to all hook tools. + if not dispatcher.is_restricted_context(): + framework.reemit() + + _emit_charm_event(charm, dispatcher.event_name) + + framework.commit() + finally: + framework.close() diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/model.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b96e89154ea9cec2b62a4fab4649412e115c304e --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/model.py @@ -0,0 +1,1237 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import decimal +import ipaddress +import json +import os +import re +import shutil +import tempfile +import time +import typing +import weakref + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + +import ops +from ops.jujuversion import JujuVersion + + +class Model: + """Represents the Juju Model as seen from this unit. + + This should not be instantiated directly by Charmers, but can be accessed as `self.model` + from any class that derives from Object. + + Attributes: + unit: A :class:`Unit` that represents the unit that is running this code (eg yourself) + app: A :class:`Application` that represents the application this unit is a part of. + relations: Mapping of endpoint to list of :class:`Relation` answering the question + "what am I currently related to". 
See also :meth:`.get_relation` + config: A dict of the config for the current application. + resources: Access to resources for this charm. Use ``model.resources.fetch(resource_name)`` + to get the path on disk where the resource can be found. + storages: Mapping of storage_name to :class:`Storage` for the storage points defined in + metadata.yaml + pod: Used to get access to ``model.pod.set_spec`` to set the container specification + for Kubernetes charms. + """ + + def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'): + self._cache = _ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(self._backend.unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + self._bindings = BindingMapping(self._backend) + + @property + def name(self) -> str: + """Return the name of the Model that this unit is running in. + + This is read from the environment variable ``JUJU_MODEL_NAME``. + """ + return self._backend.model_name + + def get_unit(self, unit_name: str) -> 'Unit': + """Get an arbitrary unit by name. + + Internally this uses a cache, so asking for the same unit two times will + return the same object. + """ + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name: str) -> 'Application': + """Get an application by name. + + Internally this uses a cache, so asking for the same application two times will + return the same object. + """ + return self._cache.get(Application, app_name) + + def get_relation( + self, relation_name: str, + relation_id: typing.Optional[int] = None) -> 'Relation': + """Get a specific Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. + + Args: + relation_name: The name of the endpoint for this charm + relation_id: An identifier for a specific relation. Used to disambiguate when a + given application has more than one relation on a given endpoint. + Raises: + TooManyRelatedAppsError: is raised if there is more than one relation to the + supplied relation_name and no relation_id was supplied + """ + return self.relations._get_unique(relation_name, relation_id) + + def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a network space binding. + + Args: + binding_key: The relation name or instance to obtain bindings for. + Returns: + If ``binding_key`` is a relation name, the method returns the default binding + for that relation. If a relation instance is provided, the method first looks + up a more specific binding for that specific relation ID, and if none is found + falls back to the default binding for the relation name. 
+ """ + return self._bindings.get(binding_key) + + +class _ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + """Represents a named application in the model. + + This might be your application, or might be an application that you are related to. + Charmers should not instantiate Application objects directly, but should use + :meth:`Model.get_app` if they need a reference to a given application. + + Attributes: + name: The name of this application (eg, 'mysql'). This name may differ from the name of + the charm, if the user has deployed it to a different name. + """ + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of the overall application. + + Can only be read and set by the lead unit of the application. + + The status of remote units is always Unknown. + + Raises: + RuntimeError: if you try to set the status of another application, or if you try to + set the status of this application as a unit that is not the leader. + InvalidStatusError: if you try to set the status to something that is not a + :class:`StatusBase` + + Example:: + + self.model.app.status = BlockedStatus('I need a human to come help me') + """ + if not self._is_our_app: + return UnknownStatus() + + if not self._backend.is_leader(): + raise RuntimeError('cannot get application status as a non-leader unit') + + if self._status: + return self._status + + s = self._backend.status_get(is_app=True) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for application {} status: {}'.format(self, value) + ) + + if not self._is_our_app: + raise RuntimeError('cannot to set status for a remote application {}'.format(self)) + + if not self._backend.is_leader(): + raise RuntimeError('cannot set application status as a non-leader unit') + + self._backend.status_set(value.name, value.message, is_app=True) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + +class Unit: + """Represents a named unit in the model. + + This might be your unit, another unit of your application, or a unit of another application + that you are related to. + + Attributes: + name: The name of the unit (eg, 'mysql/0') + app: The Application the unit is a part of. + """ + + def __init__(self, name, backend, cache): + self.name = name + + app_name = name.split('/')[0] + self.app = cache.get(Application, app_name) + + self._backend = backend + self._cache = cache + self._is_our_unit = self.name == self._backend.unit_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of a specific unit. + + The status of any unit other than yourself is always Unknown. 
+ + Raises: + RuntimeError: if you try to set the status of a unit other than yourself. + InvalidStatusError: if you try to set the status to something other than + a :class:`StatusBase` + Example:: + + self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators') + """ + if not self._is_our_unit: + return UnknownStatus() + + if self._status: + return self._status + + s = self._backend.status_get(is_app=False) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for unit {} status: {}'.format(self, value) + ) + + if not self._is_our_unit: + raise RuntimeError('cannot set status for a remote unit {}'.format(self)) + + self._backend.status_set(value.name, value.message, is_app=False) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + def is_leader(self) -> bool: + """Return whether this unit is the leader of its application. + + This can only be called for your own unit. + Returns: + True if you are the leader, False otherwise + Raises: + RuntimeError: if called for a unit that is not yourself + """ + if self._is_our_unit: + # This value is not cached as it is not guaranteed to persist for the whole duration + # of a hook execution. + return self._backend.is_leader() + else: + raise RuntimeError( + 'leadership status of remote units ({}) is not visible to other' + ' applications'.format(self) + ) + + def set_workload_version(self, version: str) -> None: + """Record the version of the software running as the workload. + + This shouldn't be confused with the revision of the charm. This is informative only; + shown in the output of 'juju status'. + """ + if not isinstance(version, str): + raise TypeError("workload version must be a str, not {}: {!r}".format( + type(version).__name__, version)) + self._backend.application_version_set(version) + + +class LazyMapping(Mapping, ABC): + """Represents a dict that isn't populated until it is accessed. + + Charm authors should generally never need to use this directly, but it forms + the basis for many of the dicts that the framework tracks. 
+ """ + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def _invalidate(self): + self._lazy_data = None + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of :class:`Relation` instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role.is_peer(): + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, + self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _invalidate(self, relation_name): + """Used to wipe the cache of a given relation_name. + + Not meant to be used by Charm authors. The content of relation data is + static for the lifetime of a hook, so it is safe to cache in memory once + accessed. + """ + self._data[relation_name] = None + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError('relation id {} must be int or None not {}'.format( + relation_id, + type(relation_id).__name__)) + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, + self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. + raise TooManyRelatedAppsError(relation_name, num_related, 1) + + +class BindingMapping: + """Mapping of endpoints to network bindings. + + Charm authors should not instantiate this directly, but access it via + :meth:`Model.get_binding` + """ + + def __init__(self, backend): + self._backend = backend + self._data = {} + + def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a specific Binding for an endpoint/relation. + + Not used directly by Charm authors. 
See :meth:`Model.get_binding` + """ + if isinstance(binding_key, Relation): + binding_name = binding_key.name + relation_id = binding_key.id + elif isinstance(binding_key, str): + binding_name = binding_key + relation_id = None + else: + raise ModelError('binding key must be str or relation instance, not {}' + ''.format(type(binding_key).__name__)) + binding = self._data.get(binding_key) + if binding is None: + binding = Binding(binding_name, relation_id, self._backend) + self._data[binding_key] = binding + return binding + + +class Binding: + """Binding to a network space. + + Attributes: + name: The name of the endpoint this binding represents (eg, 'db') + """ + + def __init__(self, name, relation_id, backend): + self.name = name + self._relation_id = relation_id + self._backend = backend + self._network = None + + @property + def network(self) -> 'Network': + """The network information for this binding.""" + if self._network is None: + try: + self._network = Network(self._backend.network_get(self.name, self._relation_id)) + except RelationNotFoundError: + if self._relation_id is None: + raise + # If a relation is dead, we can still get network info associated with an + # endpoint itself + self._network = Network(self._backend.network_get(self.name)) + return self._network + + +class Network: + """Network space details. + + Charm authors should not instantiate this directly, but should get access to the Network + definition from :meth:`Model.get_binding` and its ``network`` attribute. + + Attributes: + interfaces: A list of :class:`NetworkInterface` details. This includes the + information about how your application should be configured (eg, what + IP addresses should you bind to.) + Note that multiple addresses for a single interface are represented as multiple + interfaces. (eg, ``[NetworKInfo('ens1', '10.1.1.1/32'), + NetworkInfo('ens1', '10.1.2.1/32'])``) + ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP + addresses that other units should use to get in touch with you. + egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that + other units will see you connecting from. Due to things like NAT it isn't always + possible to narrow it down to a single address, but when it is clear, the CIDRs + will be constrained to a single address. (eg, 10.0.0.1/32) + Args: + network_info: A dict of network information as returned by ``network-get``. + """ + + def __init__(self, network_info: dict): + self.interfaces = [] + # Treat multiple addresses on an interface as multiple logical + # interfaces with the same name. + for interface_info in network_info['bind-addresses']: + interface_name = interface_info['interface-name'] + for address_info in interface_info['addresses']: + self.interfaces.append(NetworkInterface(interface_name, address_info)) + self.ingress_addresses = [] + for address in network_info['ingress-addresses']: + self.ingress_addresses.append(ipaddress.ip_address(address)) + self.egress_subnets = [] + for subnet in network_info['egress-subnets']: + self.egress_subnets.append(ipaddress.ip_network(subnet)) + + @property + def bind_address(self): + """A single address that your application should bind() to. + + For the common case where there is a single answer. This represents a single + address from :attr:`.interfaces` that can be used to configure where your + application should bind() and listen(). 
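+
+        A hedged usage sketch (the 'website' endpoint name is a placeholder)::
+
+            binding = self.model.get_binding('website')
+            address = binding.network.bind_address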
+ """ + return self.interfaces[0].address + + @property + def ingress_address(self): + """The address other applications should use to connect to your unit. + + Due to things like public/private addresses, NAT and tunneling, the address you bind() + to is not always the address other people can use to connect() to you. + This is just the first address from :attr:`.ingress_addresses`. + """ + return self.ingress_addresses[0] + + +class NetworkInterface: + """Represents a single network interface that the charm needs to know about. + + Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding` + to get the network information for a given endpoint. + + Attributes: + name: The name of the interface (eg. 'eth0', or 'ens1') + subnet: An :class:`ipaddress.ip_network` representation of the IP for the network + interface. This may be a single address (eg '10.0.1.2/32') + """ + + def __init__(self, name: str, address_info: dict): + self.name = name + # TODO: expose a hardware address here, see LP: #1864070. + self.address = ipaddress.ip_address(address_info['value']) + cidr = address_info['cidr'] + if not cidr: + # The cidr field may be empty, see LP: #1864102. + # In this case, make it a /32 or /128 IP network. + self.subnet = ipaddress.ip_network(address_info['value']) + else: + self.subnet = ipaddress.ip_network(cidr) + # TODO: expose a hostname/canonical name for the address here, see LP: #1864086. + + +class Relation: + """Represents an established relation between this application and another application. + + This class should not be instantiated directly, instead use :meth:`Model.get_relation` + or :attr:`RelationEvent.relation`. + + Attributes: + name: The name of the local endpoint of the relation (eg 'db') + id: The identifier for a particular relation (integer) + app: An :class:`Application` representing the remote application of this relation. + For peer relations this will be the local application. + units: A set of :class:`Unit` for units that have started and joined this relation. + data: A :class:`RelationData` holding the data buckets for each entity + of a relation. Accessed via eg Relation.data[unit]['foo'] + """ + + def __init__( + self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit, + backend: '_ModelBackend', cache: '_ModelCache'): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. + if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. + pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return '<{}.{} {}:{}>'.format(type(self).__module__, + type(self).__name__, + self.name, + self.id) + + +class RelationData(Mapping): + """Represents the various data buckets of a given relation. + + Each unit and application involved in a relation has their own data bucket. + Eg: ``{entity: RelationDataContent}`` + where entity can be either a :class:`Unit` or a :class:`Application`. + + Units can read and write their own data, and if they are the leader, + they can read and write their application data. They are allowed to read + remote unit and application data. + + This class should not be created directly. 
It should be accessed via + :attr:`Relation.data` + """ + + def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'): + self.relation = weakref.proxy(relation) + self._data = { + our_unit: RelationDataContent(self.relation, our_unit, backend), + our_unit.app: RelationDataContent(self.relation, our_unit.app, backend), + } + self._data.update({ + unit: RelationDataContent(self.relation, unit, backend) + for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app is not None: + self._data.update({ + self.relation.app: RelationDataContent(self.relation, self.relation.app, backend), + }) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on + # whether this unit is a leader or not, but this is not guaranteed + # to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError('cannot set relation data for {}'.format(self._entity.name)) + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an + # empty string will remove the key entirely from the relation data. + del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty + # string will remove the key entirely from the relation data. + self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units. + + To access a status by name, see :meth:`StatusBase.from_name`, most use cases will just + directly use the child class to indicate their status. 
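+
+    For illustration (``self`` is assumed to be a charm with access to
+    ``self.model``)::
+
+        self.model.unit.status = ActiveStatus('ready')
+        # Reconstruct a status object by name, eg from hook-tool output:
+        status = StatusBase.from_name('blocked', 'waiting for an operator')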
+ """ + + _statuses = {} + name = None + + def __init__(self, message: str): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + return super().__new__(cls) + + def __eq__(self, other): + if not isinstance(self, type(other)): + return False + return self.message == other.message + + def __repr__(self): + return "{.__class__.__name__}({!r})".format(self, self.message) + + @classmethod + def from_name(cls, name: str, message: str): + if name == 'unknown': + # unknown is special + return UnknownStatus() + else: + return cls._statuses[name](message) + + @classmethod + def register(cls, child): + if child.name is None: + raise AttributeError('cannot register a Status which has no name') + cls._statuses[child.name] = child + return child + + +@StatusBase.register +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the + charm has not called status-set yet. + + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + def __repr__(self): + return "UnknownStatus()" + + +@StatusBase.register +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message: str = ''): + super().__init__(message) + + +@StatusBase.register +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +@StatusBase.register +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation + for providing those services. This is a "spinning" state, not an error state. It + reflects activity on the unit itself, not on peers or related units. + + """ + name = 'maintenance' + + +@StatusBase.register +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which + it is related is not running. + + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name: str) -> Path: + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. + """ + if name not in self._paths: + raise RuntimeError('invalid resource name: {}'.format(name)) + if self._paths[name] is None: + self._paths[name] = Path(self._backend.resource_get(name)) + return self._paths[name] + + +class Pod: + """Represents the definition of a pod spec in Kubernetes models. + + Currently only supports simple access to setting the Juju pod spec via :attr:`.set_spec`. + """ + + def __init__(self, backend: '_ModelBackend'): + self._backend = backend + + def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None): + """Set the specification for pods that Juju should start in kubernetes. + + See `juju help-tool pod-spec-set` for details of what should be passed. 
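+
+        An illustrative sketch only; the container name, image and port values
+        below are placeholders::
+
+            self.model.pod.set_spec({
+                'version': 3,
+                'containers': [{
+                    'name': 'web',
+                    'imageDetails': {'imagePath': 'example/image:latest'},
+                    'ports': [{'containerPort': 80, 'protocol': 'TCP'}],
+                }],
+            })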
+
+        Args:
+            spec: The mapping defining the pod specification.
+            k8s_resources: Additional kubernetes-specific specification.
+        """
+        if not self._backend.is_leader():
+            raise ModelError('cannot set a pod spec as this unit is not a leader')
+        self._backend.pod_spec_set(spec, k8s_resources)
+
+
+class StorageMapping(Mapping):
+    """Map of storage names to lists of Storage instances."""
+
+    def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'):
+        self._backend = backend
+        self._storage_map = {storage_name: None for storage_name in storage_names}
+
+    def __contains__(self, key: str):
+        return key in self._storage_map
+
+    def __len__(self):
+        return len(self._storage_map)
+
+    def __iter__(self):
+        return iter(self._storage_map)
+
+    def __getitem__(self, storage_name: str) -> typing.List['Storage']:
+        storage_list = self._storage_map[storage_name]
+        if storage_list is None:
+            storage_list = self._storage_map[storage_name] = []
+            for storage_id in self._backend.storage_list(storage_name):
+                storage_list.append(Storage(storage_name, storage_id, self._backend))
+        return storage_list
+
+    def request(self, storage_name: str, count: int = 1):
+        """Request new storage instances of a given name.
+
+        Uses the storage-add tool to request additional storage. Juju will notify the unit
+        via ``<storage_name>-storage-attached`` events when it becomes available.
+        """
+        if storage_name not in self._storage_map:
+            raise ModelError(('cannot add storage {!r}:'
+                              ' it is not present in the charm metadata').format(storage_name))
+        self._backend.storage_add(storage_name, count)
+
+
+class Storage:
+    """Represents a storage as defined in metadata.yaml.
+
+    Attributes:
+        name: Simple string name of the storage
+        id: The provider id for storage
+    """
+
+    def __init__(self, storage_name, storage_id, backend):
+        self.name = storage_name
+        self.id = storage_id
+        self._backend = backend
+        self._location = None
+
+    @property
+    def location(self):
+        if self._location is None:
+            raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location")
+            self._location = Path(raw)
+        return self._location
+
+
+class ModelError(Exception):
+    """Base class for exceptions raised when interacting with the Model."""
+    pass
+
+
+class TooManyRelatedAppsError(ModelError):
+    """Raised by :meth:`Model.get_relation` if there is more than one related application."""
+
+    def __init__(self, relation_name, num_related, max_supported):
+        super().__init__('Too many remote applications on {} ({} > {})'.format(
+            relation_name, num_related, max_supported))
+        self.relation_name = relation_name
+        self.num_related = num_related
+        self.max_supported = max_supported
+
+
+class RelationDataError(ModelError):
+    """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid.
+
+    This is raised if you're either trying to set a value to something that isn't a string,
+    or if you are trying to set a value in a bucket that you don't have access to. (eg,
+    another application/unit or setting your application data but you aren't the leader.)
+    """
+
+
+class RelationNotFoundError(ModelError):
+    """Backend error when querying juju for a given relation and that relation doesn't exist."""
+
+
+class InvalidStatusError(ModelError):
+    """Raised if trying to set an Application or Unit status to something invalid."""
+
+
+class _ModelBackend:
+    """Represents the connection between the Model representation and talking to Juju.
+ + Charm authors should not directly interact with the ModelBackend, it is a private + implementation of Model. + """ + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self, unit_name=None, model_name=None): + if unit_name is None: + self.unit_name = os.environ['JUJU_UNIT_NAME'] + else: + self.unit_name = unit_name + if model_name is None: + model_name = os.environ.get('JUJU_MODEL_NAME') + self.model_name = model_name + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = None + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', '-r', str(relation_id), + return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'getting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-get', '-r', str(relation_id), '-', member_name] + if is_app: + args.append('--app') + + try: + return self._run(*args, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'setting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)] + if is_app: + args.append('--app') + + try: + return self._run(*args) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm code is executing on. + + The value is cached for the duration of a lease which is 30s in Juju. + """ + now = time.monotonic() + if self._leader_check_time is None: + check = True + else: + time_since_check = datetime.timedelta(seconds=now - self._leader_check_time) + check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None) + if check: + # Current time MUST be saved before running is-leader to ensure the cache + # is only used inside the window that is-leader itself asserts. 
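+            # (The lease window is LEASE_RENEWAL_PERIOD, ie. at least 30 seconds
+            # of validity counted from the moment recorded here.)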
+ self._leader_check_time = now + self._is_leader = self._run('is-leader', return_output=True, use_json=True) + + return self._is_leader + + def resource_get(self, resource_name): + return self._run('resource-get', resource_name, return_output=True).strip() + + def pod_spec_set(self, spec, k8s_resources): + tmpdir = Path(tempfile.mkdtemp('-pod-spec-set')) + try: + spec_path = tmpdir / 'spec.json' + spec_path.write_text(json.dumps(spec)) + args = ['--file', str(spec_path)] + if k8s_resources: + k8s_res_path = tmpdir / 'k8s-resources.json' + k8s_res_path.write_text(json.dumps(k8s_resources)) + args.extend(['--k8s-resources', str(k8s_res_path)]) + self._run('pod-spec-set', *args) + finally: + shutil.rmtree(str(tmpdir)) + + def status_get(self, *, is_app=False): + """Get a status of a unit or an application. + + Args: + is_app: A boolean indicating whether the status should be retrieved for a unit + or an application. + """ + content = self._run( + 'status-get', '--include-data', '--application={}'.format(is_app), + use_json=True, + return_output=True) + # Unit status looks like (in YAML): + # message: 'load: 0.28 0.26 0.26' + # status: active + # status-data: {} + # Application status looks like (in YAML): + # application-status: + # message: 'load: 0.28 0.26 0.26' + # status: active + # status-data: {} + # units: + # uo/0: + # message: 'load: 0.28 0.26 0.26' + # status: active + # status-data: {} + + if is_app: + return {'status': content['application-status']['status'], + 'message': content['application-status']['message']} + else: + return content + + def status_set(self, status, message='', *, is_app=False): + """Set a status of a unit or an application. + + Args: + app: A boolean indicating whether the status should be set for a unit or an + application. + """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', '--application={}'.format(is_app), status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, + return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, + return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError('storage count must be integer, got: {} ({})'.format(count, + type(count))) + self._run('storage-add', '{}={}'.format(name, count)) + + def action_get(self): + return self._run('action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()]) + + def action_log(self, message): + self._run('action-log', message) + + def action_fail(self, message=''): + self._run('action-fail', message) + + def application_version_set(self, version): + self._run('application-version-set', '--', version) + + def juju_log(self, level, message): + self._run('juju-log', '--log-level', level, message) + + def network_get(self, binding_name, relation_id=None): + """Return network info provided by network-get for a given binding. + + Args: + binding_name: A name of a binding (relation name or extra-binding name). + relation_id: An optional relation id to get network info for. 
+ """ + cmd = ['network-get', binding_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def add_metrics(self, metrics, labels=None): + cmd = ['add-metric'] + + if labels: + label_args = [] + for k, v in labels.items(): + _ModelBackendValidator.validate_metric_label(k) + _ModelBackendValidator.validate_label_value(k, v) + label_args.append('{}={}'.format(k, v)) + cmd.extend(['--labels', ','.join(label_args)]) + + metric_args = [] + for k, v in metrics.items(): + _ModelBackendValidator.validate_metric_key(k) + metric_value = _ModelBackendValidator.format_metric_value(v) + metric_args.append('{}={}'.format(k, metric_value)) + cmd.extend(metric_args) + self._run(*cmd) + + +class _ModelBackendValidator: + """Provides facilities for validating inputs and formatting them for model backends.""" + + METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$') + + @classmethod + def validate_metric_key(cls, key): + if cls.METRIC_KEY_REGEX.match(key) is None: + raise ModelError( + 'invalid metric key {!r}: must match {}'.format( + key, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def validate_metric_label(cls, label_name): + if cls.METRIC_KEY_REGEX.match(label_name) is None: + raise ModelError( + 'invalid metric label name {!r}: must match {}'.format( + label_name, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def format_metric_value(cls, value): + try: + decimal_value = decimal.Decimal.from_float(value) + except TypeError as e: + e2 = ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + raise e2 from e + if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0: + raise ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + return str(decimal_value) + + @classmethod + def validate_label_value(cls, label, value): + # Label values cannot be empty, contain commas or equal signs as those are + # used by add-metric as separators. + if not value: + raise ModelError( + 'metric label {} has an empty value, which is not allowed'.format(label)) + v = str(value) + if re.search('[,=]', v) is not None: + raise ModelError( + 'metric label values must not contain "," or "=": {}={!r}'.format(label, value)) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/storage.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/storage.py new file mode 100755 index 0000000000000000000000000000000000000000..d4310ce1cfbb707c6278b70f84a9751da3ce07af --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/storage.py @@ -0,0 +1,318 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from datetime import timedelta +import pickle +import shutil +import subprocess +import sqlite3 +import typing + +import yaml + + +class SQLiteStorage: + + DB_LOCK_TIMEOUT = timedelta(hours=1) + + def __init__(self, filename): + # The isolation_level argument is set to None such that the implicit + # transaction management behavior of the sqlite3 module is disabled. + self._db = sqlite3.connect(str(filename), + isolation_level=None, + timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._setup() + + def _setup(self): + # Make sure that the database is locked until the connection is closed, + # not until the transaction ends. + self._db.execute("PRAGMA locking_mode=EXCLUSIVE") + c = self._db.execute("BEGIN") + c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") + if c.fetchone()[0] == 0: + # Keep in mind what might happen if the process dies somewhere below. + # The system must not be rendered permanently broken by that. + self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") + self._db.execute(''' + CREATE TABLE notice ( + sequence INTEGER PRIMARY KEY AUTOINCREMENT, + event_path TEXT, + observer_path TEXT, + method_name TEXT) + ''') + self._db.commit() + + def close(self): + self._db.close() + + def commit(self): + self._db.commit() + + # There's commit but no rollback. For abort to be supported, we'll need logic that + # can rollback decisions made by third-party code in terms of the internal state + # of objects that have been snapshotted, and hooks to let them know about it and + # take the needed actions to undo their logic until the last snapshot. + # This is doable but will increase significantly the chances for mistakes. + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + """Part of the Storage API, persist a snapshot data under the given handle. + + Args: + handle_path: The string identifying the snapshot. + snapshot_data: The data to be persisted. (as returned by Object.snapshot()). This + might be a dict/tuple/int, but must only contain 'simple' python types. + """ + # Use pickle for serialization, so the value remains portable. + raw_data = pickle.dumps(snapshot_data) + self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data)) + + def load_snapshot(self, handle_path: str) -> typing.Any: + """Part of the Storage API, retrieve a snapshot that was previously saved. + + Args: + handle_path: The string identifying the snapshot. + Raises: + NoSnapshotError: if there is no snapshot for the given handle_path. + """ + c = self._db.cursor() + c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,)) + row = c.fetchone() + if row: + return pickle.loads(row[0]) + raise NoSnapshotError(handle_path) + + def drop_snapshot(self, handle_path: str): + """Part of the Storage API, remove a snapshot that was previously saved. + + Dropping a snapshot that doesn't exist is treated as a no-op. 
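+
+        A hedged end-to-end sketch of the snapshot API (the handle path and
+        payload are illustrative)::
+
+            store = SQLiteStorage(':memory:')
+            store.save_snapshot('MyObject[1]', {'count': 1})
+            data = store.load_snapshot('MyObject[1]')   # {'count': 1}
+            store.drop_snapshot('MyObject[1]')
+            store.commit()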
+
+        """
+        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))
+
+    def list_snapshots(self) -> typing.Generator[str, None, None]:
+        """Return the names of all snapshots that are currently saved."""
+        c = self._db.cursor()
+        c.execute("SELECT handle FROM snapshot")
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield row[0]
+
+    def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, record a notice (event and observer)."""
+        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
+                         (event_path, observer_path, method_name))
+
+    def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, remove a notice that was previously recorded."""
+        self._db.execute('''
+            DELETE FROM notice
+            WHERE event_path=?
+            AND observer_path=?
+            AND method_name=?
+            ''', (event_path, observer_path, method_name))
+
+    def notices(self, event_path: typing.Optional[str]) ->\
+            typing.Generator[typing.Tuple[str, str, str], None, None]:
+        """Part of the Storage API, return all notices that begin with event_path.
+
+        Args:
+            event_path: If supplied, will only yield events that match event_path. If not
+                supplied (or None/'') will return all events.
+        Returns:
+            Iterable of (event_path, observer_path, method_name) tuples.
+        """
+        if event_path:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                WHERE event_path=?
+                ORDER BY sequence
+                ''', (event_path,))
+        else:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                ORDER BY sequence
+                ''')
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield tuple(row)
+
+
+class JujuStorage:
+    """Store the content tracked by the Framework in Juju.
+
+    This uses :class:`_JujuStorageBackend` to interact with state-get/state-set
+    as the way to store state for the framework and for components.
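+
+    Illustrative only::
+
+        if _JujuStorageBackend.is_available():
+            store = JujuStorage()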
+ """ + + NOTICE_KEY = "#notices#" + + def __init__(self, backend: '_JujuStorageBackend' = None): + self._backend = backend + if backend is None: + self._backend = _JujuStorageBackend() + + def close(self): + return + + def commit(self): + return + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + self._backend.set(handle_path, snapshot_data) + + def load_snapshot(self, handle_path): + try: + content = self._backend.get(handle_path) + except KeyError: + raise NoSnapshotError(handle_path) + return content + + def drop_snapshot(self, handle_path): + self._backend.delete(handle_path) + + def save_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.append([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.remove([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def notices(self, event_path: str): + notice_list = self._load_notice_list() + for row in notice_list: + if row[0] != event_path: + continue + yield tuple(row) + + def _load_notice_list(self) -> typing.List[typing.Tuple[str]]: + try: + notice_list = self._backend.get(self.NOTICE_KEY) + except KeyError: + return [] + if notice_list is None: + return [] + return notice_list + + def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None: + self._backend.set(self.NOTICE_KEY, notices) + + +class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)): + """Handle a couple basic python types. + + yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one + that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just + subclass SafeLoader and add tuples back in. + """ + # Taken from the example at: + # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml + + construct_python_tuple = yaml.Loader.construct_python_tuple + + +_SimpleLoader.add_constructor( + u'tag:yaml.org,2002:python/tuple', + _SimpleLoader.construct_python_tuple) + + +class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)): + """Add types supported by 'marshal' + + YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So + we want to only support dumping out types that are safe to load. + """ + + +_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple +_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple) + + +class _JujuStorageBackend: + """Implements the interface from the Operator framework to Juju's state-get/set/etc.""" + + @staticmethod + def is_available() -> bool: + """Check if Juju state storage is available. + + This checks if there is a 'state-get' executable in PATH. + """ + p = shutil.which('state-get') + return p is not None + + def set(self, key: str, value: typing.Any) -> None: + """Set a key to a given value. + + Args: + key: The string key that will be used to find the value later + value: Arbitrary content that will be returned by get(). + Raises: + CalledProcessError: if 'state-set' returns an error code. + """ + # default_flow_style=None means that it can use Block for + # complex types (types that have nested types) but use flow + # for simple types (like an array). Not all versions of PyYAML + # have the same default style. 
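+        # For example, a simple list value is emitted in flow style ("[1, 2, 3]")
+        # while nested mappings keep block style; the outer document below then
+        # wraps that text as a single literal ('|') scalar per key for state-set.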
+ encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None) + content = yaml.dump( + {key: encoded_value}, encoding='utf-8', default_style='|', + default_flow_style=False, + Dumper=_SimpleDumper) + subprocess.run(["state-set", "--file", "-"], input=content, check=True) + + def get(self, key: str) -> typing.Any: + """Get the bytes value associated with a given key. + + Args: + key: The string key that will be used to find the value + Raises: + CalledProcessError: if 'state-get' returns an error code. + """ + # We don't capture stderr here so it can end up in debug logs. + p = subprocess.run( + ["state-get", key], + stdout=subprocess.PIPE, + check=True, + ) + if p.stdout == b'' or p.stdout == b'\n': + raise KeyError(key) + return yaml.load(p.stdout, Loader=_SimpleLoader) + + def delete(self, key: str) -> None: + """Remove a key from being tracked. + + Args: + key: The key to stop storing + Raises: + CalledProcessError: if 'state-delete' returns an error code. + """ + subprocess.run(["state-delete", key], check=True) + + +class NoSnapshotError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return 'no snapshot data found for {} object'.format(self.handle_path) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/testing.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/testing.py new file mode 100755 index 0000000000000000000000000000000000000000..b4b3fe071216238007c9f3847ca9556be626bf6b --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/testing.py @@ -0,0 +1,586 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import pathlib +from textwrap import dedent +import tempfile +import typing +import yaml +import weakref + +from ops import ( + charm, + framework, + model, + storage, +) + + +# OptionalYAML is something like metadata.yaml or actions.yaml. You can +# pass in a file-like object or the string directly. +OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]] + + +# noinspection PyProtectedMember +class Harness: + """This class represents a way to build up the model that will drive a test suite. + + The model that is created is from the viewpoint of the charm that you are testing. + + Example:: + + harness = Harness(MyCharm) + # Do initial setup here + relation_id = harness.add_relation('db', 'postgresql') + # Now instantiate the charm to see events as the model changes + harness.begin() + harness.add_relation_unit(relation_id, 'postgresql/0') + harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'}) + # Check that charm has properly handled the relation_joined event for postgresql/0 + self.assertEqual(harness.charm. ...) + + Args: + charm_cls: The Charm class that you'll be testing. + meta: charm.CharmBase is a A string or file-like object containing the contents of + metadata.yaml. 
If not supplied, we will look for a 'metadata.yaml' file in the + parent directory of the Charm, and if not found fall back to a trivial + 'name: test-charm' metadata. + actions: A string or file-like object containing the contents of + actions.yaml. If not supplied, we will look for a 'actions.yaml' file in the + parent directory of the Charm. + """ + + def __init__( + self, + charm_cls: typing.Type[charm.CharmBase], + *, + meta: OptionalYAML = None, + actions: OptionalYAML = None): + # TODO: jam 2020-03-05 We probably want to take config as a parameter as well, since + # it would define the default values of config that the charm would see. + self._charm_cls = charm_cls + self._charm = None + self._charm_dir = 'no-disk-path' # this may be updated by _create_meta + self._lazy_resource_dir = None + self._meta = self._create_meta(meta, actions) + self._unit_name = self._meta.name + '/0' + self._framework = None + self._hooks_enabled = True + self._relation_id_counter = 0 + self._backend = _TestingModelBackend(self._unit_name, self._meta) + self._model = model.Model(self._meta, self._backend) + self._storage = storage.SQLiteStorage(':memory:') + self._framework = framework.Framework( + self._storage, self._charm_dir, self._meta, self._model) + + @property + def charm(self) -> charm.CharmBase: + """Return the instance of the charm class that was passed to __init__. + + Note that the Charm is not instantiated until you have called + :meth:`.begin()`. + """ + return self._charm + + @property + def model(self) -> model.Model: + """Return the :class:`~ops.model.Model` that is being driven by this Harness.""" + return self._model + + @property + def framework(self) -> framework.Framework: + """Return the Framework that is being driven by this Harness.""" + return self._framework + + @property + def _resource_dir(self) -> pathlib.Path: + if self._lazy_resource_dir is not None: + return self._lazy_resource_dir + + self.__resource_dir = tempfile.TemporaryDirectory() + self._lazy_resource_dir = pathlib.Path(self.__resource_dir.name) + self._finalizer = weakref.finalize(self, self.__resource_dir.cleanup) + return self._lazy_resource_dir + + def begin(self) -> None: + """Instantiate the Charm and start handling events. + + Before calling begin(), there is no Charm instance, so changes to the Model won't emit + events. You must call begin before :attr:`.charm` is valid. + """ + if self._charm is not None: + raise RuntimeError('cannot call the begin method on the harness more than once') + + # The Framework adds attributes to class objects for events, etc. As such, we can't re-use + # the original class against multiple Frameworks. So create a locally defined class + # and register it. + # TODO: jam 2020-03-16 We are looking to changes this to Instance attributes instead of + # Class attributes which should clean up this ugliness. The API can stay the same + class TestEvents(self._charm_cls.on.__class__): + pass + + TestEvents.__name__ = self._charm_cls.on.__class__.__name__ + + class TestCharm(self._charm_cls): + on = TestEvents() + + # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo, + # rather than TestCharm has no attribute foo. + TestCharm.__name__ = self._charm_cls.__name__ + self._charm = TestCharm(self._framework) + + def _create_meta(self, charm_metadata, action_metadata): + """Create a CharmMeta object. + + Handle the cases where a user doesn't supply explicit metadata snippets. 
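+
+        For illustration, a caller would typically pass snippets such as
+        (``MyCharm`` and the YAML content are placeholders)::
+
+            harness = Harness(MyCharm, meta='''
+                name: test-app
+                requires:
+                  db:
+                    interface: pgsql
+                ''')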
+ """ + filename = inspect.getfile(self._charm_cls) + charm_dir = pathlib.Path(filename).parents[1] + + if charm_metadata is None: + metadata_path = charm_dir / 'metadata.yaml' + if metadata_path.is_file(): + charm_metadata = metadata_path.read_text() + self._charm_dir = charm_dir + else: + # The simplest of metadata that the framework can support + charm_metadata = 'name: test-charm' + elif isinstance(charm_metadata, str): + charm_metadata = dedent(charm_metadata) + + if action_metadata is None: + actions_path = charm_dir / 'actions.yaml' + if actions_path.is_file(): + action_metadata = actions_path.read_text() + self._charm_dir = charm_dir + elif isinstance(action_metadata, str): + action_metadata = dedent(action_metadata) + + return charm.CharmMeta.from_yaml(charm_metadata, action_metadata) + + def add_oci_resource(self, resource_name: str, + contents: typing.Mapping[str, str] = None) -> None: + """Add oci resources to the backend. + + This will register an oci resource and create a temporary file for processing metadata + about the resource. A default set of values will be used for all the file contents + unless a specific contents dict is provided. + + Args: + resource_name: Name of the resource to add custom contents to. + contents: Optional custom dict to write for the named resource. + """ + if not contents: + contents = {'registrypath': 'registrypath', + 'username': 'username', + 'password': 'password', + } + if resource_name not in self._meta.resources.keys(): + raise RuntimeError('Resource {} is not a defined resources'.format(resource_name)) + if self._meta.resources[resource_name].type != "oci-image": + raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name)) + resource_dir = self._resource_dir / resource_name + resource_dir.mkdir(exist_ok=True) + resource_file = resource_dir / "contents.yaml" + with resource_file.open('wt', encoding='utf8') as resource_yaml: + yaml.dump(contents, resource_yaml) + self._backend._resources_map[resource_name] = resource_file + + def populate_oci_resources(self) -> None: + """Populate all OCI resources.""" + for name, data in self._meta.resources.items(): + if data.type == "oci-image": + self.add_oci_resource(name) + + def disable_hooks(self) -> None: + """Stop emitting hook events when the model changes. + + This can be used by developers to stop changes to the model from emitting events that + the charm will react to. Call :meth:`.enable_hooks` + to re-enable them. + """ + self._hooks_enabled = False + + def enable_hooks(self) -> None: + """Re-enable hook events from charm.on when the model is changed. + + By default hook events are enabled once you call :meth:`.begin`, + but if you have used :meth:`.disable_hooks`, this can be used to + enable them again. + """ + self._hooks_enabled = True + + def _next_relation_id(self): + rel_id = self._relation_id_counter + self._relation_id_counter += 1 + return rel_id + + def add_relation(self, relation_name: str, remote_app: str) -> int: + """Declare that there is a new relation between this app and `remote_app`. + + Args: + relation_name: The relation on Charm that is being related to + remote_app: The name of the application that is being related to + + Return: + The relation_id created by this add_relation. 
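+
+        Example (application and endpoint names are placeholders)::
+
+            rel_id = harness.add_relation('db', 'postgresql')
+            harness.add_relation_unit(rel_id, 'postgresql/0')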
+ """ + rel_id = self._next_relation_id() + self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id) + self._backend._relation_names[rel_id] = relation_name + self._backend._relation_list_map[rel_id] = [] + self._backend._relation_data[rel_id] = { + remote_app: {}, + self._backend.unit_name: {}, + self._backend.app_name: {}, + } + # Reload the relation_ids list + if self._model is not None: + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return rel_id + relation = self._model.get_relation(relation_name, rel_id) + app = self._model.get_app(remote_app) + self._charm.on[relation_name].relation_created.emit( + relation, app) + return rel_id + + def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: + """Add a new unit to a relation. + + Example:: + + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + + This will trigger a `relation_joined` event and a `relation_changed` event. + + Args: + relation_id: The integer relation identifier (as returned by add_relation). + remote_unit_name: A string representing the remote unit that is being added. + Return: + None + """ + self._backend._relation_list_map[relation_id].append(remote_unit_name) + self._backend._relation_data[relation_id][remote_unit_name] = {} + relation_name = self._backend._relation_names[relation_id] + # Make sure that the Model reloads the relation_list for this relation_id, as well as + # reloading the relation data for this unit. + if self._model is not None: + remote_unit = self._model.get_unit(remote_unit_name) + relation = self._model.get_relation(relation_name, relation_id) + unit_cache = relation.data.get(remote_unit, None) + if unit_cache is not None: + unit_cache._invalidate() + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return + self._charm.on[relation_name].relation_joined.emit( + relation, remote_unit.app, remote_unit) + + def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping: + """Get the relation data bucket for a single app or unit in a given relation. + + This ignores all of the safety checks of who can and can't see data in relations (eg, + non-leaders can't read their own application's relation data because there are no events + that keep that data up-to-date for the unit). + + Args: + relation_id: The relation whose content we want to look at. + app_or_unit: The name of the application or unit whose data we want to read + Return: + a dict containing the relation data for `app_or_unit` or None. + Raises: + KeyError: if relation_id doesn't exist + """ + return self._backend._relation_data[relation_id].get(app_or_unit, None) + + def get_workload_version(self) -> str: + """Read the workload version that was set by the unit.""" + return self._backend._workload_version + + def set_model_name(self, name: str) -> None: + """Set the name of the Model that this is representing. + + This cannot be called once begin() has been called. But it lets you set the value that + will be returned by Model.name. + """ + if self._charm is not None: + raise RuntimeError('cannot set the Model name after begin()') + self._backend.model_name = name + + def update_relation_data( + self, + relation_id: int, + app_or_unit: str, + key_values: typing.Mapping, + ) -> None: + """Update the relation data for a given unit or application in a given relation. 
+ + This also triggers the `relation_changed` event for this relation_id. + + Args: + relation_id: The integer relation_id representing this relation. + app_or_unit: The unit or application name that is being updated. + This can be the local or remote application. + key_values: Each key/value will be updated in the relation data. + """ + relation_name = self._backend._relation_names[relation_id] + relation = self._model.get_relation(relation_name, relation_id) + if '/' in app_or_unit: + entity = self._model.get_unit(app_or_unit) + else: + entity = self._model.get_app(app_or_unit) + rel_data = relation.data.get(entity, None) + if rel_data is not None: + # rel_data may have cached now-stale data, so _invalidate() it. + # Note, this won't cause the data to be loaded if it wasn't already. + rel_data._invalidate() + + new_values = self._backend._relation_data[relation_id][app_or_unit].copy() + for k, v in key_values.items(): + if v == '': + new_values.pop(k, None) + else: + new_values[k] = v + self._backend._relation_data[relation_id][app_or_unit] = new_values + + if app_or_unit == self._model.unit.name: + # No events for our own unit + return + if app_or_unit == self._model.app.name: + # updating our own app only generates an event if it is a peer relation and we + # aren't the leader + is_peer = self._meta.relations[relation_name].role.is_peer() + if not is_peer: + return + if self._model.unit.is_leader(): + return + self._emit_relation_changed(relation_id, app_or_unit) + + def _emit_relation_changed(self, relation_id, app_or_unit): + if self._charm is None or not self._hooks_enabled: + return + rel_name = self._backend._relation_names[relation_id] + relation = self.model.get_relation(rel_name, relation_id) + if '/' in app_or_unit: + app_name = app_or_unit.split('/')[0] + unit_name = app_or_unit + app = self.model.get_app(app_name) + unit = self.model.get_unit(unit_name) + args = (relation, app, unit) + else: + app_name = app_or_unit + app = self.model.get_app(app_name) + args = (relation, app) + self._charm.on[rel_name].relation_changed.emit(*args) + + def update_config( + self, + key_values: typing.Mapping[str, str] = None, + unset: typing.Iterable[str] = (), + ) -> None: + """Update the config as seen by the charm. + + This will trigger a `config_changed` event. + + Args: + key_values: A Mapping of key:value pairs to update in config. + unset: An iterable of keys to remove from Config. (Note that this does + not currently reset the config values to the default defined in config.yaml.) + """ + config = self._backend._config + if key_values is not None: + for key, value in key_values.items(): + config[key] = value + for key in unset: + config.pop(key, None) + # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config + # is a LazyMapping, but its _load returns a dict and this method mutates + # the dict that Config is caching. Arguably we should be doing some sort + # of charm.framework.model.config._invalidate() + if self._charm is None or not self._hooks_enabled: + return + self._charm.on.config_changed.emit() + + def set_leader(self, is_leader: bool = True) -> None: + """Set whether this unit is the leader or not. + + If this charm becomes a leader then `leader_elected` will be triggered. + + Args: + is_leader: True/False as to whether this unit is the leader. 
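+
+        Example::
+
+            harness.set_leader(True)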
+ """ + was_leader = self._backend._is_leader + self._backend._is_leader = is_leader + # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in + # the Model objects, so this automatically gets noticed. + if is_leader and not was_leader and self._charm is not None and self._hooks_enabled: + self._charm.on.leader_elected.emit() + + def _get_backend_calls(self, reset: bool = True) -> list: + """Return the calls that we have made to the TestingModelBackend. + + This is useful mostly for testing the framework itself, so that we can assert that we + do/don't trigger extra calls. + + Args: + reset: If True, reset the calls list back to empty, if false, the call list is + preserved. + Return: + ``[(call1, args...), (call2, args...)]`` + """ + calls = self._backend._calls.copy() + if reset: + self._backend._calls.clear() + return calls + + +def _record_calls(cls): + """Replace methods on cls with methods that record that they have been called. + + Iterate all attributes of cls, and for public methods, replace them with a wrapped method + that records the method called along with the arguments and keyword arguments. + """ + for meth_name, orig_method in cls.__dict__.items(): + if meth_name.startswith('_'): + continue + + def decorator(orig_method): + def wrapped(self, *args, **kwargs): + full_args = (orig_method.__name__,) + args + if kwargs: + full_args = full_args + (kwargs,) + self._calls.append(full_args) + return orig_method(self, *args, **kwargs) + return wrapped + + setattr(cls, meth_name, decorator(orig_method)) + return cls + + +@_record_calls +class _TestingModelBackend: + """This conforms to the interface for ModelBackend but provides canned data. + + DO NOT use this class directly, it is used by `Harness`_ to drive the model. + `Harness`_ is responsible for maintaining the internal consistency of the values here, + as the only public methods of this type are for implementing ModelBackend. + """ + + def __init__(self, unit_name, meta): + self.unit_name = unit_name + self.app_name = self.unit_name.split('/')[0] + self.model_name = None + self._calls = [] + self._meta = meta + self._is_leader = None + self._relation_ids_map = {} # relation name to [relation_ids,...] + self._relation_names = {} # reverse map from relation_id to relation_name + self._relation_list_map = {} # relation_id: [unit_name,...] 
+ self._relation_data = {} # {relation_id: {name: data}} + self._config = {} + self._is_leader = False + self._resources_map = {} + self._pod_spec = None + self._app_status = {'status': 'unknown', 'message': ''} + self._unit_status = {'status': 'maintenance', 'message': ''} + self._workload_version = None + + def relation_ids(self, relation_name): + try: + return self._relation_ids_map[relation_name] + except KeyError as e: + if relation_name not in self._meta.relations: + raise model.ModelError('{} is not a known relation'.format(relation_name)) from e + return [] + + def relation_list(self, relation_id): + try: + return self._relation_list_map[relation_id] + except KeyError as e: + raise model.RelationNotFoundError from e + + def relation_get(self, relation_id, member_name, is_app): + if is_app and '/' in member_name: + member_name = member_name.split('/')[0] + if relation_id not in self._relation_data: + raise model.RelationNotFoundError() + return self._relation_data[relation_id][member_name].copy() + + def relation_set(self, relation_id, key, value, is_app): + relation = self._relation_data[relation_id] + if is_app: + bucket_key = self.app_name + else: + bucket_key = self.unit_name + if bucket_key not in relation: + relation[bucket_key] = {} + bucket = relation[bucket_key] + if value == '': + bucket.pop(key, None) + else: + bucket[key] = value + + def config_get(self): + return self._config + + def is_leader(self): + return self._is_leader + + def application_version_set(self, version): + self._workload_version = version + + def resource_get(self, resource_name): + return self._resources_map[resource_name] + + def pod_spec_set(self, spec, k8s_resources): + self._pod_spec = (spec, k8s_resources) + + def status_get(self, *, is_app=False): + if is_app: + return self._app_status + else: + return self._unit_status + + def status_set(self, status, message='', *, is_app=False): + if is_app: + self._app_status = {'status': status, 'message': message} + else: + self._unit_status = {'status': status, 'message': message} + + def storage_list(self, name): + raise NotImplementedError(self.storage_list) + + def storage_get(self, storage_name_id, attribute): + raise NotImplementedError(self.storage_get) + + def storage_add(self, name, count=1): + raise NotImplementedError(self.storage_add) + + def action_get(self): + raise NotImplementedError(self.action_get) + + def action_set(self, results): + raise NotImplementedError(self.action_set) + + def action_log(self, message): + raise NotImplementedError(self.action_log) + + def action_fail(self, message=''): + raise NotImplementedError(self.action_fail) + + def network_get(self, endpoint_name, relation_id=None): + raise NotImplementedError(self.network_get) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/version.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/version.py new file mode 100644 index 0000000000000000000000000000000000000000..15e5478555ee0fa948bfb0ad57cc79ba7cef3721 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/ops/version.py @@ -0,0 +1,50 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+from pathlib import Path
+
+__all__ = ('version',)
+
+_FALLBACK = '0.8'  # this gets bumped after release
+
+
+def _get_version():
+    version = _FALLBACK + ".dev0+unknown"
+
+    p = Path(__file__).parent
+    if (p.parent / '.git').exists():
+        try:
+            proc = subprocess.run(
+                ['git', 'describe', '--tags', '--dirty'],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.DEVNULL,
+                cwd=p,
+                check=True)
+        except Exception:
+            pass
+        else:
+            version = proc.stdout.strip().decode('utf8')
+            if '-' in version:
+                # version will look like <tag>-<#commits>-g<hash>[-dirty]
+                # in terms of PEP 440, the tag we'll make sure is a 'public version identifier';
+                # everything after the first - needs to be a 'local version'
+                public, local = version.split('-', 1)
+                version = public + '+' + local.replace('-', '.')
+                # version now <tag>+<#commits>.g<hash>[.dirty]
+                # which is PEP440-compliant (as long as <tag> is :-)
+    return version
+
+
+version = _get_version()
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/requirements-dev.txt b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/requirements-dev.txt
new file mode 100644
index 0000000000000000000000000000000000000000..61bd425a6748e8b316bf81948e809b1eba148f16
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/requirements-dev.txt
@@ -0,0 +1,5 @@
+-r requirements.txt
+
+autopep8
+flake8
+logassert
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/requirements.txt b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5500f007d0bf6c6098afc0f2c6d00915e345a569
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/requirements.txt
@@ -0,0 +1 @@
+PyYAML
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/run_tests b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..56411030fdcd8629ffbfbbebb8a8a0650203a934
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/run_tests
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+python3 -m unittest "$@"
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/setup.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..ceb866815ae1227338a1e2dc67a217a338ae7bbc
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/setup.py
@@ -0,0 +1,75 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
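Editorial note: the `_get_version` helper in ops/version.py above normalizes `git describe --tags --dirty` output into a PEP 440 version by treating everything after the first hyphen as a local version segment. A minimal, standalone illustration of that transformation (the sample describe string is invented):

def describe_to_pep440(described: str) -> str:
    # e.g. '0.8-23-g1a2b3c4-dirty' -> '0.8+23.g1a2b3c4.dirty'
    if '-' not in described:
        return described
    public, local = described.split('-', 1)
    return public + '+' + local.replace('-', '.')


assert describe_to_pep440('0.8-23-g1a2b3c4-dirty') == '0.8+23.g1a2b3c4.dirty'
assert describe_to_pep440('0.8') == '0.8'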
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from importlib.util import spec_from_file_location, module_from_spec +from pathlib import Path +from setuptools import setup, find_packages + + +def _read_me() -> str: + with open("README.md", "rt", encoding="utf8") as fh: + readme = fh.read() + return readme + + +def _get_version() -> str: + """Get the version via ops/version.py, without loading ops/__init__.py""" + spec = spec_from_file_location('ops.version', 'ops/version.py') + module = module_from_spec(spec) + spec.loader.exec_module(module) + + return module.version + + +version = _get_version() +version_path = Path("ops/version.py") +version_backup = Path("ops/version.py~") +version_path.rename(version_backup) +try: + with version_path.open("wt", encoding="utf8") as fh: + fh.write('''\ +# this is a generated file + +version = {!r} +'''.format(version)) + + setup( + name="ops", + version=version, + description="The Python library behind great charms", + long_description=_read_me(), + long_description_content_type="text/markdown", + license="Apache-2.0", + url="https://github.com/canonical/operator", + author="The Charmcraft team at Canonical Ltd.", + author_email="charmcraft@lists.launchpad.net", + packages=find_packages(include=('ops', 'ops.*')), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + "Development Status :: 4 - Beta", + + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + # include Windows once we're running tests there also + # "Operating System :: Microsoft :: Windows", + ], + python_requires='>=3.5', + install_requires=["PyYAML"], + ) + +finally: + version_backup.rename(version_path) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/__init__.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/bin/relation-ids b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/bin/relation-ids new file mode 100755 index 0000000000000000000000000000000000000000..a7e0ead2d3182713bd826696fc403b5a8c54faa6 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/bin/relation-ids @@ -0,0 +1,11 @@ +#!/bin/bash + +case $1 in + db) echo '["db:1"]' ;; + mon) echo '["mon:2"]' ;; + ha) echo '[]' ;; + db0) echo '[]' ;; + db1) echo '["db1:4"]' ;; + db2) echo '["db2:5", "db2:6"]' ;; + *) echo '[]' ;; +esac diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/bin/relation-list b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/bin/relation-list new file mode 100755 index 0000000000000000000000000000000000000000..88490159775624108766a17a35a77599ddea8f03 --- /dev/null +++ 
b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/bin/relation-list @@ -0,0 +1,16 @@ +#!/bin/bash + +fail_not_found() { + 1>&2 echo "ERROR invalid value \"$1\" for option -r: relation not found" + exit 2 +} + +case $2 in + 1) echo '["remote/0"]' ;; + 2) echo '["remote/0"]' ;; + 3) fail_not_found $2 ;; + 4) echo '["remoteapp1/0"]' ;; + 5) echo '["remoteapp1/0"]' ;; + 6) echo '["remoteapp2/0"]' ;; + *) fail_not_found $2 ;; +esac diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/actions.yaml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/actions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d16c94204f2cc0064965553e9b911d3d6075ba17 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/actions.yaml @@ -0,0 +1,29 @@ +foo-bar: + description: Foos the bar. + title: foo-bar + params: + foo-name: + type: string + description: A foo name to bar. + silent: + type: boolean + description: "" + default: false + required: + - foo-name +start: + description: Start the unit. +get-model-name: + description: Return the name of the model +get-status: + description: Return the Status of the unit +log-critical: + description: log a message at Critical level +log-error: + description: log a message at Error level +log-warning: + description: log a message at Warning level +log-info: + description: log a message at Info level +log-debug: + description: log a message at Debug level diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/config.yaml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ffc0186002391ca52273d39bebcc9c4261c47535 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/config.yaml @@ -0,0 +1 @@ +"options": {} diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/__init__.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/__init__.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f17b2969db298b21bc47bbe1d3614ccff93e9c6e --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
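Editorial note: the `relation-ids` and `relation-list` scripts above stand in for Juju's hook tools during the operator framework's unit tests; they print canned JSON for known relation names/ids and fail for unknown ones. A rough sketch of how a test could exercise them on a POSIX system (the checkout path is an assumption, adjust it to the real layout):

import json
import os
import subprocess

fake_bin = os.path.abspath('mod/operator/test/bin')  # hypothetical location of the fake hook tools
env = dict(os.environ, PATH=fake_bin + os.pathsep + os.environ.get('PATH', ''))

out = subprocess.run(['relation-ids', 'db'], env=env,
                     stdout=subprocess.PIPE, check=True).stdout
assert json.loads(out) == ['db:1']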
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""The Operator Framework.""" + +from .version import version as __version__ # noqa: F401 (imported but unused) + +# Import here the bare minimum to break the circular import between modules +from . import charm # noqa: F401 (imported but unused) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/charm.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..d898de859fc444814bc19a7f8f0caaaec6f7e5f4 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/charm.py @@ -0,0 +1,575 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +import os +import pathlib +import typing + +import yaml + +from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents +from ops import model + + +def _loadYaml(source): + if yaml.__with_libyaml__: + return yaml.load(source, Loader=yaml.CSafeLoader) + return yaml.load(source, Loader=yaml.SafeLoader) + + +class HookEvent(EventBase): + """A base class for events that trigger because of a Juju hook firing.""" + + +class ActionEvent(EventBase): + """A base class for events that trigger when a user asks for an Action to be run. + + To read the parameters for the action, see the instance variable `params`. + To respond with the result of the action, call `set_results`. To add progress + messages that are visible as the action is progressing use `log`. + + :ivar params: The parameters passed to the action (read by action-get) + """ + + def defer(self): + """Action events are not deferable like other events. + + This is because an action runs synchronously and the user is waiting for the result. + """ + raise RuntimeError('cannot defer action events') + + def restore(self, snapshot: dict) -> None: + """Used by the operator framework to record the action. + + Not meant to be called directly by Charm code. + """ + env_action_name = os.environ.get('JUJU_ACTION_NAME') + event_action_name = self.handle.kind[:-len('_action')].replace('_', '-') + if event_action_name != env_action_name: + # This could only happen if the dev manually emits the action, or from a bug. + raise RuntimeError('action event kind does not match current action') + # Params are loaded at restore rather than __init__ because + # the model is not available in __init__. + self.params = self.framework.model._backend.action_get() + + def set_results(self, results: typing.Mapping) -> None: + """Report the result of the action. + + Args: + results: The result of the action as a Dict + """ + self.framework.model._backend.action_set(results) + + def log(self, message: str) -> None: + """Send a message that a user will see while the action is running. 
+ + Args: + message: The message for the user. + """ + self.framework.model._backend.action_log(message) + + def fail(self, message: str = '') -> None: + """Report that this action has failed. + + Args: + message: Optional message to record why it has failed. + """ + self.framework.model._backend.action_fail(message) + + +class InstallEvent(HookEvent): + """Represents the `install` hook from Juju.""" + + +class StartEvent(HookEvent): + """Represents the `start` hook from Juju.""" + + +class StopEvent(HookEvent): + """Represents the `stop` hook from Juju.""" + + +class RemoveEvent(HookEvent): + """Represents the `remove` hook from Juju. """ + + +class ConfigChangedEvent(HookEvent): + """Represents the `config-changed` hook from Juju.""" + + +class UpdateStatusEvent(HookEvent): + """Represents the `update-status` hook from Juju.""" + + +class UpgradeCharmEvent(HookEvent): + """Represents the `upgrade-charm` hook from Juju. + + This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju + has unpacked the upgraded charm code, and so this event will be handled with new code. + """ + + +class PreSeriesUpgradeEvent(HookEvent): + """Represents the `pre-series-upgrade` hook from Juju. + + This happens when a user has run `juju upgrade-series MACHINE prepare` and + will fire for each unit that is running on the machine, telling them that + the user is preparing to upgrade the Machine's series (eg trusty->bionic). + The charm should take actions to prepare for the upgrade (a database charm + would want to write out a version-independent dump of the database, so that + when a new version of the database is available in a new series, it can be + used.) + Once all units on a machine have run `pre-series-upgrade`, the user will + initiate the steps to actually upgrade the machine (eg `do-release-upgrade`). + When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire. + """ + + +class PostSeriesUpgradeEvent(HookEvent): + """Represents the `post-series-upgrade` hook from Juju. + + This is run after the user has done a distribution upgrade (or rolled back + and kept the same series). It is called in response to + `juju upgrade-series MACHINE complete`. Charms are expected to do whatever + steps are necessary to reconfigure their applications for the new series. + """ + + +class LeaderElectedEvent(HookEvent): + """Represents the `leader-elected` hook from Juju. + + Juju will trigger this when a new lead unit is chosen for a given application. + This represents the leader of the charm information (not necessarily the primary + of a running application). The main utility is that charm authors can know + that only one unit will be a leader at any given time, so they can do + configuration, etc, that would otherwise require coordination between units. + (eg, selecting a password for a new relation) + """ + + +class LeaderSettingsChangedEvent(HookEvent): + """Represents the `leader-settings-changed` hook from Juju. + + Deprecated. This represents when a lead unit would call `leader-set` to inform + the other units of an application that they have new information to handle. + This has been deprecated in favor of using a Peer relation, and having the + leader set a value in the Application data bag for that peer relation. + (see :class:`RelationChangedEvent`). + """ + + +class CollectMetricsEvent(HookEvent): + """Represents the `collect-metrics` hook from Juju. 
+ + Note that events firing during a CollectMetricsEvent are currently + sandboxed in how they can interact with Juju. To report metrics + use :meth:`.add_metrics`. + """ + + def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None: + """Record metrics that have been gathered by the charm for this unit. + + Args: + metrics: A collection of {key: float} pairs that contains the + metrics that have been gathered + labels: {key:value} strings that can be applied to the + metrics that are being gathered + """ + self.framework.model._backend.add_metrics(metrics, labels) + + +class RelationEvent(HookEvent): + """A base class representing the various relation lifecycle events. + + Charmers should not be creating RelationEvents directly. The events will be + generated by the framework from Juju related events. Users can observe them + from the various `CharmBase.on[relation_name].relation_*` events. + + Attributes: + relation: The Relation involved in this event + app: The remote application that has triggered this event + unit: The remote unit that has triggered this event. This may be None + if the relation event was triggered as an Application level event + """ + + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit is not None and unit.app != app: + raise RuntimeError( + 'cannot create RelationEvent with application {} and unit {}'.format(app, unit)) + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self) -> dict: + """Used by the framework to serialize the event to disk. + + Not meant to be called by Charm code. + """ + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot: dict) -> None: + """Used by the framework to deserialize the event from disk. + + Not meant to be called by Charm code. + """ + self.relation = self.framework.model.get_relation( + snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationCreatedEvent(RelationEvent): + """Represents the `relation-created` hook from Juju. + + This is triggered when a new relation to another app is added in Juju. This + can occur before units for those applications have started. All existing + relations should be established before start. + """ + + +class RelationJoinedEvent(RelationEvent): + """Represents the `relation-joined` hook from Juju. + + This is triggered whenever a new unit of a related application joins the relation. + (eg, a unit was added to an existing related app, or a new relation was established + with an application that already had units.) + """ + + +class RelationChangedEvent(RelationEvent): + """Represents the `relation-changed` hook from Juju. + + This is triggered whenever there is a change to the data bucket for a related + application or unit. Look at `event.relation.data[event.unit/app]` to see the + new information. + """ + + +class RelationDepartedEvent(RelationEvent): + """Represents the `relation-departed` hook from Juju. 
+ + This is the inverse of the RelationJoinedEvent, representing when a unit + is leaving the relation (the unit is being removed, the app is being removed, + the relation is being removed). It is fired once for each unit that is + going away. + """ + + +class RelationBrokenEvent(RelationEvent): + """Represents the `relation-broken` hook from Juju. + + If a relation is being removed (`juju remove-relation` or `juju remove-application`), + once all the units have been removed, RelationBrokenEvent will fire to signal + that the relationship has been fully terminated. + """ + + +class StorageEvent(HookEvent): + """Base class representing Storage related events.""" + + +class StorageAttachedEvent(StorageEvent): + """Represents the `storage-attached` hook from Juju. + + Called when new storage is available for the charm to use. + """ + + +class StorageDetachingEvent(StorageEvent): + """Represents the `storage-detaching` hook from Juju. + + Called when storage a charm has been using is going away. + """ + + +class CharmEvents(ObjectEvents): + """The events that are generated by Juju in response to the lifecycle of an application.""" + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + remove = EventSource(RemoveEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + collect_metrics = EventSource(CollectMetricsEvent) + + +class CharmBase(Object): + """Base class that represents the Charm overall. + + Usually this initialization is done by ops.main.main() rather than Charm authors + directly instantiating a Charm. + + Args: + framework: The framework responsible for managing the Model and events for this + Charm. + key: Ignored; will remove after deprecation period of the signature change. + """ + + on = CharmEvents() + + def __init__(self, framework: Framework, key: typing.Optional = None): + super().__init__(framework, None) + + for relation_name in self.framework.meta.relations: + relation_name = relation_name.replace('-', '_') + self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent) + self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent) + self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent) + self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent) + self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent) + self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name = action_name.replace('-', '_') + self.on.define_event(action_name + '_action', ActionEvent) + + @property + def app(self) -> model.Application: + """Application that this unit is part of.""" + return self.framework.model.app + + @property + def unit(self) -> model.Unit: + """Unit that this execution is responsible for.""" + return self.framework.model.unit + + @property + def meta(self) -> 'CharmMeta': + """CharmMeta of this charm. 
+ """ + return self.framework.meta + + @property + def charm_dir(self) -> pathlib.Path: + """Root directory of the Charm as it is running. + """ + return self.framework.charm_dir + + +class CharmMeta: + """Object containing the metadata for the charm. + + This is read from metadata.yaml and/or actions.yaml. Generally charms will + define this information, rather than reading it at runtime. This class is + mostly for the framework to understand what the charm has defined. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. + + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. + + Attributes: + name: The name of this charm + summary: Short description of what this charm does + description: Long description for this charm + maintainers: A list of strings of the email addresses of the maintainers + of this charm. + tags: Charm store tag metadata for categories associated with this charm. + terms: Charm store terms that should be agreed to before this charm can + be deployed. (Used for things like licensing issues.) + series: The list of supported OS series that this charm can support. + The first entry in the list is the default series that will be + used by deploy if no other series is requested by the user. + subordinate: True/False whether this charm is intended to be used as a + subordinate charm. + min_juju_version: If supplied, indicates this charm needs features that + are not available in older versions of Juju. + requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation. + provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation. + peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation. + relations: A dict containing all :class:`RelationMeta` attributes (merged from other + sections) + storages: A dict of {name: :class:`StorageMeta`} for each defined storage. + resources: A dict of {name: :class:`ResourceMeta`} for each defined resource. + payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload. + extra_bindings: A dict of additional named bindings that a charm can use + for network configuration. + actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined. 
+ Args: + raw: a mapping containing the contents of metadata.yaml + actions_raw: a mapping containing the contents of actions.yaml + """ + + def __init__(self, raw: dict = {}, actions_raw: dict = {}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta(RelationRole.requires, name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta(RelationRole.provides, name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta(RelationRole.peer, name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', {}) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml( + cls, metadata: typing.Union[str, typing.TextIO], + actions: typing.Optional[typing.Union[str, typing.TextIO]] = None): + """Instantiate a CharmMeta from a YAML description of metadata.yaml. + + Args: + metadata: A YAML description of charm metadata (name, relations, etc.) + This can be a simple string, or a file-like object. (passed to `yaml.safe_load`). + actions: YAML description of Actions for this charm (eg actions.yaml) + """ + meta = _loadYaml(metadata) + raw_actions = {} + if actions is not None: + raw_actions = _loadYaml(actions) + return cls(meta, raw_actions) + + +class RelationRole(enum.Enum): + peer = 'peer' + requires = 'requires' + provides = 'provides' + + def is_peer(self) -> bool: + """Return whether the current role is peer. + + A convenience to avoid having to import charm. + """ + return self is RelationRole.peer + + +class RelationMeta: + """Object containing metadata about a relation definition. + + Should not be constructed directly by Charm code. Is gotten from one of + :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`, + or :attr:`CharmMeta.relations`. + + Attributes: + role: This is one of peer/requires/provides + relation_name: Name of this relation from metadata.yaml + interface_name: Optional definition of the interface protocol. + scope: "global" or "container" scope based on how the relation should be used. 
+ """ + + def __init__(self, role: RelationRole, relation_name: str, raw: dict): + if not isinstance(role, RelationRole): + raise TypeError("role should be a Role, not {!r}".format(role)) + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition.""" + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition.""" + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition.""" + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + """Object containing metadata about an action's definition.""" + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {: } + self.required = raw.get('required', []) # [, ...] diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/framework.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/framework.py new file mode 100755 index 0000000000000000000000000000000000000000..b7c4749ff2b5bfb4f354bf1a8d4cd6ed64cf0da5 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/framework.py @@ -0,0 +1,1067 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import collections.abc +import inspect +import keyword +import logging +import marshal +import os +import pathlib +import pdb +import re +import sys +import types +import weakref + +from ops import charm +from ops.storage import ( + NoSnapshotError, + SQLiteStorage, +) + +logger = logging.getLogger(__name__) + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. 
+ + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. + """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = "{}/{}[{}]".format(parent, kind, key) + else: + self._path = "{}/{}".format(parent, kind) + else: + if key: + self._path = "{}[{}]".format(kind, key) + else: + self._path = "{}".format(kind) + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {}".format(path)) + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. + """ + self.deferred = False + + +class EventSource: + """EventSource wraps an event type with a descriptor to facilitate observing and emitting. + + It is generally used as: + + class SomethingHappened(EventBase): + pass + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + With that, instances of that type will offer the someobj.something_happened + attribute which is a BoundEvent and may be used to emit and observe the event. + """ + + def __init__(self, event_type): + if not isinstance(event_type, type) or not issubclass(event_type, EventBase): + raise RuntimeError( + 'Event requires a subclass of EventBase as an argument, got {}'.format(event_type)) + self.event_type = event_type + self.event_kind = None + self.emitter_type = None + + def _set_name(self, emitter_type, event_kind): + if self.event_kind is not None: + raise RuntimeError( + 'EventSource({}) reused as {}.{} and {}.{}'.format( + self.event_type.__name__, + self.emitter_type.__name__, + self.event_kind, + emitter_type.__name__, + event_kind, + )) + self.event_kind = event_kind + self.emitter_type = emitter_type + + def __get__(self, emitter, emitter_type=None): + if emitter is None: + return self + # Framework might not be available if accessed as CharmClass.on.event + # rather than charm_instance.on.event, but in that case it couldn't be + # emitted anyway, so there's no point to registering it. 
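Editorial note: based on the `Handle` implementation above, a handle path is a sequence of `kind[key]` segments joined by `/`, and `Handle.from_path` reverses that encoding. A small illustrative round trip (run outside this module it needs the import shown):

from ops.framework import Handle

root = Handle(None, 'MyCharm', None)
event = root.nest('on', None).nest('install', '42')

assert event.path == 'MyCharm/on/install[42]'
assert Handle.from_path('MyCharm/on/install[42]') == event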
+        framework = getattr(emitter, 'framework', None)
+        if framework is not None:
+            framework.register_type(self.event_type, emitter, self.event_kind)
+        return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
+class BoundEvent:
+
+    def __repr__(self):
+        return '<BoundEvent {} bound to {}.{} at {}>'.format(
+            self.event_type.__name__,
+            type(self.emitter).__name__,
+            self.event_kind,
+            hex(id(self)),
+        )
+
+    def __init__(self, emitter, event_type, event_kind):
+        self.emitter = emitter
+        self.event_type = event_type
+        self.event_kind = event_kind
+
+    def emit(self, *args, **kwargs):
+        """Emit event to all registered observers.
+
+        The current storage state is committed before and after each observer is notified.
+        """
+        framework = self.emitter.framework
+        key = framework._next_event_key()
+        event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
+        framework._emit(event)
+
+
+class HandleKind:
+    """Helper descriptor to define the Object.handle_kind field.
+
+    The handle_kind for an object defaults to its type name, but it may
+    be explicitly overridden if desired.
+    """
+
+    def __get__(self, obj, obj_type):
+        kind = obj_type.__dict__.get("handle_kind")
+        if kind:
+            return kind
+        return obj_type.__name__
+
+
+class _Metaclass(type):
+    """Helper class to ensure proper instantiation of Object-derived classes.
+
+    This class currently has a single purpose: events derived from EventSource
+    that are class attributes of Object-derived classes need to be told what
+    their name is in that class. For example, in
+
+    class SomeObject(Object):
+        something_happened = EventSource(SomethingHappened)
+
+    the instance of EventSource needs to know it's called 'something_happened'.
+
+    Starting from python 3.6 we could use __set_name__ on EventSource for this,
+    but until then this (meta)class does the equivalent work.
+
+    TODO: when we drop support for 3.5 drop this class, and rename _set_name in
+    EventSource to __set_name__; everything should continue to work.
+
+    """
+
+    def __new__(typ, *a, **kw):
+        k = super().__new__(typ, *a, **kw)
+        # k is now the Object-derived class; loop over its class attributes
+        for n, v in vars(k).items():
+            # we could do duck typing here if we want to support
+            # non-EventSource-derived shenanigans. We don't.
+            if isinstance(v, EventSource):
+                # this is what 3.6+ does automatically for us:
+                v._set_name(k, n)
+        return k
+
+
+class Object(metaclass=_Metaclass):
+
+    handle_kind = HandleKind()
+
+    def __init__(self, parent, key):
+        kind = self.handle_kind
+        if isinstance(parent, Framework):
+            self.framework = parent
+            # Avoid Framework instances having a circular reference to themselves.
+            if self.framework is self:
+                self.framework = weakref.proxy(self.framework)
+            self.handle = Handle(None, kind, key)
+        else:
+            self.framework = parent.framework
+            self.handle = Handle(parent, kind, key)
+        self.framework._track(self)
+
+        # TODO Detect conflicting handles here.
+
+    @property
+    def model(self):
+        return self.framework.model
+
+
+class ObjectEvents(Object):
+    """Convenience type to allow defining .on attributes at class level."""
+
+    handle_kind = "on"
+
+    def __init__(self, parent=None, key=None):
+        if parent is not None:
+            super().__init__(parent, key)
+        else:
+            self._cache = weakref.WeakKeyDictionary()
+
+    def __get__(self, emitter, emitter_type):
+        if emitter is None:
+            return self
+        instance = self._cache.get(emitter)
+        if instance is None:
+            # Same type, different instance, more data.
Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls: a type to define an event on. + + event_kind: an attribute name that will be used to access the + event. Must be a valid python identifier, not be a keyword + or an existing attribute. + + event_type: a type of the event to define. + + """ + prefix = 'unable to define an event with event_kind that ' + if not event_kind.isidentifier(): + raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind) + elif keyword.iskeyword(event_kind): + raise RuntimeError(prefix + 'is a python keyword: ' + event_kind) + try: + getattr(cls, event_kind) + raise RuntimeError( + prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind)) + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor._set_name(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. + event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(ObjectEvents): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return "cannot restore {} since no class was registered for it".format(self.handle_path) + + +# the message to show to the user when a pdb breakpoint goes active +_BREAKPOINT_WELCOME_MESSAGE = """ +Starting pdb to debug charm operator. +Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort. +Future breakpoints may interrupt execution again. +More details at https://discourse.jujucharms.com/t/debugging-charm-hooks + +""" + + +_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$' + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. 
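Editorial note: putting `EventBase`, `EventSource`, `Object`, and `ObjectEvents` together, a charm library component can expose its own events; observers then register with `framework.observe` as usual. A hedged sketch, with invented `Cache` names that are not part of this patch:

from ops.framework import EventBase, EventSource, Object, ObjectEvents


class CacheWarmedEvent(EventBase):
    """Custom event; override snapshot()/restore() if it needs to carry state."""


class CacheEvents(ObjectEvents):
    warmed = EventSource(CacheWarmedEvent)


class Cache(Object):
    on = CacheEvents()

    def warm(self):
        # Any observer registered via framework.observe(cache.on.warmed, handler)
        # is notified synchronously here.
        self.on.warmed.emit()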
+ model = None + meta = None + charm_dir = None + + def __init__(self, storage, charm_dir, meta, model): + + super().__init__(self, None) + + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + if isinstance(storage, (str, pathlib.Path)): + logger.warning( + "deprecated: Framework now takes a Storage not a path") + storage = SQLiteStorage(storage) + self._storage = storage + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do + # breakpoint(); if Python < 3.7, this doesn't affect anything + sys.breakpointhook = self.breakpoint + + # Flag to indicate that we already presented the welcome message in a debugger breakpoint + self._breakpoint_welcomed = False + + # Parse once the env var, which may be used multiple times later + debug_at = os.environ.get('JUJU_DEBUG_AT') + self._juju_debug_at = debug_at.split(',') if debug_at else () + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError( + 'two objects claiming to be {} have been created'.format(obj.handle.path)) + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. + self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. 
+ """ + if type(value) not in self._type_known: + raise RuntimeError( + 'cannot save {} values before registering that type'.format(type(value).__name__)) + data = value.snapshot() + + # Use marshal as a validator, enforcing the use of simple types, as we later the + # information is really pickled, which is too error prone for future evolution of the + # stored data (e.g. if the developer stores a custom object and later changes its + # class name; when unpickling the original class will not be there and event + # data loading will fail). + try: + marshal.dumps(data) + except ValueError: + msg = "unable to save the data for {}, it must contain only simple types: {!r}" + raise ValueError(msg.format(value.__class__.__name__, data)) + + self._storage.save_snapshot(value.handle.path, data) + + def load_snapshot(self, handle): + parent_path = None + if handle.parent: + parent_path = handle.parent.path + cls = self._type_registry.get((parent_path, handle.kind)) + if not cls: + raise NoTypeError(handle.path) + data = self._storage.load_snapshot(handle.path) + obj = cls.__new__(cls) + obj.framework = self + obj.handle = handle + obj.restore(data) + self._track(obj) + return obj + + def drop_snapshot(self, handle): + self._storage.drop_snapshot(handle.path) + + def observe(self, bound_event: BoundEvent, observer: types.MethodType): + """Register observer to be called when bound_event is emitted. + + The bound_event is generally provided as an attribute of the object that emits + the event, and is created in this style: + + class SomeObject: + something_happened = Event(SomethingHappened) + + That event may be observed as: + + framework.observe(someobj.something_happened, self._on_something_happened) + + Raises: + RuntimeError: if bound_event or observer are the wrong type. + """ + if not isinstance(bound_event, BoundEvent): + raise RuntimeError( + 'Framework.observe requires a BoundEvent as second parameter, got {}'.format( + bound_event)) + if not isinstance(observer, types.MethodType): + # help users of older versions of the framework + if isinstance(observer, charm.CharmBase): + raise TypeError( + 'observer methods must now be explicitly provided;' + ' please replace observe(self.on.{0}, self)' + ' with e.g. observe(self.on.{0}, self._on_{0})'.format( + bound_event.event_kind)) + raise RuntimeError( + 'Framework.observe requires a method as third parameter, got {}'.format(observer)) + + event_type = bound_event.event_type + event_kind = bound_event.event_kind + emitter = bound_event.emitter + + self.register_type(event_type, emitter, event_kind) + + if hasattr(emitter, "handle"): + emitter_path = emitter.handle.path + else: + raise RuntimeError( + 'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__)) + + # Validate that the method has an acceptable call signature. + sig = inspect.signature(observer) + # Self isn't included in the params list, so the first arg will be the event. + extra_params = list(sig.parameters.values())[1:] + + method_name = observer.__name__ + observer = observer.__self__ + if not sig.parameters: + raise TypeError( + '{}.{} must accept event parameter'.format(type(observer).__name__, method_name)) + elif any(param.default is inspect.Parameter.empty for param in extra_params): + # Allow for additional optional params, since there's no reason to exclude them, but + # required params will break. 
+ raise TypeError( + '{}.{} has extra required parameter'.format(type(observer).__name__, method_name)) + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 + # means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + saved = False + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of + # all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + if not saved: + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + saved = True + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + if saved: + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + event_is_from_juju = isinstance(event, charm.HookEvent) + event_is_action = isinstance(event, charm.ActionEvent) + if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at: + # Present the welcome message and run under PDB. + self._show_debug_code_message() + pdb.runcall(custom_handler, event) + else: + # Regular call to the registered method. + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from + # scratch in the next path. + self.framework._forget(event) + + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + + def _show_debug_code_message(self): + """Present the welcome message (only once!) 
when using debugger functionality.""" + if not self._breakpoint_welcomed: + self._breakpoint_welcomed = True + print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='') + + def breakpoint(self, name=None): + """Add breakpoint, optionally named, at the place where this method is called. + + For the breakpoint to be activated the JUJU_DEBUG_AT environment variable + must be set to "all" or to the specific name parameter provided, if any. In every + other situation calling this method does nothing. + + The framework also provides a standard breakpoint named "hook", that will + stop execution when a hook event is about to be handled. + + For those reasons, the "all" and "hook" breakpoint names are reserved. + """ + # If given, validate the name comply with all the rules + if name is not None: + if not isinstance(name, str): + raise TypeError('breakpoint names must be strings') + if name in ('hook', 'all'): + raise ValueError('breakpoint names "all" and "hook" are reserved') + if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name): + raise ValueError('breakpoint names must look like "foo" or "foo-bar"') + + indicated_breakpoints = self._juju_debug_at + if not indicated_breakpoints: + return + + if 'all' in indicated_breakpoints or name in indicated_breakpoints: + self._show_debug_code_message() + + # If we call set_trace() directly it will open the debugger *here*, so indicating + # it to use our caller's frame + code_frame = inspect.currentframe().f_back + pdb.Pdb().set_trace(code_frame) + else: + logger.warning( + "Breakpoint %r skipped (not found in the requested breakpoints: %s)", + name, indicated_breakpoints) + + def remove_unreferenced_events(self): + """Remove events from storage that are not referenced. + + In older versions of the framework, events that had no observers would get recorded but + never deleted. This makes a best effort to find these events and remove them from the + database. + """ + event_regex = re.compile(_event_regex) + to_remove = [] + for handle_path in self._storage.list_snapshots(): + if event_regex.match(handle_path): + notices = self._storage.notices(handle_path) + if next(notices, None) is None: + # There are no notices for this handle_path, it is valid to remove it + to_remove.append(handle_path) + for handle_path in to_remove: + self._storage.drop_snapshot(handle_path) + + +class StoredStateData(Object): + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + return self._cache + + def restore(self, snapshot): + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. 
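Editorial note: `StoredStateData` above is what actually gets snapshotted on commit; charm code normally reaches it through the `StoredState` descriptor defined further down. A typical, hypothetical usage sketch (the `CounterCharm` name and attribute are illustrative):

from ops.charm import CharmBase
from ops.framework import StoredState


class CounterCharm(CharmBase):
    _stored = StoredState()

    def __init__(self, framework, key=None):
        super().__init__(framework, key)
        self._stored.set_default(start_count=0)
        self.framework.observe(self.on.start, self._on_start)

    def _on_start(self, event):
        # Assigning through _stored marks the data dirty, so it is snapshotted
        # automatically on the next framework commit.
        self._stored.start_count += 1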
+ self.__dict__["_data"] = data + self.__dict__["_attr_name"] = attr_name + + parent.framework.observe(parent.framework.on.commit, self._data.on_commit) + + def __getattr__(self, key): + # "on" is the only reserved key that can't be used in the data map. + if key == "on": + return self._data.on + if key not in self._data: + raise AttributeError("attribute '{}' is not stored".format(key)) + return _wrap_stored(self._data, self._data[key]) + + def __setattr__(self, key, value): + if key == "on": + raise AttributeError("attribute 'on' is reserved and cannot be set") + + value = _unwrap_stored(self._data, value) + + if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)): + raise AttributeError( + 'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format( + key, type(value).__name__)) + + self._data[key] = _unwrap_stored(self._data, value) + + def set_default(self, **kwargs): + """"Set the value of any given key if it has not already been set""" + for k, v in kwargs.items(): + if k not in self._data: + self._data[k] = v + + +class StoredState: + """A class used to store data the charm needs persisted across invocations. + + Example:: + + class MyClass(Object): + _stored = StoredState() + + Instances of `MyClass` can transparently save state between invocations by + setting attributes on `_stored`. Initial state should be set with + `set_default` on the bound object, that is:: + + class MyClass(Object): + _stored = StoredState() + + def __init__(self, parent, key): + super().__init__(parent, key) + self._stored.set_default(seen=set()) + self.framework.observe(self.on.seen, self._on_seen) + + def _on_seen(self, event): + self._stored.seen.add(event.uuid) + + """ + + def __init__(self): + self.parent_type = None + self.attr_name = None + + def __get__(self, parent, parent_type=None): + if self.parent_type is not None and self.parent_type not in parent_type.mro(): + # the StoredState instance is being shared between two unrelated classes + # -> unclear what is exepcted of us -> bail out + raise RuntimeError( + 'StoredState shared by {} and {}'.format( + self.parent_type.__name__, parent_type.__name__)) + + if parent is None: + # accessing via the class directly (e.g. MyClass.stored) + return self + + bound = None + if self.attr_name is not None: + bound = parent.__dict__.get(self.attr_name) + if bound is not None: + # we already have the thing from a previous pass, huzzah + return bound + + # need to find ourselves amongst the parent's bases + for cls in parent_type.mro(): + for attr_name, attr_value in cls.__dict__.items(): + if attr_value is not self: + continue + # we've found ourselves! is it the first time? 
+ if bound is not None: + # the StoredState instance is being stored in two different + # attributes -> unclear what is expected of us -> bail out + raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format( + cls.__name__, self.attr_name, attr_name)) + # we've found ourselves for the first time; save where, and bind the object + self.attr_name = attr_name + self.parent_type = cls + bound = BoundStoredState(parent, attr_name) + + if bound is not None: + # cache the bound object to avoid the expensive lookup the next time + # (don't use setattr, to keep things symmetric with the fast-path lookup above) + parent.__dict__[self.attr_name] = bound + return bound + + raise AttributeError( + 'cannot find {} attribute in type {}'.format( + self.__class__.__name__, parent_type.__name__)) + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def _unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + 
return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredList): + return self._under >= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under >= other + else: + return NotImplemented + + +class StoredSet(collections.abc.MutableSet): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def add(self, key): + self._under.add(key) + self._stored_data.dirty = True + + def discard(self, key): + self._under.discard(key) + self._stored_data.dirty = True + + def __contains__(self, key): + return key in self._under + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + @classmethod + def _from_iterable(cls, it): + """Construct an instance of the class from any iterable input. + + Per https://docs.python.org/3/library/collections.abc.html + if the Set mixin is being used in a class with a different constructor signature, + you will need to override _from_iterable() with a classmethod that can construct + new instances from an iterable argument. + """ + return set(it) + + def __le__(self, other): + if isinstance(other, StoredSet): + return self._under <= other._under + elif isinstance(other, collections.abc.Set): + return self._under <= other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredSet): + return self._under >= other._under + elif isinstance(other, collections.abc.Set): + return self._under >= other + else: + return NotImplemented + + def __eq__(self, other): + if isinstance(other, StoredSet): + return self._under == other._under + elif isinstance(other, collections.abc.Set): + return self._under == other + else: + return NotImplemented diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/jujuversion.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/jujuversion.py new file mode 100755 index 0000000000000000000000000000000000000000..b2b8177dbe396f0d8c46b86e26af6b4e54ea046d --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/jujuversion.py @@ -0,0 +1,98 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from functools import total_ordering + + +@total_ordering +class JujuVersion: + + PATTERN = r'''^ + (?P<major>\d{1,9})\.(?P<minor>\d{1,9}) # <major> and <minor> numbers are always there + ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch> + (\.(?P<build>\d{1,9}))?$ # and sometimes with a .<build> number.
+ ''' + + def __init__(self, version): + m = re.match(self.PATTERN, version, re.VERBOSE) + if not m: + raise RuntimeError('"{}" is not a valid Juju version string'.format(version)) + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch) + else: + s = '{}.{}.{}'.format(self.major, self.minor, self.patch) + if self.build > 0: + s += '.{}'.format(self.build) + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + return ( + self.major == other.major + and self.minor == other.minor + and self.tag == other.tag + and self.build == other.build + and self.patch == other.patch) + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False + + @classmethod + def from_environ(cls) -> 'JujuVersion': + """Build a JujuVersion from JUJU_VERSION.""" + v = os.environ.get('JUJU_VERSION') + if not v: + raise RuntimeError('environ has no JUJU_VERSION') + return cls(v) + + def has_app_data(self) -> bool: + """Determine whether this juju version knows about app data.""" + return (self.major, self.minor, self.patch) >= (2, 7, 0) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/lib/__init__.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..edb9fcacea6f0173aed9f07ca8a683cfead989cc --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/lib/__init__.py @@ -0,0 +1,194 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
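# Editor's sketch: a minimal, illustrative use of the JujuVersion class from jujuversion.py
# above, assuming the charm's lib directory (where this vendored ops package lives) is on
# PYTHONPATH. The version strings are examples only; inside a hook the value normally comes
# from the JUJU_VERSION environment variable via JujuVersion.from_environ().
from ops.jujuversion import JujuVersion

v = JujuVersion('2.8.1')
assert (v.major, v.minor, v.tag, v.patch, v.build) == (2, 8, '', 1, 0)
assert v > JujuVersion('2.7.0')   # total_ordering derives > from __lt__ and __eq__
assert v == '2.8.1'               # plain strings are parsed before comparison
assert v.has_app_data()           # application relation data needs Juju >= 2.7.0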
+ +import sys +import os +import re + +from ast import literal_eval +from importlib.util import module_from_spec +from importlib.machinery import ModuleSpec +from pkgutil import get_importer +from types import ModuleType + + +_libraries = None + +_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''') +_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''') + +# Not perfect, but should do for now. +_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''') + + +def use(name: str, api: int, author: str) -> ModuleType: + """Use a library from the ops libraries. + + Args: + name: the name of the library requested. + api: the API version of the library. + author: the author of the library. If not given, requests the + one in the standard library. + Raises: + ImportError: if the library cannot be found. + TypeError: if the name, api, or author are the wrong type. + ValueError: if the name, api, or author are invalid. + """ + if not isinstance(name, str): + raise TypeError("invalid library name: {!r} (must be a str)".format(name)) + if not isinstance(author, str): + raise TypeError("invalid library author: {!r} (must be a str)".format(author)) + if not isinstance(api, int): + raise TypeError("invalid library API: {!r} (must be an int)".format(api)) + if api < 0: + raise ValueError('invalid library api: {} (must be ≥0)'.format(api)) + if not _libname_re.match(name): + raise ValueError("invalid library name: {!r} (chars and digits only)".format(name)) + if not _libauthor_re.match(author): + raise ValueError("invalid library author email: {!r}".format(author)) + + if _libraries is None: + autoimport() + + versions = _libraries.get((name, author), ()) + for lib in versions: + if lib.api == api: + return lib.import_module() + + others = ', '.join(str(lib.api) for lib in versions) + if others: + msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format( + name, author, api, others) + else: + msg = 'cannot find library "{}" from "{}"'.format(name, author) + + raise ImportError(msg, name=name) + + +def autoimport(): + """Find all libs in the path and enable use of them. + + You only need to call this if you've installed a package or + otherwise changed sys.path in the current run, and need to see the + changes. Otherwise libraries are found on first call of `use`. + """ + global _libraries + _libraries = {} + for spec in _find_all_specs(sys.path): + lib = _parse_lib(spec) + if lib is None: + continue + + versions = _libraries.setdefault((lib.name, lib.author), []) + versions.append(lib) + versions.sort(reverse=True) + + +def _find_all_specs(path): + for sys_dir in path: + if sys_dir == "": + sys_dir = "." 
+ try: + top_dirs = os.listdir(sys_dir) + except OSError: + continue + for top_dir in top_dirs: + opslib = os.path.join(sys_dir, top_dir, 'opslib') + try: + lib_dirs = os.listdir(opslib) + except OSError: + continue + finder = get_importer(opslib) + if finder is None or not hasattr(finder, 'find_spec'): + continue + for lib_dir in lib_dirs: + spec = finder.find_spec(lib_dir) + if spec is None: + continue + if spec.loader is None: + # a namespace package; not supported + continue + yield spec + + +# only the first this many lines of a file are looked at for the LIB* constants +_MAX_LIB_LINES = 99 + + +def _parse_lib(spec): + if spec.origin is None: + return None + + _expected = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int} + + try: + with open(spec.origin, 'rt', encoding='utf-8') as f: + libinfo = {} + for n, line in enumerate(f): + if len(libinfo) == len(_expected): + break + if n > _MAX_LIB_LINES: + return None + m = _libline_re.match(line) + if m is None: + continue + key, value = m.groups() + if key in _expected: + value = literal_eval(value) + if not isinstance(value, _expected[key]): + return None + libinfo[key] = value + else: + if len(libinfo) != len(_expected): + return None + except Exception: + return None + + return _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH']) + + +class _Lib: + + def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int): + self.spec = spec + self.name = name + self.author = author + self.api = api + self.patch = patch + + self._module = None + + def __repr__(self): + return "<_Lib {0.name} by {0.author}, API {0.api}, patch {0.patch}>".format(self) + + def import_module(self) -> ModuleType: + if self._module is None: + module = module_from_spec(self.spec) + self.spec.loader.exec_module(module) + self._module = module + return self._module + + def __eq__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a == b + + def __lt__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a < b diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/log.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/log.py new file mode 100644 index 0000000000000000000000000000000000000000..4aac5543aec4d84dc393e79b772a30284712d6d4 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/log.py @@ -0,0 +1,51 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
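# Editor's sketch: how a charm might publish and consume a shared library through the
# ops.lib loader in ops/lib/__init__.py above. The 'pgsql' name, author address and
# on-disk layout are placeholders, shown only to illustrate what _parse_lib() scans for.
#
# In the published library, e.g. a package at <some sys.path dir>/<pkg>/opslib/pgsql/__init__.py,
# module-level constants near the top advertise it to autoimport():
LIBNAME = 'pgsql'
LIBAPI = 1
LIBPATCH = 3
LIBAUTHOR = 'someone@example.com'

# In the consuming charm, request that name/author at a specific API version;
# use() raises ImportError if no matching library is found on the path:
import ops.lib
pgsql = ops.lib.use('pgsql', 1, 'someone@example.com')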
+ +import sys +import logging + + +class JujuLogHandler(logging.Handler): + """A handler for sending logs to Juju via juju-log.""" + + def __init__(self, model_backend, level=logging.DEBUG): + super().__init__(level) + self.model_backend = model_backend + + def emit(self, record): + self.model_backend.juju_log(record.levelname, self.format(record)) + + +def setup_root_logging(model_backend, debug=False): + """Setup python logging to forward messages to juju-log. + + By default, logging is set to DEBUG level, and messages will be filtered by Juju. + Charmers can also set their own default log level with:: + + logging.getLogger().setLevel(logging.INFO) + + model_backend -- a ModelBackend to use for juju-log + debug -- if True, write logs to stderr as well as to juju-log. + """ + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.addHandler(JujuLogHandler(model_backend)) + if debug: + handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + + sys.excepthook = lambda etype, value, tb: logger.error( + "Uncaught exception while in charm code:", exc_info=(etype, value, tb)) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/main.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/main.py new file mode 100755 index 0000000000000000000000000000000000000000..6dc31c3575044796e8fe1f61b8415395689d6339 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/main.py @@ -0,0 +1,348 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import logging +import os +import subprocess +import sys +import warnings +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model +import ops.storage + +from ops.log import setup_root_logging + +CHARM_STATE_FILE = '.unit-state.db' + + +logger = logging.getLogger() + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/op/main.py structure. + charm_dir = Path('{}/../../..'.format(__file__)).resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _create_event_link(charm, bound_event): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. 
+ """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError( + 'action event name {} needs _action suffix'.format(bound_event.event_kind)) + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. + event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError( + 'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type)) + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + # CPython has different implementations for populating sys.argv[0] for Linux and Windows. + # For Windows it is always an absolute path (any symlinks are resolved) + # while for Linux it can be a relative path. + target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir)) + + # Ignore the non-symlink files or directories + # assuming the charm author knows what they are doing. + logger.debug( + 'Creating a new relative symlink at %s pointing to %s', + event_path, target_path) + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file + which is determined by inspecting symlinks provided by the charm + author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + + """ + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + logger.debug("Event %s not defined for %s.", event_name, charm) + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. 
+ if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + logger.debug('Emitting Juju event %s.', event_name) + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name)) + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +class _Dispatcher: + """Encapsulate how to figure out what event Juju wants us to run. + + Also knows how to run “legacy” hooks when Juju called us via a top-level + ``dispatch`` binary. + + Args: + charm_dir: the toplevel directory of the charm + + Attributes: + event_name: the name of the event to run + is_dispatch_aware: are we running under a Juju that knows about the + dispatch binary? + + """ + + def __init__(self, charm_dir: Path): + self._charm_dir = charm_dir + self._exec_path = Path(sys.argv[0]) + + if 'JUJU_DISPATCH_PATH' in os.environ and (charm_dir / 'dispatch').exists(): + self._init_dispatch() + else: + self._init_legacy() + + def ensure_event_links(self, charm): + """Make sure necessary symlinks are present on disk""" + + if self.is_dispatch_aware: + # links aren't needed + return + + # When a charm is force-upgraded and a unit is in an error state Juju + # does not run upgrade-charm and instead runs the failed hook followed + # by config-changed. Given the nature of force-upgrading the hook setup + # code is not triggered on config-changed. + # + # 'start' event is included as Juju does not fire the install event for + # K8s charms (see LP: #1854635). + if (self.event_name in ('install', 'start', 'upgrade_charm') + or self.event_name.endswith('_storage_attached')): + _setup_event_links(self._charm_dir, charm) + + def run_any_legacy_hook(self): + """Run any extant legacy hook. + + If there is both a dispatch file and a legacy hook for the + current event, run the wanted legacy hook. 
+ """ + + if not self.is_dispatch_aware: + # we *are* the legacy hook + return + + dispatch_path = self._charm_dir / self._dispatch_path + if not dispatch_path.exists(): + logger.debug("Legacy %s does not exist.", self._dispatch_path) + return + + # super strange that there isn't an is_executable + if not os.access(str(dispatch_path), os.X_OK): + logger.warning("Legacy %s exists but is not executable.", self._dispatch_path) + return + + if dispatch_path.resolve() == self._exec_path.resolve(): + logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path) + return + + argv = sys.argv.copy() + argv[0] = str(dispatch_path) + logger.info("Running legacy %s.", self._dispatch_path) + try: + subprocess.run(argv, check=True) + except subprocess.CalledProcessError as e: + logger.warning( + "Legacy %s exited with status %d.", + self._dispatch_path, e.returncode) + sys.exit(e.returncode) + else: + logger.debug("Legacy %s exited with status 0.", self._dispatch_path) + + def _set_name_from_path(self, path: Path): + """Sets the name attribute to that which can be inferred from the given path.""" + name = path.name.replace('-', '_') + if path.parent.name == 'actions': + name = '{}_action'.format(name) + self.event_name = name + + def _init_legacy(self): + """Set up the 'legacy' dispatcher. + + The current Juju doesn't know about 'dispatch' and calls hooks + explicitly. + """ + self.is_dispatch_aware = False + self._set_name_from_path(self._exec_path) + + def _init_dispatch(self): + """Set up the new 'dispatch' dispatcher. + + The current Juju will run 'dispatch' if it exists, and otherwise fall + back to the old behaviour. + + JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install, + in both cases. + """ + self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH']) + + if 'OPERATOR_DISPATCH' in os.environ: + logger.debug("Charm called itself via %s.", self._dispatch_path) + sys.exit(0) + os.environ['OPERATOR_DISPATCH'] = '1' + + self.is_dispatch_aware = True + self._set_name_from_path(self._dispatch_path) + + def is_restricted_context(self): + """"Return True if we are running in a restricted Juju context. + + When in a restricted context, most commands (relation-get, config-get, + state-get) are not available. As such, we change how we interact with + Juju. + """ + return self.event_name in ('collect_metrics',) + + +def main(charm_class, use_juju_for_storage=False): + """Setup the charm and dispatch the observed event. + + The event name is based on the way this executable was called (argv[0]). + """ + charm_dir = _get_charm_dir() + + model_backend = ops.model._ModelBackend() + debug = ('JUJU_DEBUG' in os.environ) + setup_root_logging(model_backend, debug=debug) + logger.debug("Operator Framework %s up and running.", ops.__version__) + + dispatcher = _Dispatcher(charm_dir) + dispatcher.run_any_legacy_hook() + + metadata = (charm_dir / 'metadata.yaml').read_text() + actions_meta = charm_dir / 'actions.yaml' + if actions_meta.exists(): + actions_metadata = actions_meta.read_text() + else: + actions_metadata = None + + if not yaml.__with_libyaml__: + logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader') + meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata) + model = ops.model.Model(meta, model_backend) + + # TODO: If Juju unit agent crashes after exit(0) from the charm code + # the framework will commit the snapshot but Juju will not commit its + # operation. 
+ charm_state_path = charm_dir / CHARM_STATE_FILE + if use_juju_for_storage: + if dispatcher.is_restricted_context(): + # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event + # Though we eventually expect that juju will run collect-metrics in a + # non-restricted context. Once we can determine that we are running collect-metrics + # in a non-restricted context, we should fire the event as normal. + logger.debug('"%s" is not supported when using Juju for storage\n' + 'see: https://github.com/canonical/operator/issues/348', + dispatcher.event_name) + # Note that we don't exit nonzero, because that would cause Juju to rerun the hook + return + store = ops.storage.JujuStorage() + else: + store = ops.storage.SQLiteStorage(charm_state_path) + framework = ops.framework.Framework(store, charm_dir, meta, model) + try: + sig = inspect.signature(charm_class) + try: + sig.bind(framework) + except TypeError: + msg = ( + "the second argument, 'key', has been deprecated and will be " + "removed after the 0.7 release") + warnings.warn(msg, DeprecationWarning) + charm = charm_class(framework, None) + else: + charm = charm_class(framework) + dispatcher.ensure_event_links(charm) + + # TODO: Remove the collect_metrics check below as soon as the relevant + # Juju changes are made. + # + # Skip reemission of deferred events for collect-metrics events because + # they do not have the full access to all hook tools. + if not dispatcher.is_restricted_context(): + framework.reemit() + + _emit_charm_event(charm, dispatcher.event_name) + + framework.commit() + finally: + framework.close() diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/model.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b96e89154ea9cec2b62a4fab4649412e115c304e --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/model.py @@ -0,0 +1,1237 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import decimal +import ipaddress +import json +import os +import re +import shutil +import tempfile +import time +import typing +import weakref + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + +import ops +from ops.jujuversion import JujuVersion + + +class Model: + """Represents the Juju Model as seen from this unit. + + This should not be instantiated directly by Charmers, but can be accessed as `self.model` + from any class that derives from Object. + + Attributes: + unit: A :class:`Unit` that represents the unit that is running this code (eg yourself) + app: A :class:`Application` that represents the application this unit is a part of. 
+ relations: Mapping of endpoint to list of :class:`Relation` answering the question + "what am I currently related to". See also :meth:`.get_relation` + config: A dict of the config for the current application. + resources: Access to resources for this charm. Use ``model.resources.fetch(resource_name)`` + to get the path on disk where the resource can be found. + storages: Mapping of storage_name to :class:`Storage` for the storage points defined in + metadata.yaml + pod: Used to get access to ``model.pod.set_spec`` to set the container specification + for Kubernetes charms. + """ + + def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'): + self._cache = _ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(self._backend.unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + self._bindings = BindingMapping(self._backend) + + @property + def name(self) -> str: + """Return the name of the Model that this unit is running in. + + This is read from the environment variable ``JUJU_MODEL_NAME``. + """ + return self._backend.model_name + + def get_unit(self, unit_name: str) -> 'Unit': + """Get an arbitrary unit by name. + + Internally this uses a cache, so asking for the same unit two times will + return the same object. + """ + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name: str) -> 'Application': + """Get an application by name. + + Internally this uses a cache, so asking for the same application two times will + return the same object. + """ + return self._cache.get(Application, app_name) + + def get_relation( + self, relation_name: str, + relation_id: typing.Optional[int] = None) -> 'Relation': + """Get a specific Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. + + Args: + relation_name: The name of the endpoint for this charm + relation_id: An identifier for a specific relation. Used to disambiguate when a + given application has more than one relation on a given endpoint. + Raises: + TooManyRelatedAppsError: is raised if there is more than one relation to the + supplied relation_name and no relation_id was supplied + """ + return self.relations._get_unique(relation_name, relation_id) + + def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a network space binding. + + Args: + binding_key: The relation name or instance to obtain bindings for. + Returns: + If ``binding_key`` is a relation name, the method returns the default binding + for that relation. If a relation instance is provided, the method first looks + up a more specific binding for that specific relation ID, and if none is found + falls back to the default binding for the relation name. 
+ """ + return self._bindings.get(binding_key) + + +class _ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + """Represents a named application in the model. + + This might be your application, or might be an application that you are related to. + Charmers should not instantiate Application objects directly, but should use + :meth:`Model.get_app` if they need a reference to a given application. + + Attributes: + name: The name of this application (eg, 'mysql'). This name may differ from the name of + the charm, if the user has deployed it to a different name. + """ + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of the overall application. + + Can only be read and set by the lead unit of the application. + + The status of remote units is always Unknown. + + Raises: + RuntimeError: if you try to set the status of another application, or if you try to + set the status of this application as a unit that is not the leader. + InvalidStatusError: if you try to set the status to something that is not a + :class:`StatusBase` + + Example:: + + self.model.app.status = BlockedStatus('I need a human to come help me') + """ + if not self._is_our_app: + return UnknownStatus() + + if not self._backend.is_leader(): + raise RuntimeError('cannot get application status as a non-leader unit') + + if self._status: + return self._status + + s = self._backend.status_get(is_app=True) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for application {} status: {}'.format(self, value) + ) + + if not self._is_our_app: + raise RuntimeError('cannot to set status for a remote application {}'.format(self)) + + if not self._backend.is_leader(): + raise RuntimeError('cannot set application status as a non-leader unit') + + self._backend.status_set(value.name, value.message, is_app=True) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + +class Unit: + """Represents a named unit in the model. + + This might be your unit, another unit of your application, or a unit of another application + that you are related to. + + Attributes: + name: The name of the unit (eg, 'mysql/0') + app: The Application the unit is a part of. + """ + + def __init__(self, name, backend, cache): + self.name = name + + app_name = name.split('/')[0] + self.app = cache.get(Application, app_name) + + self._backend = backend + self._cache = cache + self._is_our_unit = self.name == self._backend.unit_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of a specific unit. + + The status of any unit other than yourself is always Unknown. 
+ + Raises: + RuntimeError: if you try to set the status of a unit other than yourself. + InvalidStatusError: if you try to set the status to something other than + a :class:`StatusBase` + Example:: + + self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators') + """ + if not self._is_our_unit: + return UnknownStatus() + + if self._status: + return self._status + + s = self._backend.status_get(is_app=False) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for unit {} status: {}'.format(self, value) + ) + + if not self._is_our_unit: + raise RuntimeError('cannot set status for a remote unit {}'.format(self)) + + self._backend.status_set(value.name, value.message, is_app=False) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + def is_leader(self) -> bool: + """Return whether this unit is the leader of its application. + + This can only be called for your own unit. + Returns: + True if you are the leader, False otherwise + Raises: + RuntimeError: if called for a unit that is not yourself + """ + if self._is_our_unit: + # This value is not cached as it is not guaranteed to persist for the whole duration + # of a hook execution. + return self._backend.is_leader() + else: + raise RuntimeError( + 'leadership status of remote units ({}) is not visible to other' + ' applications'.format(self) + ) + + def set_workload_version(self, version: str) -> None: + """Record the version of the software running as the workload. + + This shouldn't be confused with the revision of the charm. This is informative only; + shown in the output of 'juju status'. + """ + if not isinstance(version, str): + raise TypeError("workload version must be a str, not {}: {!r}".format( + type(version).__name__, version)) + self._backend.application_version_set(version) + + +class LazyMapping(Mapping, ABC): + """Represents a dict that isn't populated until it is accessed. + + Charm authors should generally never need to use this directly, but it forms + the basis for many of the dicts that the framework tracks. 
+ """ + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def _invalidate(self): + self._lazy_data = None + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of :class:`Relation` instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role.is_peer(): + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, + self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _invalidate(self, relation_name): + """Used to wipe the cache of a given relation_name. + + Not meant to be used by Charm authors. The content of relation data is + static for the lifetime of a hook, so it is safe to cache in memory once + accessed. + """ + self._data[relation_name] = None + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError('relation id {} must be int or None not {}'.format( + relation_id, + type(relation_id).__name__)) + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, + self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. + raise TooManyRelatedAppsError(relation_name, num_related, 1) + + +class BindingMapping: + """Mapping of endpoints to network bindings. + + Charm authors should not instantiate this directly, but access it via + :meth:`Model.get_binding` + """ + + def __init__(self, backend): + self._backend = backend + self._data = {} + + def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a specific Binding for an endpoint/relation. + + Not used directly by Charm authors. 
See :meth:`Model.get_binding` + """ + if isinstance(binding_key, Relation): + binding_name = binding_key.name + relation_id = binding_key.id + elif isinstance(binding_key, str): + binding_name = binding_key + relation_id = None + else: + raise ModelError('binding key must be str or relation instance, not {}' + ''.format(type(binding_key).__name__)) + binding = self._data.get(binding_key) + if binding is None: + binding = Binding(binding_name, relation_id, self._backend) + self._data[binding_key] = binding + return binding + + +class Binding: + """Binding to a network space. + + Attributes: + name: The name of the endpoint this binding represents (eg, 'db') + """ + + def __init__(self, name, relation_id, backend): + self.name = name + self._relation_id = relation_id + self._backend = backend + self._network = None + + @property + def network(self) -> 'Network': + """The network information for this binding.""" + if self._network is None: + try: + self._network = Network(self._backend.network_get(self.name, self._relation_id)) + except RelationNotFoundError: + if self._relation_id is None: + raise + # If a relation is dead, we can still get network info associated with an + # endpoint itself + self._network = Network(self._backend.network_get(self.name)) + return self._network + + +class Network: + """Network space details. + + Charm authors should not instantiate this directly, but should get access to the Network + definition from :meth:`Model.get_binding` and its ``network`` attribute. + + Attributes: + interfaces: A list of :class:`NetworkInterface` details. This includes the + information about how your application should be configured (eg, what + IP addresses should you bind to.) + Note that multiple addresses for a single interface are represented as multiple + interfaces. (eg, ``[NetworKInfo('ens1', '10.1.1.1/32'), + NetworkInfo('ens1', '10.1.2.1/32'])``) + ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP + addresses that other units should use to get in touch with you. + egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that + other units will see you connecting from. Due to things like NAT it isn't always + possible to narrow it down to a single address, but when it is clear, the CIDRs + will be constrained to a single address. (eg, 10.0.0.1/32) + Args: + network_info: A dict of network information as returned by ``network-get``. + """ + + def __init__(self, network_info: dict): + self.interfaces = [] + # Treat multiple addresses on an interface as multiple logical + # interfaces with the same name. + for interface_info in network_info['bind-addresses']: + interface_name = interface_info['interface-name'] + for address_info in interface_info['addresses']: + self.interfaces.append(NetworkInterface(interface_name, address_info)) + self.ingress_addresses = [] + for address in network_info['ingress-addresses']: + self.ingress_addresses.append(ipaddress.ip_address(address)) + self.egress_subnets = [] + for subnet in network_info['egress-subnets']: + self.egress_subnets.append(ipaddress.ip_network(subnet)) + + @property + def bind_address(self): + """A single address that your application should bind() to. + + For the common case where there is a single answer. This represents a single + address from :attr:`.interfaces` that can be used to configure where your + application should bind() and listen(). 
+ """ + return self.interfaces[0].address + + @property + def ingress_address(self): + """The address other applications should use to connect to your unit. + + Due to things like public/private addresses, NAT and tunneling, the address you bind() + to is not always the address other people can use to connect() to you. + This is just the first address from :attr:`.ingress_addresses`. + """ + return self.ingress_addresses[0] + + +class NetworkInterface: + """Represents a single network interface that the charm needs to know about. + + Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding` + to get the network information for a given endpoint. + + Attributes: + name: The name of the interface (eg. 'eth0', or 'ens1') + subnet: An :class:`ipaddress.ip_network` representation of the IP for the network + interface. This may be a single address (eg '10.0.1.2/32') + """ + + def __init__(self, name: str, address_info: dict): + self.name = name + # TODO: expose a hardware address here, see LP: #1864070. + self.address = ipaddress.ip_address(address_info['value']) + cidr = address_info['cidr'] + if not cidr: + # The cidr field may be empty, see LP: #1864102. + # In this case, make it a /32 or /128 IP network. + self.subnet = ipaddress.ip_network(address_info['value']) + else: + self.subnet = ipaddress.ip_network(cidr) + # TODO: expose a hostname/canonical name for the address here, see LP: #1864086. + + +class Relation: + """Represents an established relation between this application and another application. + + This class should not be instantiated directly, instead use :meth:`Model.get_relation` + or :attr:`RelationEvent.relation`. + + Attributes: + name: The name of the local endpoint of the relation (eg 'db') + id: The identifier for a particular relation (integer) + app: An :class:`Application` representing the remote application of this relation. + For peer relations this will be the local application. + units: A set of :class:`Unit` for units that have started and joined this relation. + data: A :class:`RelationData` holding the data buckets for each entity + of a relation. Accessed via eg Relation.data[unit]['foo'] + """ + + def __init__( + self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit, + backend: '_ModelBackend', cache: '_ModelCache'): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. + if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. + pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return '<{}.{} {}:{}>'.format(type(self).__module__, + type(self).__name__, + self.name, + self.id) + + +class RelationData(Mapping): + """Represents the various data buckets of a given relation. + + Each unit and application involved in a relation has their own data bucket. + Eg: ``{entity: RelationDataContent}`` + where entity can be either a :class:`Unit` or a :class:`Application`. + + Units can read and write their own data, and if they are the leader, + they can read and write their application data. They are allowed to read + remote unit and application data. + + This class should not be created directly. 
It should be accessed via + :attr:`Relation.data` + """ + + def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'): + self.relation = weakref.proxy(relation) + self._data = { + our_unit: RelationDataContent(self.relation, our_unit, backend), + our_unit.app: RelationDataContent(self.relation, our_unit.app, backend), + } + self._data.update({ + unit: RelationDataContent(self.relation, unit, backend) + for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app is not None: + self._data.update({ + self.relation.app: RelationDataContent(self.relation, self.relation.app, backend), + }) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on + # whether this unit is a leader or not, but this is not guaranteed + # to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError('cannot set relation data for {}'.format(self._entity.name)) + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an + # empty string will remove the key entirely from the relation data. + del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty + # string will remove the key entirely from the relation data. + self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units. + + To access a status by name, see :meth:`StatusBase.from_name`, most use cases will just + directly use the child class to indicate their status. 
+ """ + + _statuses = {} + name = None + + def __init__(self, message: str): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + return super().__new__(cls) + + def __eq__(self, other): + if not isinstance(self, type(other)): + return False + return self.message == other.message + + def __repr__(self): + return "{.__class__.__name__}({!r})".format(self, self.message) + + @classmethod + def from_name(cls, name: str, message: str): + if name == 'unknown': + # unknown is special + return UnknownStatus() + else: + return cls._statuses[name](message) + + @classmethod + def register(cls, child): + if child.name is None: + raise AttributeError('cannot register a Status which has no name') + cls._statuses[child.name] = child + return child + + +@StatusBase.register +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the + charm has not called status-set yet. + + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + def __repr__(self): + return "UnknownStatus()" + + +@StatusBase.register +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message: str = ''): + super().__init__(message) + + +@StatusBase.register +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +@StatusBase.register +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation + for providing those services. This is a "spinning" state, not an error state. It + reflects activity on the unit itself, not on peers or related units. + + """ + name = 'maintenance' + + +@StatusBase.register +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which + it is related is not running. + + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name: str) -> Path: + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. + """ + if name not in self._paths: + raise RuntimeError('invalid resource name: {}'.format(name)) + if self._paths[name] is None: + self._paths[name] = Path(self._backend.resource_get(name)) + return self._paths[name] + + +class Pod: + """Represents the definition of a pod spec in Kubernetes models. + + Currently only supports simple access to setting the Juju pod spec via :attr:`.set_spec`. + """ + + def __init__(self, backend: '_ModelBackend'): + self._backend = backend + + def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None): + """Set the specification for pods that Juju should start in kubernetes. + + See `juju help-tool pod-spec-set` for details of what should be passed. 
+ Args: + spec: The mapping defining the pod specification + k8s_resources: Additional kubernetes specific specification. + + Returns: + """ + if not self._backend.is_leader(): + raise ModelError('cannot set a pod spec as this unit is not a leader') + self._backend.pod_spec_set(spec, k8s_resources) + + +class StorageMapping(Mapping): + """Map of storage names to lists of Storage instances.""" + + def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._storage_map = {storage_name: None for storage_name in storage_names} + + def __contains__(self, key: str): + return key in self._storage_map + + def __len__(self): + return len(self._storage_map) + + def __iter__(self): + return iter(self._storage_map) + + def __getitem__(self, storage_name: str) -> typing.List['Storage']: + storage_list = self._storage_map[storage_name] + if storage_list is None: + storage_list = self._storage_map[storage_name] = [] + for storage_id in self._backend.storage_list(storage_name): + storage_list.append(Storage(storage_name, storage_id, self._backend)) + return storage_list + + def request(self, storage_name: str, count: int = 1): + """Requests new storage instances of a given name. + + Uses storage-add tool to request additional storage. Juju will notify the unit + via -storage-attached events when it becomes available. + """ + if storage_name not in self._storage_map: + raise ModelError(('cannot add storage {!r}:' + ' it is not present in the charm metadata').format(storage_name)) + self._backend.storage_add(storage_name, count) + + +class Storage: + """"Represents a storage as defined in metadata.yaml + + Attributes: + name: Simple string name of the storage + id: The provider id for storage + """ + + def __init__(self, storage_name, storage_id, backend): + self.name = storage_name + self.id = storage_id + self._backend = backend + self._location = None + + @property + def location(self): + if self._location is None: + raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location") + self._location = Path(raw) + return self._location + + +class ModelError(Exception): + """Base class for exceptions raised when interacting with the Model.""" + pass + + +class TooManyRelatedAppsError(ModelError): + """Raised by :meth:`Model.get_relation` if there is more than one related application.""" + + def __init__(self, relation_name, num_related, max_supported): + super().__init__('Too many remote applications on {} ({} > {})'.format( + relation_name, num_related, max_supported)) + self.relation_name = relation_name + self.num_related = num_related + self.max_supported = max_supported + + +class RelationDataError(ModelError): + """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid. + + This is raised if you're either trying to set a value to something that isn't a string, + or if you are trying to set a value in a bucket that you don't have access to. (eg, + another application/unit or setting your application data but you aren't the leader.) + """ + + +class RelationNotFoundError(ModelError): + """Backend error when querying juju for a given relation and that relation doesn't exist.""" + + +class InvalidStatusError(ModelError): + """Raised if trying to set an Application or Unit status to something invalid.""" + + +class _ModelBackend: + """Represents the connection between the Model representation and talking to Juju. 
+ + Charm authors should not directly interact with the ModelBackend, it is a private + implementation of Model. + """ + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self, unit_name=None, model_name=None): + if unit_name is None: + self.unit_name = os.environ['JUJU_UNIT_NAME'] + else: + self.unit_name = unit_name + if model_name is None: + model_name = os.environ.get('JUJU_MODEL_NAME') + self.model_name = model_name + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = None + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', '-r', str(relation_id), + return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'getting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-get', '-r', str(relation_id), '-', member_name] + if is_app: + args.append('--app') + + try: + return self._run(*args, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'setting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)] + if is_app: + args.append('--app') + + try: + return self._run(*args) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm code is executing on. + + The value is cached for the duration of a lease which is 30s in Juju. + """ + now = time.monotonic() + if self._leader_check_time is None: + check = True + else: + time_since_check = datetime.timedelta(seconds=now - self._leader_check_time) + check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None) + if check: + # Current time MUST be saved before running is-leader to ensure the cache + # is only used inside the window that is-leader itself asserts. 
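+            # (In other words: the leadership guarantee returned by is-leader
+            # only holds for LEASE_RENEWAL_PERIOD starting at the moment the
+            # tool is invoked, so the timestamp must not be taken any later
+            # than the call itself.)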
+ self._leader_check_time = now + self._is_leader = self._run('is-leader', return_output=True, use_json=True) + + return self._is_leader + + def resource_get(self, resource_name): + return self._run('resource-get', resource_name, return_output=True).strip() + + def pod_spec_set(self, spec, k8s_resources): + tmpdir = Path(tempfile.mkdtemp('-pod-spec-set')) + try: + spec_path = tmpdir / 'spec.json' + spec_path.write_text(json.dumps(spec)) + args = ['--file', str(spec_path)] + if k8s_resources: + k8s_res_path = tmpdir / 'k8s-resources.json' + k8s_res_path.write_text(json.dumps(k8s_resources)) + args.extend(['--k8s-resources', str(k8s_res_path)]) + self._run('pod-spec-set', *args) + finally: + shutil.rmtree(str(tmpdir)) + + def status_get(self, *, is_app=False): + """Get a status of a unit or an application. + + Args: + is_app: A boolean indicating whether the status should be retrieved for a unit + or an application. + """ + content = self._run( + 'status-get', '--include-data', '--application={}'.format(is_app), + use_json=True, + return_output=True) + # Unit status looks like (in YAML): + # message: 'load: 0.28 0.26 0.26' + # status: active + # status-data: {} + # Application status looks like (in YAML): + # application-status: + # message: 'load: 0.28 0.26 0.26' + # status: active + # status-data: {} + # units: + # uo/0: + # message: 'load: 0.28 0.26 0.26' + # status: active + # status-data: {} + + if is_app: + return {'status': content['application-status']['status'], + 'message': content['application-status']['message']} + else: + return content + + def status_set(self, status, message='', *, is_app=False): + """Set a status of a unit or an application. + + Args: + app: A boolean indicating whether the status should be set for a unit or an + application. + """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', '--application={}'.format(is_app), status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, + return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, + return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError('storage count must be integer, got: {} ({})'.format(count, + type(count))) + self._run('storage-add', '{}={}'.format(name, count)) + + def action_get(self): + return self._run('action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()]) + + def action_log(self, message): + self._run('action-log', message) + + def action_fail(self, message=''): + self._run('action-fail', message) + + def application_version_set(self, version): + self._run('application-version-set', '--', version) + + def juju_log(self, level, message): + self._run('juju-log', '--log-level', level, message) + + def network_get(self, binding_name, relation_id=None): + """Return network info provided by network-get for a given binding. + + Args: + binding_name: A name of a binding (relation name or extra-binding name). + relation_id: An optional relation id to get network info for. 
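+
+        This is a thin wrapper around the hook tool: a call such as
+        ``network_get('db', 1)`` (illustrative names) runs roughly
+        ``network-get db -r 1 --format=json``, with the JSON flag appended
+        by ``_run``.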
+ """ + cmd = ['network-get', binding_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def add_metrics(self, metrics, labels=None): + cmd = ['add-metric'] + + if labels: + label_args = [] + for k, v in labels.items(): + _ModelBackendValidator.validate_metric_label(k) + _ModelBackendValidator.validate_label_value(k, v) + label_args.append('{}={}'.format(k, v)) + cmd.extend(['--labels', ','.join(label_args)]) + + metric_args = [] + for k, v in metrics.items(): + _ModelBackendValidator.validate_metric_key(k) + metric_value = _ModelBackendValidator.format_metric_value(v) + metric_args.append('{}={}'.format(k, metric_value)) + cmd.extend(metric_args) + self._run(*cmd) + + +class _ModelBackendValidator: + """Provides facilities for validating inputs and formatting them for model backends.""" + + METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$') + + @classmethod + def validate_metric_key(cls, key): + if cls.METRIC_KEY_REGEX.match(key) is None: + raise ModelError( + 'invalid metric key {!r}: must match {}'.format( + key, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def validate_metric_label(cls, label_name): + if cls.METRIC_KEY_REGEX.match(label_name) is None: + raise ModelError( + 'invalid metric label name {!r}: must match {}'.format( + label_name, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def format_metric_value(cls, value): + try: + decimal_value = decimal.Decimal.from_float(value) + except TypeError as e: + e2 = ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + raise e2 from e + if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0: + raise ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + return str(decimal_value) + + @classmethod + def validate_label_value(cls, label, value): + # Label values cannot be empty, contain commas or equal signs as those are + # used by add-metric as separators. + if not value: + raise ModelError( + 'metric label {} has an empty value, which is not allowed'.format(label)) + v = str(value) + if re.search('[,=]', v) is not None: + raise ModelError( + 'metric label values must not contain "," or "=": {}={!r}'.format(label, value)) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/storage.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/storage.py new file mode 100755 index 0000000000000000000000000000000000000000..d4310ce1cfbb707c6278b70f84a9751da3ce07af --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/storage.py @@ -0,0 +1,318 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from datetime import timedelta +import pickle +import shutil +import subprocess +import sqlite3 +import typing + +import yaml + + +class SQLiteStorage: + + DB_LOCK_TIMEOUT = timedelta(hours=1) + + def __init__(self, filename): + # The isolation_level argument is set to None such that the implicit + # transaction management behavior of the sqlite3 module is disabled. + self._db = sqlite3.connect(str(filename), + isolation_level=None, + timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._setup() + + def _setup(self): + # Make sure that the database is locked until the connection is closed, + # not until the transaction ends. + self._db.execute("PRAGMA locking_mode=EXCLUSIVE") + c = self._db.execute("BEGIN") + c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") + if c.fetchone()[0] == 0: + # Keep in mind what might happen if the process dies somewhere below. + # The system must not be rendered permanently broken by that. + self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") + self._db.execute(''' + CREATE TABLE notice ( + sequence INTEGER PRIMARY KEY AUTOINCREMENT, + event_path TEXT, + observer_path TEXT, + method_name TEXT) + ''') + self._db.commit() + + def close(self): + self._db.close() + + def commit(self): + self._db.commit() + + # There's commit but no rollback. For abort to be supported, we'll need logic that + # can rollback decisions made by third-party code in terms of the internal state + # of objects that have been snapshotted, and hooks to let them know about it and + # take the needed actions to undo their logic until the last snapshot. + # This is doable but will increase significantly the chances for mistakes. + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + """Part of the Storage API, persist a snapshot data under the given handle. + + Args: + handle_path: The string identifying the snapshot. + snapshot_data: The data to be persisted. (as returned by Object.snapshot()). This + might be a dict/tuple/int, but must only contain 'simple' python types. + """ + # Use pickle for serialization, so the value remains portable. + raw_data = pickle.dumps(snapshot_data) + self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data)) + + def load_snapshot(self, handle_path: str) -> typing.Any: + """Part of the Storage API, retrieve a snapshot that was previously saved. + + Args: + handle_path: The string identifying the snapshot. + Raises: + NoSnapshotError: if there is no snapshot for the given handle_path. + """ + c = self._db.cursor() + c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,)) + row = c.fetchone() + if row: + return pickle.loads(row[0]) + raise NoSnapshotError(handle_path) + + def drop_snapshot(self, handle_path: str): + """Part of the Storage API, remove a snapshot that was previously saved. + + Dropping a snapshot that doesn't exist is treated as a no-op. 
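+        (The underlying ``DELETE`` simply matches no rows in that case, so no
+        error is raised.)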
+ """ + self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,)) + + def list_snapshots(self) -> typing.Generator[str, None, None]: + """Return the name of all snapshots that are currently saved.""" + c = self._db.cursor() + c.execute("SELECT handle FROM snapshot") + while True: + rows = c.fetchmany() + if not rows: + break + for row in rows: + yield row[0] + + def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None: + """Part of the Storage API, record an notice (event and observer)""" + self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)', + (event_path, observer_path, method_name)) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None: + """Part of the Storage API, remove a notice that was previously recorded.""" + self._db.execute(''' + DELETE FROM notice + WHERE event_path=? + AND observer_path=? + AND method_name=? + ''', (event_path, observer_path, method_name)) + + def notices(self, event_path: typing.Optional[str]) ->\ + typing.Generator[typing.Tuple[str, str, str], None, None]: + """Part of the Storage API, return all notices that begin with event_path. + + Args: + event_path: If supplied, will only yield events that match event_path. If not + supplied (or None/'') will return all events. + Returns: + Iterable of (event_path, observer_path, method_name) tuples + """ + if event_path: + c = self._db.execute(''' + SELECT event_path, observer_path, method_name + FROM notice + WHERE event_path=? + ORDER BY sequence + ''', (event_path,)) + else: + c = self._db.execute(''' + SELECT event_path, observer_path, method_name + FROM notice + ORDER BY sequence + ''') + while True: + rows = c.fetchmany() + if not rows: + break + for row in rows: + yield tuple(row) + + +class JujuStorage: + """"Storing the content tracked by the Framework in Juju. + + This uses :class:`_JujuStorageBackend` to interact with state-get/state-set + as the way to store state for the framework and for components. 
+ """ + + NOTICE_KEY = "#notices#" + + def __init__(self, backend: '_JujuStorageBackend' = None): + self._backend = backend + if backend is None: + self._backend = _JujuStorageBackend() + + def close(self): + return + + def commit(self): + return + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + self._backend.set(handle_path, snapshot_data) + + def load_snapshot(self, handle_path): + try: + content = self._backend.get(handle_path) + except KeyError: + raise NoSnapshotError(handle_path) + return content + + def drop_snapshot(self, handle_path): + self._backend.delete(handle_path) + + def save_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.append([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.remove([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def notices(self, event_path: str): + notice_list = self._load_notice_list() + for row in notice_list: + if row[0] != event_path: + continue + yield tuple(row) + + def _load_notice_list(self) -> typing.List[typing.Tuple[str]]: + try: + notice_list = self._backend.get(self.NOTICE_KEY) + except KeyError: + return [] + if notice_list is None: + return [] + return notice_list + + def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None: + self._backend.set(self.NOTICE_KEY, notices) + + +class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)): + """Handle a couple basic python types. + + yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one + that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just + subclass SafeLoader and add tuples back in. + """ + # Taken from the example at: + # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml + + construct_python_tuple = yaml.Loader.construct_python_tuple + + +_SimpleLoader.add_constructor( + u'tag:yaml.org,2002:python/tuple', + _SimpleLoader.construct_python_tuple) + + +class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)): + """Add types supported by 'marshal' + + YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So + we want to only support dumping out types that are safe to load. + """ + + +_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple +_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple) + + +class _JujuStorageBackend: + """Implements the interface from the Operator framework to Juju's state-get/set/etc.""" + + @staticmethod + def is_available() -> bool: + """Check if Juju state storage is available. + + This checks if there is a 'state-get' executable in PATH. + """ + p = shutil.which('state-get') + return p is not None + + def set(self, key: str, value: typing.Any) -> None: + """Set a key to a given value. + + Args: + key: The string key that will be used to find the value later + value: Arbitrary content that will be returned by get(). + Raises: + CalledProcessError: if 'state-set' returns an error code. + """ + # default_flow_style=None means that it can use Block for + # complex types (types that have nested types) but use flow + # for simple types (like an array). Not all versions of PyYAML + # have the same default style. 
+ encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None) + content = yaml.dump( + {key: encoded_value}, encoding='utf-8', default_style='|', + default_flow_style=False, + Dumper=_SimpleDumper) + subprocess.run(["state-set", "--file", "-"], input=content, check=True) + + def get(self, key: str) -> typing.Any: + """Get the bytes value associated with a given key. + + Args: + key: The string key that will be used to find the value + Raises: + CalledProcessError: if 'state-get' returns an error code. + """ + # We don't capture stderr here so it can end up in debug logs. + p = subprocess.run( + ["state-get", key], + stdout=subprocess.PIPE, + check=True, + ) + if p.stdout == b'' or p.stdout == b'\n': + raise KeyError(key) + return yaml.load(p.stdout, Loader=_SimpleLoader) + + def delete(self, key: str) -> None: + """Remove a key from being tracked. + + Args: + key: The key to stop storing + Raises: + CalledProcessError: if 'state-delete' returns an error code. + """ + subprocess.run(["state-delete", key], check=True) + + +class NoSnapshotError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return 'no snapshot data found for {} object'.format(self.handle_path) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/testing.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/testing.py new file mode 100755 index 0000000000000000000000000000000000000000..b4b3fe071216238007c9f3847ca9556be626bf6b --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/testing.py @@ -0,0 +1,586 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import pathlib +from textwrap import dedent +import tempfile +import typing +import yaml +import weakref + +from ops import ( + charm, + framework, + model, + storage, +) + + +# OptionalYAML is something like metadata.yaml or actions.yaml. You can +# pass in a file-like object or the string directly. +OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]] + + +# noinspection PyProtectedMember +class Harness: + """This class represents a way to build up the model that will drive a test suite. + + The model that is created is from the viewpoint of the charm that you are testing. + + Example:: + + harness = Harness(MyCharm) + # Do initial setup here + relation_id = harness.add_relation('db', 'postgresql') + # Now instantiate the charm to see events as the model changes + harness.begin() + harness.add_relation_unit(relation_id, 'postgresql/0') + harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'}) + # Check that charm has properly handled the relation_joined event for postgresql/0 + self.assertEqual(harness.charm. ...) + + Args: + charm_cls: The Charm class that you'll be testing. 
+ meta: charm.CharmBase is a A string or file-like object containing the contents of + metadata.yaml. If not supplied, we will look for a 'metadata.yaml' file in the + parent directory of the Charm, and if not found fall back to a trivial + 'name: test-charm' metadata. + actions: A string or file-like object containing the contents of + actions.yaml. If not supplied, we will look for a 'actions.yaml' file in the + parent directory of the Charm. + """ + + def __init__( + self, + charm_cls: typing.Type[charm.CharmBase], + *, + meta: OptionalYAML = None, + actions: OptionalYAML = None): + # TODO: jam 2020-03-05 We probably want to take config as a parameter as well, since + # it would define the default values of config that the charm would see. + self._charm_cls = charm_cls + self._charm = None + self._charm_dir = 'no-disk-path' # this may be updated by _create_meta + self._lazy_resource_dir = None + self._meta = self._create_meta(meta, actions) + self._unit_name = self._meta.name + '/0' + self._framework = None + self._hooks_enabled = True + self._relation_id_counter = 0 + self._backend = _TestingModelBackend(self._unit_name, self._meta) + self._model = model.Model(self._meta, self._backend) + self._storage = storage.SQLiteStorage(':memory:') + self._framework = framework.Framework( + self._storage, self._charm_dir, self._meta, self._model) + + @property + def charm(self) -> charm.CharmBase: + """Return the instance of the charm class that was passed to __init__. + + Note that the Charm is not instantiated until you have called + :meth:`.begin()`. + """ + return self._charm + + @property + def model(self) -> model.Model: + """Return the :class:`~ops.model.Model` that is being driven by this Harness.""" + return self._model + + @property + def framework(self) -> framework.Framework: + """Return the Framework that is being driven by this Harness.""" + return self._framework + + @property + def _resource_dir(self) -> pathlib.Path: + if self._lazy_resource_dir is not None: + return self._lazy_resource_dir + + self.__resource_dir = tempfile.TemporaryDirectory() + self._lazy_resource_dir = pathlib.Path(self.__resource_dir.name) + self._finalizer = weakref.finalize(self, self.__resource_dir.cleanup) + return self._lazy_resource_dir + + def begin(self) -> None: + """Instantiate the Charm and start handling events. + + Before calling begin(), there is no Charm instance, so changes to the Model won't emit + events. You must call begin before :attr:`.charm` is valid. + """ + if self._charm is not None: + raise RuntimeError('cannot call the begin method on the harness more than once') + + # The Framework adds attributes to class objects for events, etc. As such, we can't re-use + # the original class against multiple Frameworks. So create a locally defined class + # and register it. + # TODO: jam 2020-03-16 We are looking to changes this to Instance attributes instead of + # Class attributes which should clean up this ugliness. The API can stay the same + class TestEvents(self._charm_cls.on.__class__): + pass + + TestEvents.__name__ = self._charm_cls.on.__class__.__name__ + + class TestCharm(self._charm_cls): + on = TestEvents() + + # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo, + # rather than TestCharm has no attribute foo. + TestCharm.__name__ = self._charm_cls.__name__ + self._charm = TestCharm(self._framework) + + def _create_meta(self, charm_metadata, action_metadata): + """Create a CharmMeta object. 
+ + Handle the cases where a user doesn't supply explicit metadata snippets. + """ + filename = inspect.getfile(self._charm_cls) + charm_dir = pathlib.Path(filename).parents[1] + + if charm_metadata is None: + metadata_path = charm_dir / 'metadata.yaml' + if metadata_path.is_file(): + charm_metadata = metadata_path.read_text() + self._charm_dir = charm_dir + else: + # The simplest of metadata that the framework can support + charm_metadata = 'name: test-charm' + elif isinstance(charm_metadata, str): + charm_metadata = dedent(charm_metadata) + + if action_metadata is None: + actions_path = charm_dir / 'actions.yaml' + if actions_path.is_file(): + action_metadata = actions_path.read_text() + self._charm_dir = charm_dir + elif isinstance(action_metadata, str): + action_metadata = dedent(action_metadata) + + return charm.CharmMeta.from_yaml(charm_metadata, action_metadata) + + def add_oci_resource(self, resource_name: str, + contents: typing.Mapping[str, str] = None) -> None: + """Add oci resources to the backend. + + This will register an oci resource and create a temporary file for processing metadata + about the resource. A default set of values will be used for all the file contents + unless a specific contents dict is provided. + + Args: + resource_name: Name of the resource to add custom contents to. + contents: Optional custom dict to write for the named resource. + """ + if not contents: + contents = {'registrypath': 'registrypath', + 'username': 'username', + 'password': 'password', + } + if resource_name not in self._meta.resources.keys(): + raise RuntimeError('Resource {} is not a defined resources'.format(resource_name)) + if self._meta.resources[resource_name].type != "oci-image": + raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name)) + resource_dir = self._resource_dir / resource_name + resource_dir.mkdir(exist_ok=True) + resource_file = resource_dir / "contents.yaml" + with resource_file.open('wt', encoding='utf8') as resource_yaml: + yaml.dump(contents, resource_yaml) + self._backend._resources_map[resource_name] = resource_file + + def populate_oci_resources(self) -> None: + """Populate all OCI resources.""" + for name, data in self._meta.resources.items(): + if data.type == "oci-image": + self.add_oci_resource(name) + + def disable_hooks(self) -> None: + """Stop emitting hook events when the model changes. + + This can be used by developers to stop changes to the model from emitting events that + the charm will react to. Call :meth:`.enable_hooks` + to re-enable them. + """ + self._hooks_enabled = False + + def enable_hooks(self) -> None: + """Re-enable hook events from charm.on when the model is changed. + + By default hook events are enabled once you call :meth:`.begin`, + but if you have used :meth:`.disable_hooks`, this can be used to + enable them again. + """ + self._hooks_enabled = True + + def _next_relation_id(self): + rel_id = self._relation_id_counter + self._relation_id_counter += 1 + return rel_id + + def add_relation(self, relation_name: str, remote_app: str) -> int: + """Declare that there is a new relation between this app and `remote_app`. + + Args: + relation_name: The relation on Charm that is being related to + remote_app: The name of the application that is being related to + + Return: + The relation_id created by this add_relation. 
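+
+        Example::
+
+            rel_id = harness.add_relation('db', 'postgresql')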
+ """ + rel_id = self._next_relation_id() + self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id) + self._backend._relation_names[rel_id] = relation_name + self._backend._relation_list_map[rel_id] = [] + self._backend._relation_data[rel_id] = { + remote_app: {}, + self._backend.unit_name: {}, + self._backend.app_name: {}, + } + # Reload the relation_ids list + if self._model is not None: + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return rel_id + relation = self._model.get_relation(relation_name, rel_id) + app = self._model.get_app(remote_app) + self._charm.on[relation_name].relation_created.emit( + relation, app) + return rel_id + + def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: + """Add a new unit to a relation. + + Example:: + + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + + This will trigger a `relation_joined` event and a `relation_changed` event. + + Args: + relation_id: The integer relation identifier (as returned by add_relation). + remote_unit_name: A string representing the remote unit that is being added. + Return: + None + """ + self._backend._relation_list_map[relation_id].append(remote_unit_name) + self._backend._relation_data[relation_id][remote_unit_name] = {} + relation_name = self._backend._relation_names[relation_id] + # Make sure that the Model reloads the relation_list for this relation_id, as well as + # reloading the relation data for this unit. + if self._model is not None: + remote_unit = self._model.get_unit(remote_unit_name) + relation = self._model.get_relation(relation_name, relation_id) + unit_cache = relation.data.get(remote_unit, None) + if unit_cache is not None: + unit_cache._invalidate() + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return + self._charm.on[relation_name].relation_joined.emit( + relation, remote_unit.app, remote_unit) + + def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping: + """Get the relation data bucket for a single app or unit in a given relation. + + This ignores all of the safety checks of who can and can't see data in relations (eg, + non-leaders can't read their own application's relation data because there are no events + that keep that data up-to-date for the unit). + + Args: + relation_id: The relation whose content we want to look at. + app_or_unit: The name of the application or unit whose data we want to read + Return: + a dict containing the relation data for `app_or_unit` or None. + Raises: + KeyError: if relation_id doesn't exist + """ + return self._backend._relation_data[relation_id].get(app_or_unit, None) + + def get_workload_version(self) -> str: + """Read the workload version that was set by the unit.""" + return self._backend._workload_version + + def set_model_name(self, name: str) -> None: + """Set the name of the Model that this is representing. + + This cannot be called once begin() has been called. But it lets you set the value that + will be returned by Model.name. + """ + if self._charm is not None: + raise RuntimeError('cannot set the Model name after begin()') + self._backend.model_name = name + + def update_relation_data( + self, + relation_id: int, + app_or_unit: str, + key_values: typing.Mapping, + ) -> None: + """Update the relation data for a given unit or application in a given relation. 
+ + This also triggers the `relation_changed` event for this relation_id. + + Args: + relation_id: The integer relation_id representing this relation. + app_or_unit: The unit or application name that is being updated. + This can be the local or remote application. + key_values: Each key/value will be updated in the relation data. + """ + relation_name = self._backend._relation_names[relation_id] + relation = self._model.get_relation(relation_name, relation_id) + if '/' in app_or_unit: + entity = self._model.get_unit(app_or_unit) + else: + entity = self._model.get_app(app_or_unit) + rel_data = relation.data.get(entity, None) + if rel_data is not None: + # rel_data may have cached now-stale data, so _invalidate() it. + # Note, this won't cause the data to be loaded if it wasn't already. + rel_data._invalidate() + + new_values = self._backend._relation_data[relation_id][app_or_unit].copy() + for k, v in key_values.items(): + if v == '': + new_values.pop(k, None) + else: + new_values[k] = v + self._backend._relation_data[relation_id][app_or_unit] = new_values + + if app_or_unit == self._model.unit.name: + # No events for our own unit + return + if app_or_unit == self._model.app.name: + # updating our own app only generates an event if it is a peer relation and we + # aren't the leader + is_peer = self._meta.relations[relation_name].role.is_peer() + if not is_peer: + return + if self._model.unit.is_leader(): + return + self._emit_relation_changed(relation_id, app_or_unit) + + def _emit_relation_changed(self, relation_id, app_or_unit): + if self._charm is None or not self._hooks_enabled: + return + rel_name = self._backend._relation_names[relation_id] + relation = self.model.get_relation(rel_name, relation_id) + if '/' in app_or_unit: + app_name = app_or_unit.split('/')[0] + unit_name = app_or_unit + app = self.model.get_app(app_name) + unit = self.model.get_unit(unit_name) + args = (relation, app, unit) + else: + app_name = app_or_unit + app = self.model.get_app(app_name) + args = (relation, app) + self._charm.on[rel_name].relation_changed.emit(*args) + + def update_config( + self, + key_values: typing.Mapping[str, str] = None, + unset: typing.Iterable[str] = (), + ) -> None: + """Update the config as seen by the charm. + + This will trigger a `config_changed` event. + + Args: + key_values: A Mapping of key:value pairs to update in config. + unset: An iterable of keys to remove from Config. (Note that this does + not currently reset the config values to the default defined in config.yaml.) + """ + config = self._backend._config + if key_values is not None: + for key, value in key_values.items(): + config[key] = value + for key in unset: + config.pop(key, None) + # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config + # is a LazyMapping, but its _load returns a dict and this method mutates + # the dict that Config is caching. Arguably we should be doing some sort + # of charm.framework.model.config._invalidate() + if self._charm is None or not self._hooks_enabled: + return + self._charm.on.config_changed.emit() + + def set_leader(self, is_leader: bool = True) -> None: + """Set whether this unit is the leader or not. + + If this charm becomes a leader then `leader_elected` will be triggered. + + Args: + is_leader: True/False as to whether this unit is the leader. 
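+
+        Example::
+
+            harness.set_leader(True)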
+ """ + was_leader = self._backend._is_leader + self._backend._is_leader = is_leader + # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in + # the Model objects, so this automatically gets noticed. + if is_leader and not was_leader and self._charm is not None and self._hooks_enabled: + self._charm.on.leader_elected.emit() + + def _get_backend_calls(self, reset: bool = True) -> list: + """Return the calls that we have made to the TestingModelBackend. + + This is useful mostly for testing the framework itself, so that we can assert that we + do/don't trigger extra calls. + + Args: + reset: If True, reset the calls list back to empty, if false, the call list is + preserved. + Return: + ``[(call1, args...), (call2, args...)]`` + """ + calls = self._backend._calls.copy() + if reset: + self._backend._calls.clear() + return calls + + +def _record_calls(cls): + """Replace methods on cls with methods that record that they have been called. + + Iterate all attributes of cls, and for public methods, replace them with a wrapped method + that records the method called along with the arguments and keyword arguments. + """ + for meth_name, orig_method in cls.__dict__.items(): + if meth_name.startswith('_'): + continue + + def decorator(orig_method): + def wrapped(self, *args, **kwargs): + full_args = (orig_method.__name__,) + args + if kwargs: + full_args = full_args + (kwargs,) + self._calls.append(full_args) + return orig_method(self, *args, **kwargs) + return wrapped + + setattr(cls, meth_name, decorator(orig_method)) + return cls + + +@_record_calls +class _TestingModelBackend: + """This conforms to the interface for ModelBackend but provides canned data. + + DO NOT use this class directly, it is used by `Harness`_ to drive the model. + `Harness`_ is responsible for maintaining the internal consistency of the values here, + as the only public methods of this type are for implementing ModelBackend. + """ + + def __init__(self, unit_name, meta): + self.unit_name = unit_name + self.app_name = self.unit_name.split('/')[0] + self.model_name = None + self._calls = [] + self._meta = meta + self._is_leader = None + self._relation_ids_map = {} # relation name to [relation_ids,...] + self._relation_names = {} # reverse map from relation_id to relation_name + self._relation_list_map = {} # relation_id: [unit_name,...] 
+ self._relation_data = {} # {relation_id: {name: data}} + self._config = {} + self._is_leader = False + self._resources_map = {} + self._pod_spec = None + self._app_status = {'status': 'unknown', 'message': ''} + self._unit_status = {'status': 'maintenance', 'message': ''} + self._workload_version = None + + def relation_ids(self, relation_name): + try: + return self._relation_ids_map[relation_name] + except KeyError as e: + if relation_name not in self._meta.relations: + raise model.ModelError('{} is not a known relation'.format(relation_name)) from e + return [] + + def relation_list(self, relation_id): + try: + return self._relation_list_map[relation_id] + except KeyError as e: + raise model.RelationNotFoundError from e + + def relation_get(self, relation_id, member_name, is_app): + if is_app and '/' in member_name: + member_name = member_name.split('/')[0] + if relation_id not in self._relation_data: + raise model.RelationNotFoundError() + return self._relation_data[relation_id][member_name].copy() + + def relation_set(self, relation_id, key, value, is_app): + relation = self._relation_data[relation_id] + if is_app: + bucket_key = self.app_name + else: + bucket_key = self.unit_name + if bucket_key not in relation: + relation[bucket_key] = {} + bucket = relation[bucket_key] + if value == '': + bucket.pop(key, None) + else: + bucket[key] = value + + def config_get(self): + return self._config + + def is_leader(self): + return self._is_leader + + def application_version_set(self, version): + self._workload_version = version + + def resource_get(self, resource_name): + return self._resources_map[resource_name] + + def pod_spec_set(self, spec, k8s_resources): + self._pod_spec = (spec, k8s_resources) + + def status_get(self, *, is_app=False): + if is_app: + return self._app_status + else: + return self._unit_status + + def status_set(self, status, message='', *, is_app=False): + if is_app: + self._app_status = {'status': status, 'message': message} + else: + self._unit_status = {'status': status, 'message': message} + + def storage_list(self, name): + raise NotImplementedError(self.storage_list) + + def storage_get(self, storage_name_id, attribute): + raise NotImplementedError(self.storage_get) + + def storage_add(self, name, count=1): + raise NotImplementedError(self.storage_add) + + def action_get(self): + raise NotImplementedError(self.action_get) + + def action_set(self, results): + raise NotImplementedError(self.action_set) + + def action_log(self, message): + raise NotImplementedError(self.action_log) + + def action_fail(self, message=''): + raise NotImplementedError(self.action_fail) + + def network_get(self, endpoint_name, relation_id=None): + raise NotImplementedError(self.network_get) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/version.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/version.py new file mode 100644 index 0000000000000000000000000000000000000000..15e5478555ee0fa948bfb0ad57cc79ba7cef3721 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/lib/ops/version.py @@ -0,0 +1,50 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +from pathlib import Path + +__all__ = ('version',) + +_FALLBACK = '0.8' # this gets bumped after release + + +def _get_version(): + version = _FALLBACK + ".dev0+unknown" + + p = Path(__file__).parent + if (p.parent / '.git').exists(): + try: + proc = subprocess.run( + ['git', 'describe', '--tags', '--dirty'], + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + cwd=p, + check=True) + except Exception: + pass + else: + version = proc.stdout.strip().decode('utf8') + if '-' in version: + # version will look like -<#commits>-g[-dirty] + # in terms of PEP 440, the tag we'll make sure is a 'public version identifier'; + # everything after the first - needs to be a 'local version' + public, local = version.split('-', 1) + version = public + '+' + local.replace('-', '.') + # version now +<#commits>.g[.dirty] + # which is PEP440-compliant (as long as is :-) + return version + + +version = _get_version() diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/metadata.yaml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/metadata.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3b3aed87e96121224c63916b04009daf40fcab35 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/metadata.yaml @@ -0,0 +1,26 @@ +name: main +summary: A charm used for testing the basic operation of the entrypoint code. +maintainer: Dmitrii Shcherbakov +description: A charm used for testing the basic operation of the entrypoint code. +tags: + - misc +series: + - bionic + - cosmic + - disco +min-juju-version: 2.7.1 +provides: + db: + interface: db +requires: + mon: + interface: monitoring +peers: + ha: + interface: cluster +subordinate: false +storage: + disks: + type: block + multiple: + range: 0- diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/src/charm.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/src/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..154b2376b6ce466a94c0d6745b8b48e48dea00dc --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/charms/test_main/src/charm.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import base64 +import pickle +import sys +import logging + +sys.path.append('lib') + +from ops.charm import CharmBase # noqa: E402 (module-level import after non-import code) +from ops.main import main # noqa: E402 (ditto) + +logger = logging.getLogger() + + +class Charm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + + # This environment variable controls the test charm behavior. + charm_config = os.environ.get('CHARM_CONFIG') + if charm_config is not None: + self._charm_config = pickle.loads(base64.b64decode(charm_config)) + else: + self._charm_config = {} + + # TODO: refactor to use StoredState + # (this implies refactoring most of test_main.py) + self._state_file = self._charm_config.get('STATE_FILE') + try: + with open(str(self._state_file), 'rb') as f: + self._state = pickle.load(f) + except (FileNotFoundError, EOFError): + self._state = { + 'on_install': [], + 'on_start': [], + 'on_config_changed': [], + 'on_update_status': [], + 'on_leader_settings_changed': [], + 'on_db_relation_joined': [], + 'on_mon_relation_changed': [], + 'on_mon_relation_departed': [], + 'on_ha_relation_broken': [], + 'on_foo_bar_action': [], + 'on_start_action': [], + '_on_get_model_name_action': [], + 'on_collect_metrics': [], + + 'on_log_critical_action': [], + 'on_log_error_action': [], + 'on_log_warning_action': [], + 'on_log_info_action': [], + 'on_log_debug_action': [], + + # Observed event types per invocation. A list is used to preserve the + # order in which charm handlers have observed the events. + 'observed_event_types': [], + } + + self.framework.observe(self.on.install, self._on_install) + self.framework.observe(self.on.start, self._on_start) + self.framework.observe(self.on.config_changed, self._on_config_changed) + self.framework.observe(self.on.update_status, self._on_update_status) + self.framework.observe(self.on.leader_settings_changed, self._on_leader_settings_changed) + # Test relation events with endpoints from different + # sections (provides, requires, peers) as well. + self.framework.observe(self.on.db_relation_joined, self._on_db_relation_joined) + self.framework.observe(self.on.mon_relation_changed, self._on_mon_relation_changed) + self.framework.observe(self.on.mon_relation_departed, self._on_mon_relation_departed) + self.framework.observe(self.on.ha_relation_broken, self._on_ha_relation_broken) + + if self._charm_config.get('USE_ACTIONS'): + self.framework.observe(self.on.start_action, self._on_start_action) + self.framework.observe(self.on.foo_bar_action, self._on_foo_bar_action) + self.framework.observe(self.on.get_model_name_action, self._on_get_model_name_action) + self.framework.observe(self.on.get_status_action, self._on_get_status_action) + + self.framework.observe(self.on.collect_metrics, self._on_collect_metrics) + + if self._charm_config.get('USE_LOG_ACTIONS'): + self.framework.observe(self.on.log_critical_action, self._on_log_critical_action) + self.framework.observe(self.on.log_error_action, self._on_log_error_action) + self.framework.observe(self.on.log_warning_action, self._on_log_warning_action) + self.framework.observe(self.on.log_info_action, self._on_log_info_action) + self.framework.observe(self.on.log_debug_action, self._on_log_debug_action) + + if self._charm_config.get('TRY_EXCEPTHOOK'): + raise RuntimeError("failing as requested") + + def _write_state(self): + """Write state variables so that the parent process can read them. + + Each invocation will override the previous state which is intentional. 
+ """ + if self._state_file is not None: + with self._state_file.open('wb') as f: + pickle.dump(self._state, f) + + def _on_install(self, event): + self._state['on_install'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_start(self, event): + self._state['on_start'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_config_changed(self, event): + self._state['on_config_changed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + event.defer() + self._write_state() + + def _on_update_status(self, event): + self._state['on_update_status'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_leader_settings_changed(self, event): + self._state['on_leader_settings_changed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_db_relation_joined(self, event): + assert event.app is not None, 'application name cannot be None for a relation-joined event' + self._state['on_db_relation_joined'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['db_relation_joined_data'] = event.snapshot() + self._write_state() + + def _on_mon_relation_changed(self, event): + assert event.app is not None, ( + 'application name cannot be None for a relation-changed event') + if os.environ.get('JUJU_REMOTE_UNIT'): + assert event.unit is not None, ( + 'a unit name cannot be None for a relation-changed event' + ' associated with a remote unit') + self._state['on_mon_relation_changed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['mon_relation_changed_data'] = event.snapshot() + self._write_state() + + def _on_mon_relation_departed(self, event): + assert event.app is not None, ( + 'application name cannot be None for a relation-departed event') + self._state['on_mon_relation_departed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['mon_relation_departed_data'] = event.snapshot() + self._write_state() + + def _on_ha_relation_broken(self, event): + assert event.app is None, ( + 'relation-broken events cannot have a reference to a remote application') + assert event.unit is None, ( + 'relation broken events cannot have a reference to a remote unit') + self._state['on_ha_relation_broken'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['ha_relation_broken_data'] = event.snapshot() + self._write_state() + + def _on_start_action(self, event): + assert event.handle.kind == 'start_action', ( + 'event action name cannot be different from the one being handled') + self._state['on_start_action'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_foo_bar_action(self, event): + assert event.handle.kind == 'foo_bar_action', ( + 'event action name cannot be different from the one being handled') + self._state['on_foo_bar_action'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def _on_get_status_action(self, event): + self._state['status_name'] = self.unit.status.name + self._state['status_message'] = self.unit.status.message + self._write_state() + + def _on_collect_metrics(self, event): + self._state['on_collect_metrics'].append(type(event)) + 
self._state['observed_event_types'].append(type(event)) + event.add_metrics({'foo': 42}, {'bar': 4.2}) + self._write_state() + + def _on_log_critical_action(self, event): + logger.critical('super critical') + + def _on_log_error_action(self, event): + logger.error('grave error') + + def _on_log_warning_action(self, event): + logger.warning('wise warning') + + def _on_log_info_action(self, event): + logger.info('useful info') + + def _on_log_debug_action(self, event): + logger.debug('insightful debug') + + def _on_get_model_name_action(self, event): + self._state['_on_get_model_name_action'].append(self.model.name) + self._write_state() + + +if __name__ == '__main__': + main(Charm) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_charm.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_charm.py new file mode 100755 index 0000000000000000000000000000000000000000..f698aec3b71a837134ecd6d2d83164763e9a1957 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_charm.py @@ -0,0 +1,322 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest +import tempfile +import shutil + +from pathlib import Path + +from ops.charm import ( + CharmBase, + CharmMeta, + CharmEvents, +) +from ops.framework import Framework, EventSource, EventBase +from ops.model import Model, _ModelBackend +from ops.storage import SQLiteStorage + +from .test_helpers import fake_script, fake_script_calls + + +class TestCharm(unittest.TestCase): + + def setUp(self): + def restore_env(env): + os.environ.clear() + os.environ.update(env) + self.addCleanup(restore_env, os.environ.copy()) + + os.environ['PATH'] = "{}:{}".format(Path(__file__).parent / 'bin', os.environ['PATH']) + os.environ['JUJU_UNIT_NAME'] = 'local/0' + + self.tmpdir = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(self.tmpdir)) + self.meta = CharmMeta() + + class CustomEvent(EventBase): + pass + + class TestCharmEvents(CharmEvents): + custom = EventSource(CustomEvent) + + # Relations events are defined dynamically and modify the class attributes. + # We use a subclass temporarily to prevent these side effects from leaking. 
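+        # (The cleanup registered just below restores the pristine CharmEvents
+        # class, so later tests are not left observing the dynamically added
+        # 'custom' event.)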
+ CharmBase.on = TestCharmEvents() + + def cleanup(): + CharmBase.on = CharmEvents() + self.addCleanup(cleanup) + + def create_framework(self): + model = Model(self.meta, _ModelBackend('local/0')) + framework = Framework(SQLiteStorage(':memory:'), self.tmpdir, self.meta, model) + self.addCleanup(framework.close) + return framework + + def test_basic(self): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + + self.started = False + framework.observe(self.on.start, self._on_start) + + def _on_start(self, event): + self.started = True + + events = list(MyCharm.on.events()) + self.assertIn('install', events) + self.assertIn('custom', events) + + framework = self.create_framework() + charm = MyCharm(framework) + charm.on.start.emit() + + self.assertEqual(charm.started, True) + + with self.assertRaisesRegex(TypeError, "observer methods must now be explicitly provided"): + framework.observe(charm.on.start, charm) + + def test_helper_properties(self): + framework = self.create_framework() + + class MyCharm(CharmBase): + pass + + charm = MyCharm(framework) + self.assertEqual(charm.app, framework.model.app) + self.assertEqual(charm.unit, framework.model.unit) + self.assertEqual(charm.meta, framework.meta) + self.assertEqual(charm.charm_dir, framework.charm_dir) + + def test_relation_events(self): + + class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.seen = [] + for rel in ('req1', 'req-2', 'pro1', 'pro-2', 'peer1', 'peer-2'): + # Hook up relation events to generic handler. + self.framework.observe(self.on[rel].relation_joined, self.on_any_relation) + self.framework.observe(self.on[rel].relation_changed, self.on_any_relation) + self.framework.observe(self.on[rel].relation_departed, self.on_any_relation) + self.framework.observe(self.on[rel].relation_broken, self.on_any_relation) + + def on_any_relation(self, event): + assert event.relation.name == 'req1' + assert event.relation.app.name == 'remote' + self.seen.append(type(event).__name__) + + # language=YAML + self.meta = CharmMeta.from_yaml(metadata=''' +name: my-charm +requires: + req1: + interface: req1 + req-2: + interface: req2 +provides: + pro1: + interface: pro1 + pro-2: + interface: pro2 +peers: + peer1: + interface: peer1 + peer-2: + interface: peer2 +''') + + charm = MyCharm(self.create_framework()) + + rel = charm.framework.model.get_relation('req1', 1) + unit = charm.framework.model.get_unit('remote/0') + charm.on['req1'].relation_joined.emit(rel, unit) + charm.on['req1'].relation_changed.emit(rel, unit) + charm.on['req-2'].relation_changed.emit(rel, unit) + charm.on['pro1'].relation_departed.emit(rel, unit) + charm.on['pro-2'].relation_departed.emit(rel, unit) + charm.on['peer1'].relation_broken.emit(rel) + charm.on['peer-2'].relation_broken.emit(rel) + + self.assertEqual(charm.seen, [ + 'RelationJoinedEvent', + 'RelationChangedEvent', + 'RelationChangedEvent', + 'RelationDepartedEvent', + 'RelationDepartedEvent', + 'RelationBrokenEvent', + 'RelationBrokenEvent', + ]) + + def test_storage_events(self): + + class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.seen = [] + self.framework.observe(self.on['stor1'].storage_attached, self._on_stor1_attach) + self.framework.observe(self.on['stor2'].storage_detaching, self._on_stor2_detach) + self.framework.observe(self.on['stor3'].storage_attached, self._on_stor3_attach) + self.framework.observe(self.on['stor-4'].storage_attached, self._on_stor4_attach) + + def _on_stor1_attach(self, 
event): + self.seen.append(type(event).__name__) + + def _on_stor2_detach(self, event): + self.seen.append(type(event).__name__) + + def _on_stor3_attach(self, event): + self.seen.append(type(event).__name__) + + def _on_stor4_attach(self, event): + self.seen.append(type(event).__name__) + + # language=YAML + self.meta = CharmMeta.from_yaml(''' +name: my-charm +storage: + stor-4: + multiple: + range: 2-4 + type: filesystem + stor1: + type: filesystem + stor2: + multiple: + range: "2" + type: filesystem + stor3: + multiple: + range: 2- + type: filesystem +''') + + self.assertIsNone(self.meta.storages['stor1'].multiple_range) + self.assertEqual(self.meta.storages['stor2'].multiple_range, (2, 2)) + self.assertEqual(self.meta.storages['stor3'].multiple_range, (2, None)) + self.assertEqual(self.meta.storages['stor-4'].multiple_range, (2, 4)) + + charm = MyCharm(self.create_framework()) + + charm.on['stor1'].storage_attached.emit() + charm.on['stor2'].storage_detaching.emit() + charm.on['stor3'].storage_attached.emit() + charm.on['stor-4'].storage_attached.emit() + + self.assertEqual(charm.seen, [ + 'StorageAttachedEvent', + 'StorageDetachingEvent', + 'StorageAttachedEvent', + 'StorageAttachedEvent', + ]) + + @classmethod + def _get_action_test_meta(cls): + # language=YAML + return CharmMeta.from_yaml(metadata=''' +name: my-charm +''', actions=''' +foo-bar: + description: "Foos the bar." + params: + foo-name: + description: "A foo name to bar" + type: string + silent: + default: false + description: "" + type: boolean + required: foo-bar + title: foo-bar +start: + description: "Start the unit." +''') + + def _test_action_events(self, cmd_type): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + framework.observe(self.on.foo_bar_action, self._on_foo_bar_action) + framework.observe(self.on.start_action, self._on_start_action) + + def _on_foo_bar_action(self, event): + self.seen_action_params = event.params + event.log('test-log') + event.set_results({'res': 'val with spaces'}) + event.fail('test-fail') + + def _on_start_action(self, event): + pass + + fake_script(self, cmd_type + '-get', """echo '{"foo-name": "name", "silent": true}'""") + fake_script(self, cmd_type + '-set', "") + fake_script(self, cmd_type + '-log', "") + fake_script(self, cmd_type + '-fail', "") + self.meta = self._get_action_test_meta() + + os.environ['JUJU_{}_NAME'.format(cmd_type.upper())] = 'foo-bar' + framework = self.create_framework() + charm = MyCharm(framework) + + events = list(MyCharm.on.events()) + self.assertIn('foo_bar_action', events) + self.assertIn('start_action', events) + + charm.on.foo_bar_action.emit() + self.assertEqual(charm.seen_action_params, {"foo-name": "name", "silent": True}) + self.assertEqual(fake_script_calls(self), [ + [cmd_type + '-get', '--format=json'], + [cmd_type + '-log', "test-log"], + [cmd_type + '-set', "res=val with spaces"], + [cmd_type + '-fail', "test-fail"], + ]) + + # Make sure that action events that do not match the current context are + # not possible to emit by hand. 
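+ # (Emitting an action event checks JUJU_ACTION_NAME, so only the action Juju is
+ # currently dispatching can be raised from charm code; anything else fails with
+ # a RuntimeError, as asserted below.)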
+ with self.assertRaises(RuntimeError): + charm.on.start_action.emit() + + def test_action_events(self): + self._test_action_events('action') + + def _test_action_event_defer_fails(self, cmd_type): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + framework.observe(self.on.start_action, self._on_start_action) + + def _on_start_action(self, event): + event.defer() + + fake_script(self, cmd_type + '-get', """echo '{"foo-name": "name", "silent": true}'""") + self.meta = self._get_action_test_meta() + + os.environ['JUJU_{}_NAME'.format(cmd_type.upper())] = 'start' + framework = self.create_framework() + charm = MyCharm(framework) + + with self.assertRaises(RuntimeError): + charm.on.start_action.emit() + + def test_action_event_defer_fails(self): + self._test_action_event_defer_fails('action') diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_framework.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_framework.py new file mode 100755 index 0000000000000000000000000000000000000000..2c4a7bf0de34396572d73918518f9617b4f7dc55 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_framework.py @@ -0,0 +1,1798 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import gc +import inspect +import io +import os +import re +import shutil +import sys +import tempfile +from unittest.mock import patch +from pathlib import Path + +import logassert + +from ops import charm +from ops.framework import ( + _BREAKPOINT_WELCOME_MESSAGE, + BoundStoredState, + CommitEvent, + EventBase, + _event_regex, + ObjectEvents, + EventSource, + Framework, + Handle, + Object, + PreCommitEvent, + StoredList, + StoredState, + StoredStateData, +) +from ops.storage import NoSnapshotError, SQLiteStorage +from test.test_helpers import fake_script, BaseTestCase + + +class TestFramework(BaseTestCase): + + def setUp(self): + self.tmpdir = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(self.tmpdir)) + + patcher = patch('ops.storage.SQLiteStorage.DB_LOCK_TIMEOUT', datetime.timedelta(0)) + patcher.start() + self.addCleanup(patcher.stop) + logassert.setup(self, 'ops') + + def test_deprecated_init(self): + # For 0.7, this still works, but it is deprecated. 
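+ # (A plain path or ':memory:' string is wrapped in an SQLiteStorage internally;
+ # the non-deprecated spelling is roughly
+ #     Framework(SQLiteStorage(':memory:'), charm_dir, meta, model)
+ # as used by create_framework() in the test helpers.)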
+ framework = Framework(':memory:', None, None, None) + self.assertLoggedWarning( + "deprecated: Framework now takes a Storage not a path") + self.assertIsInstance(framework._storage, SQLiteStorage) + + def test_handle_path(self): + cases = [ + (Handle(None, "root", None), "root"), + (Handle(None, "root", "1"), "root[1]"), + (Handle(Handle(None, "root", None), "child", None), "root/child"), + (Handle(Handle(None, "root", "1"), "child", "2"), "root[1]/child[2]"), + ] + for handle, path in cases: + self.assertEqual(str(handle), path) + self.assertEqual(Handle.from_path(path), handle) + + def test_handle_attrs_readonly(self): + handle = Handle(None, 'kind', 'key') + with self.assertRaises(AttributeError): + handle.parent = 'foo' + with self.assertRaises(AttributeError): + handle.kind = 'foo' + with self.assertRaises(AttributeError): + handle.key = 'foo' + with self.assertRaises(AttributeError): + handle.path = 'foo' + + def test_restore_unknown(self): + framework = self.create_framework() + + class Foo(Object): + pass + + handle = Handle(None, "a_foo", "some_key") + + framework.register_type(Foo, None, handle.kind) + + try: + framework.load_snapshot(handle) + except NoSnapshotError as e: + self.assertEqual(e.handle_path, str(handle)) + self.assertEqual(str(e), "no snapshot data found for a_foo[some_key] object") + else: + self.fail("exception NoSnapshotError not raised") + + def test_snapshot_roundtrip(self): + class Foo: + def __init__(self, handle, n): + self.handle = handle + self.my_n = n + + def snapshot(self): + return {"My N!": self.my_n} + + def restore(self, snapshot): + self.my_n = snapshot["My N!"] + 1 + + handle = Handle(None, "a_foo", "some_key") + event = Foo(handle, 1) + + framework1 = self.create_framework(tmpdir=self.tmpdir) + framework1.register_type(Foo, None, handle.kind) + framework1.save_snapshot(event) + framework1.commit() + framework1.close() + + framework2 = self.create_framework(tmpdir=self.tmpdir) + framework2.register_type(Foo, None, handle.kind) + event2 = framework2.load_snapshot(handle) + self.assertEqual(event2.my_n, 2) + + framework2.save_snapshot(event2) + del event2 + gc.collect() + event3 = framework2.load_snapshot(handle) + self.assertEqual(event3.my_n, 3) + + framework2.drop_snapshot(event.handle) + framework2.commit() + framework2.close() + + framework3 = self.create_framework(tmpdir=self.tmpdir) + framework3.register_type(Foo, None, handle.kind) + + self.assertRaises(NoSnapshotError, framework3.load_snapshot, handle) + + def test_simple_event_observer(self): + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyNotifier(Object): + foo = EventSource(MyEvent) + bar = EventSource(MyEvent) + baz = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def on_any(self, event): + self.seen.append("on_any:" + event.handle.kind) + + def on_foo(self, event): + self.seen.append("on_foo:" + event.handle.kind) + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + framework.observe(pub.foo, obs.on_any) + framework.observe(pub.bar, obs.on_any) + + with self.assertRaisesRegex(RuntimeError, "^Framework.observe requires a method"): + framework.observe(pub.baz, obs) + + pub.foo.emit() + pub.bar.emit() + + self.assertEqual(obs.seen, ["on_any:foo", "on_any:bar"]) + + def test_bad_sig_observer(self): + + class MyEvent(EventBase): + pass + + class MyNotifier(Object): + foo = EventSource(MyEvent) + bar = EventSource(MyEvent) + baz = 
EventSource(MyEvent) + qux = EventSource(MyEvent) + + class MyObserver(Object): + def _on_foo(self): + assert False, 'should not be reached' + + def _on_bar(self, event, extra): + assert False, 'should not be reached' + + def _on_baz(self, event, extra=None, *, k): + assert False, 'should not be reached' + + def _on_qux(self, event, extra=None): + assert False, 'should not be reached' + + framework = self.create_framework() + pub = MyNotifier(framework, "pub") + obs = MyObserver(framework, "obs") + + with self.assertRaisesRegex(TypeError, "must accept event parameter"): + framework.observe(pub.foo, obs._on_foo) + with self.assertRaisesRegex(TypeError, "has extra required parameter"): + framework.observe(pub.bar, obs._on_bar) + with self.assertRaisesRegex(TypeError, "has extra required parameter"): + framework.observe(pub.baz, obs._on_baz) + framework.observe(pub.qux, obs._on_qux) + + def test_on_pre_commit_emitted(self): + framework = self.create_framework(tmpdir=self.tmpdir) + + class PreCommitObserver(Object): + + _stored = StoredState() + + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + self._stored.myinitdata = 40 + + def on_pre_commit(self, event): + self._stored.myinitdata = 41 + self._stored.mydata = 42 + self.seen.append(type(event)) + + def on_commit(self, event): + # Modifications made here will not be persisted. + self._stored.myinitdata = 42 + self._stored.mydata = 43 + self._stored.myotherdata = 43 + self.seen.append(type(event)) + + obs = PreCommitObserver(framework, None) + + framework.observe(framework.on.pre_commit, obs.on_pre_commit) + + framework.commit() + + self.assertEqual(obs._stored.myinitdata, 41) + self.assertEqual(obs._stored.mydata, 42) + self.assertTrue(obs.seen, [PreCommitEvent, CommitEvent]) + framework.close() + + other_framework = self.create_framework(tmpdir=self.tmpdir) + + new_obs = PreCommitObserver(other_framework, None) + + self.assertEqual(obs._stored.myinitdata, 41) + self.assertEqual(new_obs._stored.mydata, 42) + + with self.assertRaises(AttributeError): + new_obs._stored.myotherdata + + def test_defer_and_reemit(self): + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyNotifier1(Object): + a = EventSource(MyEvent) + b = EventSource(MyEvent) + + class MyNotifier2(Object): + c = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + self.done = {} + + def on_any(self, event): + self.seen.append(event.handle.kind) + if not self.done.get(event.handle.kind): + event.defer() + + pub1 = MyNotifier1(framework, "1") + pub2 = MyNotifier2(framework, "1") + obs1 = MyObserver(framework, "1") + obs2 = MyObserver(framework, "2") + + framework.observe(pub1.a, obs1.on_any) + framework.observe(pub1.b, obs1.on_any) + framework.observe(pub1.a, obs2.on_any) + framework.observe(pub1.b, obs2.on_any) + framework.observe(pub2.c, obs2.on_any) + + pub1.a.emit() + pub1.b.emit() + pub2.c.emit() + + # Events remain stored because they were deferred. 
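+ # (A deferred event keeps both its snapshot and its notice in storage so it can
+ # be rebuilt and re-delivered by reemit(); once no observer defers it any more,
+ # the snapshot is dropped again, as asserted at the end of this test.)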
+ ev_a_handle = Handle(pub1, "a", "1") + framework.load_snapshot(ev_a_handle) + ev_b_handle = Handle(pub1, "b", "2") + framework.load_snapshot(ev_b_handle) + ev_c_handle = Handle(pub2, "c", "3") + framework.load_snapshot(ev_c_handle) + # make sure the objects are gone before we reemit them + gc.collect() + + framework.reemit() + obs1.done["a"] = True + obs2.done["b"] = True + framework.reemit() + framework.reemit() + obs1.done["b"] = True + obs2.done["a"] = True + framework.reemit() + obs2.done["c"] = True + framework.reemit() + framework.reemit() + framework.reemit() + + self.assertEqual(" ".join(obs1.seen), "a b a b a b b b") + self.assertEqual(" ".join(obs2.seen), "a b c a b c a b c a c a c c") + + # Now the event objects must all be gone from storage. + self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_a_handle) + self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_b_handle) + self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_c_handle) + + def test_custom_event_data(self): + framework = self.create_framework() + + class MyEvent(EventBase): + def __init__(self, handle, n): + super().__init__(handle) + self.my_n = n + + def snapshot(self): + return {"My N!": self.my_n} + + def restore(self, snapshot): + super().restore(snapshot) + self.my_n = snapshot["My N!"] + 1 + + class MyNotifier(Object): + foo = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append("on_foo:{}={}".format(event.handle.kind, event.my_n)) + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + framework.observe(pub.foo, obs._on_foo) + + pub.foo.emit(1) + + framework.reemit() + + # Two things being checked here: + # + # 1. There's a restore roundtrip before the event is first observed. + # That means the data is safe before it's ever seen, and the + # roundtrip logic is tested under normal circumstances. + # + # 2. The renotification restores from the pristine event, not + # from the one modified during the first restore (otherwise + # we'd get a foo=3). 
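+ #    Since restore() above adds 1 to the snapshotted value, both the original
+ #    delivery and the renotification therefore observe my_n == 2.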
+ # + self.assertEqual(obs.seen, ["on_foo:foo=2", "on_foo:foo=2"]) + + def test_weak_observer(self): + framework = self.create_framework() + + observed_events = [] + + class MyEvent(EventBase): + pass + + class MyEvents(ObjectEvents): + foo = EventSource(MyEvent) + + class MyNotifier(Object): + on = MyEvents() + + class MyObserver(Object): + def _on_foo(self, event): + observed_events.append("foo") + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "2") + + framework.observe(pub.on.foo, obs._on_foo) + pub.on.foo.emit() + self.assertEqual(observed_events, ["foo"]) + # Now delete the observer, and note that when we emit the event, it + # doesn't update the local slice again + del obs + gc.collect() + pub.on.foo.emit() + self.assertEqual(observed_events, ["foo"]) + + def test_forget_and_multiple_objects(self): + framework = self.create_framework() + + class MyObject(Object): + pass + + o1 = MyObject(framework, "path") + # Creating a second object at the same path should fail with RuntimeError + with self.assertRaises(RuntimeError): + o2 = MyObject(framework, "path") + # Unless we _forget the object first + framework._forget(o1) + o2 = MyObject(framework, "path") + self.assertEqual(o1.handle.path, o2.handle.path) + # Deleting the tracked object should also work + del o2 + gc.collect() + o3 = MyObject(framework, "path") + self.assertEqual(o1.handle.path, o3.handle.path) + framework.close() + # Or using a second framework + framework_copy = self.create_framework() + o_copy = MyObject(framework_copy, "path") + self.assertEqual(o1.handle.path, o_copy.handle.path) + + def test_forget_and_multiple_objects_with_load_snapshot(self): + framework = self.create_framework(tmpdir=self.tmpdir) + + class MyObject(Object): + def __init__(self, parent, name): + super().__init__(parent, name) + self.value = name + + def snapshot(self): + return self.value + + def restore(self, value): + self.value = value + + framework.register_type(MyObject, None, MyObject.handle_kind) + o1 = MyObject(framework, "path") + framework.save_snapshot(o1) + framework.commit() + o_handle = o1.handle + del o1 + gc.collect() + o2 = framework.load_snapshot(o_handle) + # Trying to load_snapshot a second object at the same path should fail with RuntimeError + with self.assertRaises(RuntimeError): + framework.load_snapshot(o_handle) + # Unless we _forget the object first + framework._forget(o2) + o3 = framework.load_snapshot(o_handle) + self.assertEqual(o2.value, o3.value) + # A loaded object also prevents direct creation of an object + with self.assertRaises(RuntimeError): + MyObject(framework, "path") + framework.close() + # But we can create an object, or load a snapshot in a copy of the framework + framework_copy1 = self.create_framework(tmpdir=self.tmpdir) + o_copy1 = MyObject(framework_copy1, "path") + self.assertEqual(o_copy1.value, "path") + framework_copy1.close() + framework_copy2 = self.create_framework(tmpdir=self.tmpdir) + framework_copy2.register_type(MyObject, None, MyObject.handle_kind) + o_copy2 = framework_copy2.load_snapshot(o_handle) + self.assertEqual(o_copy2.value, "path") + + def test_events_base(self): + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyEvents(ObjectEvents): + foo = EventSource(MyEvent) + bar = EventSource(MyEvent) + + class MyNotifier(Object): + on = MyEvents() + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + 
self.seen.append("on_foo:{}".format(event.handle.kind)) + event.defer() + + def _on_bar(self, event): + self.seen.append("on_bar:{}".format(event.handle.kind)) + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + # Confirm that temporary persistence of BoundEvents doesn't cause errors, + # and that events can be observed. + for bound_event, handler in [(pub.on.foo, obs._on_foo), (pub.on.bar, obs._on_bar)]: + framework.observe(bound_event, handler) + + # Confirm that events can be emitted and seen. + pub.on.foo.emit() + + self.assertEqual(obs.seen, ["on_foo:foo"]) + + def test_conflicting_event_attributes(self): + class MyEvent(EventBase): + pass + + event = EventSource(MyEvent) + + class MyEvents(ObjectEvents): + foo = event + + with self.assertRaises(RuntimeError) as cm: + class OtherEvents(ObjectEvents): + foo = event + self.assertEqual( + str(cm.exception), + "EventSource(MyEvent) reused as MyEvents.foo and OtherEvents.foo") + + with self.assertRaises(RuntimeError) as cm: + class MyNotifier(Object): + on = MyEvents() + bar = event + self.assertEqual( + str(cm.exception), + "EventSource(MyEvent) reused as MyEvents.foo and MyNotifier.bar") + + def test_reemit_ignores_unknown_event_type(self): + # The event type may have been gone for good, and nobody cares, + # so this shouldn't be an error scenario. + + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyNotifier(Object): + foo = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append(event.handle) + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + framework.observe(pub.foo, obs._on_foo) + pub.foo.emit() + + event_handle = obs.seen[0] + self.assertEqual(event_handle.kind, "foo") + + framework.commit() + framework.close() + + framework_copy = self.create_framework() + + # No errors on missing event types here. + framework_copy.reemit() + + # Register the type and check that the event is gone from storage. 
+ framework_copy.register_type(MyEvent, event_handle.parent, event_handle.kind) + self.assertRaises(NoSnapshotError, framework_copy.load_snapshot, event_handle) + + def test_auto_register_event_types(self): + framework = self.create_framework() + + class MyFoo(EventBase): + pass + + class MyBar(EventBase): + pass + + class MyEvents(ObjectEvents): + foo = EventSource(MyFoo) + + class MyNotifier(Object): + on = MyEvents() + bar = EventSource(MyBar) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append("on_foo:{}:{}".format(type(event).__name__, event.handle.kind)) + event.defer() + + def _on_bar(self, event): + self.seen.append("on_bar:{}:{}".format(type(event).__name__, event.handle.kind)) + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + pub.on.foo.emit() + pub.bar.emit() + + framework.observe(pub.on.foo, obs._on_foo) + framework.observe(pub.bar, obs._on_bar) + + pub.on.foo.emit() + pub.bar.emit() + + self.assertEqual(obs.seen, ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"]) + + def test_dynamic_event_types(self): + framework = self.create_framework() + + class MyEventsA(ObjectEvents): + handle_kind = 'on_a' + + class MyEventsB(ObjectEvents): + handle_kind = 'on_b' + + class MyNotifier(Object): + on_a = MyEventsA() + on_b = MyEventsB() + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append("on_foo:{}:{}".format(type(event).__name__, event.handle.kind)) + event.defer() + + def _on_bar(self, event): + self.seen.append("on_bar:{}:{}".format(type(event).__name__, event.handle.kind)) + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + class MyFoo(EventBase): + pass + + class MyBar(EventBase): + pass + + class DeadBeefEvent(EventBase): + pass + + class NoneEvent(EventBase): + pass + + pub.on_a.define_event("foo", MyFoo) + pub.on_b.define_event("bar", MyBar) + + framework.observe(pub.on_a.foo, obs._on_foo) + framework.observe(pub.on_b.bar, obs._on_bar) + + pub.on_a.foo.emit() + pub.on_b.bar.emit() + + self.assertEqual(obs.seen, ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"]) + + # Definitions remained local to the specific type. + self.assertRaises(AttributeError, lambda: pub.on_a.bar) + self.assertRaises(AttributeError, lambda: pub.on_b.foo) + + # Try to use an event name which is not a valid python identifier. + with self.assertRaises(RuntimeError): + pub.on_a.define_event("dead-beef", DeadBeefEvent) + + # Try to use a python keyword for an event name. + with self.assertRaises(RuntimeError): + pub.on_a.define_event("None", NoneEvent) + + # Try to override an existing attribute. + with self.assertRaises(RuntimeError): + pub.on_a.define_event("foo", MyFoo) + + def test_event_key_roundtrip(self): + class MyEvent(EventBase): + def __init__(self, handle, value): + super().__init__(handle) + self.value = value + + def snapshot(self): + return self.value + + def restore(self, value): + self.value = value + + class MyNotifier(Object): + foo = EventSource(MyEvent) + + class MyObserver(Object): + has_deferred = False + + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def _on_foo(self, event): + self.seen.append((event.handle.key, event.value)) + # Only defer the first event and once. 
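+ # (The class-level flag means exactly one defer happens across both frameworks,
+ # so the second framework later sees the re-emitted 'first' event in addition
+ # to its own 'second' event.)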
+ if not MyObserver.has_deferred: + event.defer() + MyObserver.has_deferred = True + + framework1 = self.create_framework(tmpdir=self.tmpdir) + pub1 = MyNotifier(framework1, "pub") + obs1 = MyObserver(framework1, "obs") + framework1.observe(pub1.foo, obs1._on_foo) + pub1.foo.emit('first') + self.assertEqual(obs1.seen, [('1', 'first')]) + + framework1.commit() + framework1.close() + del framework1 + + framework2 = self.create_framework(tmpdir=self.tmpdir) + pub2 = MyNotifier(framework2, "pub") + obs2 = MyObserver(framework2, "obs") + framework2.observe(pub2.foo, obs2._on_foo) + pub2.foo.emit('second') + framework2.reemit() + + # First observer didn't get updated, since framework it was bound to is gone. + self.assertEqual(obs1.seen, [('1', 'first')]) + # Second observer saw the new event plus the reemit of the first event. + # (The event key goes up by 2 due to the pre-commit and commit events.) + self.assertEqual(obs2.seen, [('4', 'second'), ('1', 'first')]) + + def test_helper_properties(self): + framework = self.create_framework() + framework.model = 'test-model' + framework.meta = 'test-meta' + + my_obj = Object(framework, 'my_obj') + self.assertEqual(my_obj.model, framework.model) + + def test_ban_concurrent_frameworks(self): + f = self.create_framework(tmpdir=self.tmpdir) + with self.assertRaises(Exception) as cm: + self.create_framework(tmpdir=self.tmpdir) + self.assertIn('database is locked', str(cm.exception)) + f.close() + + def test_snapshot_saving_restricted_to_simple_types(self): + # this can not be saved, as it has not simple types! + to_be_saved = {"bar": TestFramework} + + class FooEvent(EventBase): + def snapshot(self): + return to_be_saved + + handle = Handle(None, "a_foo", "some_key") + event = FooEvent(handle) + + framework = self.create_framework() + framework.register_type(FooEvent, None, handle.kind) + with self.assertRaises(ValueError) as cm: + framework.save_snapshot(event) + expected = ( + "unable to save the data for FooEvent, it must contain only simple types: " + "{'bar': }") + self.assertEqual(str(cm.exception), expected) + + def test_unobserved_events_dont_leave_cruft(self): + class FooEvent(EventBase): + def snapshot(self): + return {'content': 1} + + class Events(ObjectEvents): + foo = EventSource(FooEvent) + + class Emitter(Object): + on = Events() + + framework = self.create_framework() + e = Emitter(framework, 'key') + e.on.foo.emit() + ev_1_handle = Handle(e.on, "foo", "1") + with self.assertRaises(NoSnapshotError): + framework.load_snapshot(ev_1_handle) + # Committing will save the framework's state, but no other snapshots should be saved + framework.commit() + events = framework._storage.list_snapshots() + self.assertEqual(list(events), [framework._stored.handle.path]) + + def test_event_regex(self): + examples = [ + 'Ubuntu/on/config_changed[7]', + 'on/commit[9]', + 'on/pre_commit[8]', + ] + non_examples = [ + 'StoredStateData[_stored]', + 'ObjectWithSTorage[obj]StoredStateData[_stored]', + ] + regex = re.compile(_event_regex) + for e in examples: + self.assertIsNotNone(regex.match(e)) + for e in non_examples: + self.assertIsNone(regex.match(e)) + + def test_remove_unreferenced_events(self): + framework = self.create_framework() + + class Evt(EventBase): + pass + + class Events(ObjectEvents): + event = EventSource(Evt) + + class ObjectWithStorage(Object): + _stored = StoredState() + on = Events() + + def __init__(self, framework, key): + super().__init__(framework, key) + self._stored.set_default(foo=2) + self.framework.observe(self.on.event, 
self._on_event) + + def _on_event(self, event): + event.defer() + + # This is an event that 'happened in the past' that doesn't have an associated notice. + o = ObjectWithStorage(framework, 'obj') + handle = Handle(o.on, 'event', '100') + event = Evt(handle) + framework.save_snapshot(event) + self.assertEqual(list(framework._storage.list_snapshots()), [handle.path]) + o.on.event.emit() + self.assertEqual( + list(framework._storage.notices('')), + [('ObjectWithStorage[obj]/on/event[1]', 'ObjectWithStorage[obj]', '_on_event')]) + framework.commit() + self.assertEqual( + sorted(framework._storage.list_snapshots()), + sorted(['ObjectWithStorage[obj]/on/event[100]', + 'StoredStateData[_stored]', + 'ObjectWithStorage[obj]/StoredStateData[_stored]', + 'ObjectWithStorage[obj]/on/event[1]'])) + framework.remove_unreferenced_events() + self.assertEqual( + sorted(framework._storage.list_snapshots()), + sorted([ + 'StoredStateData[_stored]', + 'ObjectWithStorage[obj]/StoredStateData[_stored]', + 'ObjectWithStorage[obj]/on/event[1]'])) + + +class TestStoredState(BaseTestCase): + + def setUp(self): + self.tmpdir = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(self.tmpdir)) + + def test_basic_state_storage(self): + class SomeObject(Object): + _stored = StoredState() + + self._stored_state_tests(SomeObject) + + def test_straight_subclass(self): + class SomeObject(Object): + _stored = StoredState() + + class Sub(SomeObject): + pass + + self._stored_state_tests(Sub) + + def test_straight_sub_subclass(self): + class SomeObject(Object): + _stored = StoredState() + + class Sub(SomeObject): + pass + + class SubSub(SomeObject): + pass + + self._stored_state_tests(SubSub) + + def test_two_subclasses(self): + class SomeObject(Object): + _stored = StoredState() + + class SubA(SomeObject): + pass + + class SubB(SomeObject): + pass + + self._stored_state_tests(SubA) + self._stored_state_tests(SubB) + + def test_the_crazy_thing(self): + class NoState(Object): + pass + + class StatedObject(NoState): + _stored = StoredState() + + class Sibling(NoState): + pass + + class FinalChild(StatedObject, Sibling): + pass + + self._stored_state_tests(FinalChild) + + def _stored_state_tests(self, cls): + framework = self.create_framework(tmpdir=self.tmpdir) + obj = cls(framework, "1") + + try: + obj._stored.foo + except AttributeError as e: + self.assertEqual(str(e), "attribute 'foo' is not stored") + else: + self.fail("AttributeError not raised") + + try: + obj._stored.on = "nonono" + except AttributeError as e: + self.assertEqual(str(e), "attribute 'on' is reserved and cannot be set") + else: + self.fail("AttributeError not raised") + + obj._stored.foo = 41 + obj._stored.foo = 42 + obj._stored.bar = "s" + obj._stored.baz = 4.2 + obj._stored.bing = True + + self.assertEqual(obj._stored.foo, 42) + + framework.commit() + + # This won't be committed, and should not be seen. + obj._stored.foo = 43 + + framework.close() + + # Since this has the same absolute object handle, it will get its state back. 
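+ # (StoredState data is keyed by the object's handle path, for example
+ # "SomeObject[1]/StoredStateData[_stored]", so a fresh framework over the same
+ # database hands the values back to an object created at that same path.)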
+ framework_copy = self.create_framework(tmpdir=self.tmpdir) + obj_copy = cls(framework_copy, "1") + self.assertEqual(obj_copy._stored.foo, 42) + self.assertEqual(obj_copy._stored.bar, "s") + self.assertEqual(obj_copy._stored.baz, 4.2) + self.assertEqual(obj_copy._stored.bing, True) + + framework_copy.close() + + def test_two_subclasses_no_conflicts(self): + class Base(Object): + _stored = StoredState() + + class SubA(Base): + pass + + class SubB(Base): + pass + + framework = self.create_framework(tmpdir=self.tmpdir) + a = SubA(framework, None) + b = SubB(framework, None) + z = Base(framework, None) + + a._stored.foo = 42 + b._stored.foo = "hello" + z._stored.foo = {1} + + framework.commit() + framework.close() + + framework2 = self.create_framework(tmpdir=self.tmpdir) + a2 = SubA(framework2, None) + b2 = SubB(framework2, None) + z2 = Base(framework2, None) + + self.assertEqual(a2._stored.foo, 42) + self.assertEqual(b2._stored.foo, "hello") + self.assertEqual(z2._stored.foo, {1}) + + def test_two_names_one_state(self): + class Mine(Object): + _stored = StoredState() + _stored2 = _stored + + framework = self.create_framework() + obj = Mine(framework, None) + + with self.assertRaises(RuntimeError): + obj._stored.foo = 42 + + with self.assertRaises(RuntimeError): + obj._stored2.foo = 42 + + framework.close() + + # make sure we're not changing the object on failure + self.assertNotIn("_stored", obj.__dict__) + self.assertNotIn("_stored2", obj.__dict__) + + def test_same_name_two_classes(self): + class Base(Object): + pass + + class A(Base): + _stored = StoredState() + + class B(Base): + _stored = A._stored + + framework = self.create_framework() + a = A(framework, None) + b = B(framework, None) + + # NOTE it's the second one that actually triggers the + # exception, but that's an implementation detail + a._stored.foo = 42 + + with self.assertRaises(RuntimeError): + b._stored.foo = "xyzzy" + + framework.close() + + # make sure we're not changing the object on failure + self.assertNotIn("_stored", b.__dict__) + + def test_mutable_types_invalid(self): + framework = self.create_framework() + + class SomeObject(Object): + _stored = StoredState() + + obj = SomeObject(framework, '1') + try: + class CustomObject: + pass + obj._stored.foo = CustomObject() + except AttributeError as e: + self.assertEqual( + str(e), + "attribute 'foo' cannot be a CustomObject: must be int/float/dict/list/etc") + else: + self.fail('AttributeError not raised') + + framework.commit() + + def test_mutable_types(self): + # Test and validation functions in a list of 2-tuples. + # Assignment and keywords like del are not supported in lambdas + # so functions are used instead. + test_operations = [( + lambda: {}, # Operand A. + None, # Operand B. + {}, # Expected result. + lambda a, b: None, # Operation to perform. + lambda res, expected_res: self.assertEqual(res, expected_res) # Validation to perform. 
+ ), ( + lambda: {}, + {'a': {}}, + {'a': {}}, + lambda a, b: a.update(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'a': {}}, + {'b': 'c'}, + {'a': {'b': 'c'}}, + lambda a, b: a['a'].update(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'a': {'b': 'c'}}, + {'d': 'e'}, + {'a': {'b': 'c', 'd': 'e'}}, + lambda a, b: a['a'].update(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'a': {'b': 'c', 'd': 'e'}}, + 'd', + {'a': {'b': 'c'}}, + lambda a, b: a['a'].pop(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'s': set()}, + 'a', + {'s': {'a'}}, + lambda a, b: a['s'].add(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'s': {'a'}}, + 'a', + {'s': set()}, + lambda a, b: a['s'].discard(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: [], + None, + [], + lambda a, b: None, + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: [], + 'a', + ['a'], + lambda a, b: a.append(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a'], + ['c'], + ['a', ['c']], + lambda a, b: a.append(b), + lambda res, expected_res: ( + self.assertEqual(res, expected_res), + self.assertIsInstance(res[1], StoredList), + ) + ), ( + lambda: ['a', ['c']], + 'b', + ['b', 'a', ['c']], + lambda a, b: a.insert(0, b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['b', 'a', ['c']], + ['d'], + ['b', ['d'], 'a', ['c']], + lambda a, b: a.insert(1, b), + lambda res, expected_res: ( + self.assertEqual(res, expected_res), + self.assertIsInstance(res[1], StoredList) + ), + ), ( + lambda: ['b', 'a', ['c']], + ['d'], + ['b', ['d'], ['c']], + # a[1] = b + lambda a, b: a.__setitem__(1, b), + lambda res, expected_res: ( + self.assertEqual(res, expected_res), + self.assertIsInstance(res[1], StoredList) + ), + ), ( + lambda: ['b', ['d'], 'a', ['c']], + 0, + [['d'], 'a', ['c']], + lambda a, b: a.pop(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: [['d'], 'a', ['c']], + ['d'], + ['a', ['c']], + lambda a, b: a.remove(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c']], + 'd', + ['a', ['c', 'd']], + lambda a, b: a[1].append(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c', 'd']], + 1, + ['a', ['c']], + lambda a, b: a[1].pop(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c']], + 'd', + ['a', ['c', 'd']], + lambda a, b: a[1].insert(1, b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c', 'd']], + 'd', + ['a', ['c']], + lambda a, b: a[1].remove(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(), + None, + set(), + lambda a, b: None, + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(), + 'a', + set(['a']), + lambda a, b: a.add(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(['a']), + 'a', + set(), + lambda a, b: a.discard(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(), + {'a'}, + set(), + # Nested sets are not allowed as sets themselves are not hashable. 
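+ # (StoredSet delegates to the underlying built-in set, so adding an unhashable
+ # value raises the same TypeError a plain set would.)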
+ lambda a, b: self.assertRaises(TypeError, a.add, b), + lambda res, expected_res: self.assertEqual(res, expected_res) + )] + + class SomeObject(Object): + _stored = StoredState() + + class WrappedFramework(Framework): + def __init__(self, store, charm_dir, meta, model): + super().__init__(store, charm_dir, meta, model) + self.snapshots = [] + + def save_snapshot(self, value): + if value.handle.path == 'SomeObject[1]/StoredStateData[_stored]': + self.snapshots.append((type(value), value.snapshot())) + return super().save_snapshot(value) + + # Validate correctness of modification operations. + for get_a, b, expected_res, op, validate_op in test_operations: + storage = SQLiteStorage(self.tmpdir / "framework.data") + framework = WrappedFramework(storage, self.tmpdir, None, None) + obj = SomeObject(framework, '1') + + obj._stored.a = get_a() + self.assertTrue(isinstance(obj._stored, BoundStoredState)) + + op(obj._stored.a, b) + validate_op(obj._stored.a, expected_res) + + obj._stored.a = get_a() + framework.commit() + # We should see an update for initializing a + self.assertEqual(framework.snapshots, [ + (StoredStateData, {'a': get_a()}), + ]) + del obj + gc.collect() + obj_copy1 = SomeObject(framework, '1') + self.assertEqual(obj_copy1._stored.a, get_a()) + + op(obj_copy1._stored.a, b) + validate_op(obj_copy1._stored.a, expected_res) + framework.commit() + framework.close() + + storage_copy = SQLiteStorage(self.tmpdir / "framework.data") + framework_copy = WrappedFramework(storage_copy, self.tmpdir, None, None) + + obj_copy2 = SomeObject(framework_copy, '1') + + validate_op(obj_copy2._stored.a, expected_res) + + # Commit saves the pre-commit and commit events, and the framework + # event counter, but shouldn't update the stored state of my object + framework.snapshots.clear() + framework_copy.commit() + self.assertEqual(framework_copy.snapshots, []) + framework_copy.close() + + def test_comparison_operations(self): + test_operations = [( + {"1"}, # Operand A. + {"1", "2"}, # Operand B. + lambda a, b: a < b, # Operation to test. + True, # Result of op(A, B). + False, # Result of op(B, A). + ), ( + {"1"}, + {"1", "2"}, + lambda a, b: a > b, + False, + True + ), ( + # Empty set comparison. + set(), + set(), + lambda a, b: a == b, + True, + True + ), ( + {"a", "c"}, + {"c", "a"}, + lambda a, b: a == b, + True, + True + ), ( + dict(), + dict(), + lambda a, b: a == b, + True, + True + ), ( + {"1": "2"}, + {"1": "2"}, + lambda a, b: a == b, + True, + True + ), ( + {"1": "2"}, + {"1": "3"}, + lambda a, b: a == b, + False, + False + ), ( + [], + [], + lambda a, b: a == b, + True, + True + ), ( + [1, 2], + [1, 2], + lambda a, b: a == b, + True, + True + ), ( + [1, 2, 5, 6], + [1, 2, 5, 8, 10], + lambda a, b: a <= b, + True, + False + ), ( + [1, 2, 5, 6], + [1, 2, 5, 8, 10], + lambda a, b: a < b, + True, + False + ), ( + [1, 2, 5, 8], + [1, 2, 5, 6, 10], + lambda a, b: a > b, + True, + False + ), ( + [1, 2, 5, 8], + [1, 2, 5, 6, 10], + lambda a, b: a >= b, + True, + False + )] + + class SomeObject(Object): + _stored = StoredState() + + framework = self.create_framework() + + for i, (a, b, op, op_ab, op_ba) in enumerate(test_operations): + obj = SomeObject(framework, str(i)) + obj._stored.a = a + self.assertEqual(op(obj._stored.a, b), op_ab) + self.assertEqual(op(b, obj._stored.a), op_ba) + + def test_set_operations(self): + test_operations = [( + {"1"}, # A set to test an operation against (other_set). + lambda a, b: a | b, # An operation to test. 
+ {"1", "a", "b"}, # The expected result of operation(obj._stored.set, other_set). + {"1", "a", "b"} # The expected result of operation(other_set, obj._stored.set). + ), ( + {"a", "c"}, + lambda a, b: a - b, + {"b"}, + {"c"} + ), ( + {"a", "c"}, + lambda a, b: a & b, + {"a"}, + {"a"} + ), ( + {"a", "c", "d"}, + lambda a, b: a ^ b, + {"b", "c", "d"}, + {"b", "c", "d"} + ), ( + set(), + lambda a, b: set(a), + {"a", "b"}, + set() + )] + + class SomeObject(Object): + _stored = StoredState() + + framework = self.create_framework() + + # Validate that operations between StoredSet and built-in sets + # only result in built-in sets being returned. + # Make sure that commutativity is preserved and that the + # original sets are not changed or used as a result. + for i, (variable_operand, operation, ab_res, ba_res) in enumerate(test_operations): + obj = SomeObject(framework, str(i)) + obj._stored.set = {"a", "b"} + + for a, b, expected in [ + (obj._stored.set, variable_operand, ab_res), + (variable_operand, obj._stored.set, ba_res)]: + old_a = set(a) + old_b = set(b) + + result = operation(a, b) + self.assertEqual(result, expected) + + # Common sanity checks + self.assertIsNot(obj._stored.set._under, result) + self.assertIsNot(result, a) + self.assertIsNot(result, b) + self.assertEqual(a, old_a) + self.assertEqual(b, old_b) + + def test_set_default(self): + framework = self.create_framework() + + class StatefulObject(Object): + _stored = StoredState() + parent = StatefulObject(framework, 'key') + parent._stored.set_default(foo=1) + self.assertEqual(parent._stored.foo, 1) + parent._stored.set_default(foo=2) + # foo was already set, so it doesn't get replaced + self.assertEqual(parent._stored.foo, 1) + parent._stored.set_default(foo=3, bar=4) + self.assertEqual(parent._stored.foo, 1) + self.assertEqual(parent._stored.bar, 4) + # reloading the state still leaves things at the default values + framework.commit() + del parent + parent = StatefulObject(framework, 'key') + parent._stored.set_default(foo=5, bar=6) + self.assertEqual(parent._stored.foo, 1) + self.assertEqual(parent._stored.bar, 4) + # TODO: jam 2020-01-30 is there a clean way to tell that + # parent._stored._data.dirty is False? + + +class GenericObserver(Object): + """Generic observer for the tests.""" + + def __init__(self, parent, key): + super().__init__(parent, key) + self.called = False + + def callback_method(self, event): + """Set the instance .called to True.""" + self.called = True + + +@patch('sys.stderr', new_callable=io.StringIO) +class BreakpointTests(BaseTestCase): + + def setUp(self): + super().setUp() + logassert.setup(self, 'ops') + + def test_ignored(self, fake_stderr): + # It doesn't do anything really unless proper environment is there. + with patch.dict(os.environ): + os.environ.pop('JUJU_DEBUG_AT', None) + framework = self.create_framework() + + with patch('pdb.Pdb.set_trace') as mock: + framework.breakpoint() + self.assertEqual(mock.call_count, 0) + self.assertEqual(fake_stderr.getvalue(), "") + self.assertNotLoggedWarning("Breakpoint", "skipped") + + def test_pdb_properly_called(self, fake_stderr): + # The debugger needs to leave the user in the frame where the breakpoint is executed, + # which for the test is the frame we're calling it here in the test :). 
+ with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}): + framework = self.create_framework() + + with patch('pdb.Pdb.set_trace') as mock: + this_frame = inspect.currentframe() + framework.breakpoint() + + self.assertEqual(mock.call_count, 1) + self.assertEqual(mock.call_args, ((this_frame,), {})) + + def test_welcome_message(self, fake_stderr): + # Check that an initial message is shown to the user when code is interrupted. + with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}): + framework = self.create_framework() + with patch('pdb.Pdb.set_trace'): + framework.breakpoint() + self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE) + + def test_welcome_message_not_multiple(self, fake_stderr): + # Check that an initial message is NOT shown twice if the breakpoint is exercised + # twice in the same run. + with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}): + framework = self.create_framework() + with patch('pdb.Pdb.set_trace'): + framework.breakpoint() + self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE) + framework.breakpoint() + self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE) + + def test_builtin_breakpoint_hooked(self, fake_stderr): + # Verify that the proper hook is set. + with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}): + self.create_framework() # creating the framework setups the hook + with patch('pdb.Pdb.set_trace') as mock: + # Calling through sys, not breakpoint() directly, so we can run the + # tests with Py < 3.7. + sys.breakpointhook() + self.assertEqual(mock.call_count, 1) + + def test_breakpoint_names(self, fake_stderr): + framework = self.create_framework() + + # Name rules: + # - must start and end with lowercase alphanumeric characters + # - only contain lowercase alphanumeric characters, or the hyphen "-" + good_names = [ + 'foobar', + 'foo-bar-baz', + 'foo-------bar', + 'foo123', + '778', + '77-xx', + 'a-b', + 'ab', + 'x', + ] + for name in good_names: + with self.subTest(name=name): + framework.breakpoint(name) + + bad_names = [ + '', + '.', + '-', + '...foo', + 'foo.bar', + 'bar--' + 'FOO', + 'FooBar', + 'foo bar', + 'foo_bar', + '/foobar', + 'break-here-☚', + ] + msg = 'breakpoint names must look like "foo" or "foo-bar"' + for name in bad_names: + with self.subTest(name=name): + with self.assertRaises(ValueError) as cm: + framework.breakpoint(name) + self.assertEqual(str(cm.exception), msg) + + reserved_names = [ + 'all', + 'hook', + ] + msg = 'breakpoint names "all" and "hook" are reserved' + for name in reserved_names: + with self.subTest(name=name): + with self.assertRaises(ValueError) as cm: + framework.breakpoint(name) + self.assertEqual(str(cm.exception), msg) + + not_really_names = [ + 123, + 1.1, + False, + ] + for name in not_really_names: + with self.subTest(name=name): + with self.assertRaises(TypeError) as cm: + framework.breakpoint(name) + self.assertEqual(str(cm.exception), 'breakpoint names must be strings') + + def check_trace_set(self, envvar_value, breakpoint_name, call_count): + """Helper to check the diverse combinations of situations.""" + with patch.dict(os.environ, {'JUJU_DEBUG_AT': envvar_value}): + framework = self.create_framework() + with patch('pdb.Pdb.set_trace') as mock: + framework.breakpoint(breakpoint_name) + self.assertEqual(mock.call_count, call_count) + + def test_unnamed_indicated_all(self, fake_stderr): + # If 'all' is indicated, unnamed breakpoints will always activate. 
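+ # (check_trace_set(envvar_value, breakpoint_name, call_count) sets JUJU_DEBUG_AT
+ # to envvar_value, calls framework.breakpoint(breakpoint_name), and asserts how
+ # many times pdb would have been entered.)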
+ self.check_trace_set('all', None, 1) + + def test_unnamed_indicated_hook(self, fake_stderr): + # Special value 'hook' was indicated, nothing to do with any call. + self.check_trace_set('hook', None, 0) + + def test_named_indicated_specifically(self, fake_stderr): + # Some breakpoint was indicated, and the framework call used exactly that name. + self.check_trace_set('mybreak', 'mybreak', 1) + + def test_named_indicated_unnamed(self, fake_stderr): + # Some breakpoint was indicated, but the framework call was unnamed + self.check_trace_set('some-breakpoint', None, 0) + self.assertLoggedWarning( + "Breakpoint None skipped", + "not found in the requested breakpoints: ['some-breakpoint']") + + def test_named_indicated_somethingelse(self, fake_stderr): + # Some breakpoint was indicated, but the framework call was with a different name + self.check_trace_set('some-breakpoint', 'other-name', 0) + self.assertLoggedWarning( + "Breakpoint 'other-name' skipped", + "not found in the requested breakpoints: ['some-breakpoint']") + + def test_named_indicated_ingroup(self, fake_stderr): + # A multiple breakpoint was indicated, and the framework call used a name among those. + self.check_trace_set('some,mybreak,foobar', 'mybreak', 1) + + def test_named_indicated_all(self, fake_stderr): + # The framework indicated 'all', which includes any named breakpoint set. + self.check_trace_set('all', 'mybreak', 1) + + def test_named_indicated_hook(self, fake_stderr): + # The framework indicated the special value 'hook', nothing to do with any named call. + self.check_trace_set('hook', 'mybreak', 0) + + +class DebugHookTests(BaseTestCase): + + def test_envvar_parsing_missing(self): + with patch.dict(os.environ): + os.environ.pop('JUJU_DEBUG_AT', None) + framework = self.create_framework() + self.assertEqual(framework._juju_debug_at, ()) + + def test_envvar_parsing_empty(self): + with patch.dict(os.environ, {'JUJU_DEBUG_AT': ''}): + framework = self.create_framework() + self.assertEqual(framework._juju_debug_at, ()) + + def test_envvar_parsing_simple(self): + with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'hook'}): + framework = self.create_framework() + self.assertEqual(framework._juju_debug_at, ['hook']) + + def test_envvar_parsing_multiple(self): + with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'foo,bar,all'}): + framework = self.create_framework() + self.assertEqual(framework._juju_debug_at, ['foo', 'bar', 'all']) + + def test_basic_interruption_enabled(self): + framework = self.create_framework() + framework._juju_debug_at = ['hook'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch('sys.stderr', new_callable=io.StringIO) as fake_stderr: + with patch('pdb.runcall') as mock: + publisher.install.emit() + + # Check that the pdb module was used correctly and that the callback method was NOT + # called (as we intercepted the normal pdb behaviour! this is to check that the + # framework didn't call the callback directly) + self.assertEqual(mock.call_count, 1) + expected_callback, expected_event = mock.call_args[0] + self.assertEqual(expected_callback, observer.callback_method) + self.assertIsInstance(expected_event, EventBase) + self.assertFalse(observer.called) + + # Verify proper message was given to the user. 
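+ # (The welcome banner is written the first time execution is handed over to the
+ # debugger, so its presence on stderr confirms the interception path was taken
+ # even though pdb.runcall itself is mocked out.)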
+ self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE) + + def test_actions_are_interrupted(self): + test_model = self.create_model() + framework = self.create_framework(model=test_model) + framework._juju_debug_at = ['hook'] + + class CustomEvents(ObjectEvents): + foobar_action = EventSource(charm.ActionEvent) + + publisher = CustomEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.foobar_action, observer.callback_method) + fake_script(self, 'action-get', "echo {}") + + with patch('sys.stderr', new_callable=io.StringIO): + with patch('pdb.runcall') as mock: + with patch.dict(os.environ, {'JUJU_ACTION_NAME': 'foobar'}): + publisher.foobar_action.emit() + + self.assertEqual(mock.call_count, 1) + self.assertFalse(observer.called) + + def test_internal_events_not_interrupted(self): + class MyNotifier(Object): + """Generic notifier for the tests.""" + bar = EventSource(EventBase) + + framework = self.create_framework() + framework._juju_debug_at = ['hook'] + + publisher = MyNotifier(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.bar, observer.callback_method) + + with patch('pdb.runcall') as mock: + publisher.bar.emit() + + self.assertEqual(mock.call_count, 0) + self.assertTrue(observer.called) + + def test_envvar_mixed(self): + framework = self.create_framework() + framework._juju_debug_at = ['foo', 'hook', 'all', 'whatever'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch('sys.stderr', new_callable=io.StringIO): + with patch('pdb.runcall') as mock: + publisher.install.emit() + + self.assertEqual(mock.call_count, 1) + self.assertFalse(observer.called) + + def test_no_registered_method(self): + framework = self.create_framework() + framework._juju_debug_at = ['hook'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + + with patch('pdb.runcall') as mock: + publisher.install.emit() + + self.assertEqual(mock.call_count, 0) + self.assertFalse(observer.called) + + def test_envvar_nohook(self): + framework = self.create_framework() + framework._juju_debug_at = ['something-else'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'something-else'}): + with patch('pdb.runcall') as mock: + publisher.install.emit() + + self.assertEqual(mock.call_count, 0) + self.assertTrue(observer.called) + + def test_envvar_missing(self): + framework = self.create_framework() + framework._juju_debug_at = () + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch('pdb.runcall') as mock: + publisher.install.emit() + + self.assertEqual(mock.call_count, 0) + self.assertTrue(observer.called) + + def test_welcome_message_not_multiple(self): + framework = self.create_framework() + framework._juju_debug_at = ['hook'] + + publisher = charm.CharmEvents(framework, "1") + observer = GenericObserver(framework, "1") + framework.observe(publisher.install, observer.callback_method) + + with patch('sys.stderr', new_callable=io.StringIO) as fake_stderr: + with patch('pdb.runcall') as mock: + publisher.install.emit() + self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE) 
+ publisher.install.emit() + self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE) + self.assertEqual(mock.call_count, 2) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_helpers.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_helpers.py new file mode 100755 index 0000000000000000000000000000000000000000..f7c6fc29a60362940d4e0259351d1d53eb9dbe93 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_helpers.py @@ -0,0 +1,121 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import pathlib +import subprocess +import shutil +import tempfile +import unittest + +from ops.framework import Framework +from ops.model import Model, _ModelBackend +from ops.charm import CharmMeta +from ops.storage import SQLiteStorage + + +def fake_script(test_case, name, content): + if not hasattr(test_case, 'fake_script_path'): + fake_script_path = tempfile.mkdtemp('-fake_script') + os.environ['PATH'] = '{}:{}'.format(fake_script_path, os.environ["PATH"]) + + def cleanup(): + shutil.rmtree(fake_script_path) + os.environ['PATH'] = os.environ['PATH'].replace(fake_script_path + ':', '') + + test_case.addCleanup(cleanup) + test_case.fake_script_path = pathlib.Path(fake_script_path) + + template_args = { + 'name': name, + 'path': test_case.fake_script_path, + 'content': content, + } + + with (test_case.fake_script_path / name).open('wt') as f: + # Before executing the provided script, dump the provided arguments in calls.txt. + # ASCII 1E is RS 'record separator', and 1C is FS 'file separator', which seem appropriate. 
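+ # Each record is written as name<RS>arg<RS>...<FS>, which fake_script_calls()
+ # below splits back apart to recover the individual invocations.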
+ f.write('''#!/bin/sh +{{ printf {name}; printf "\\036%s" "$@"; printf "\\034"; }} >> {path}/calls.txt +{content}'''.format_map(template_args)) + os.chmod(str(test_case.fake_script_path / name), 0o755) + + +def fake_script_calls(test_case, clear=False): + try: + with (test_case.fake_script_path / 'calls.txt').open('r+t') as f: + calls = [line.split('\x1e') for line in f.read().split('\x1c')[:-1]] + if clear: + f.truncate(0) + return calls + except FileNotFoundError: + return [] + + +class FakeScriptTest(unittest.TestCase): + + def test_fake_script_works(self): + fake_script(self, 'foo', 'echo foo runs') + fake_script(self, 'bar', 'echo bar runs') + output = subprocess.getoutput('foo a "b c "; bar "d e" f') + self.assertEqual(output, 'foo runs\nbar runs') + self.assertEqual(fake_script_calls(self), [ + ['foo', 'a', 'b c '], + ['bar', 'd e', 'f'], + ]) + + def test_fake_script_clear(self): + fake_script(self, 'foo', 'echo foo runs') + + output = subprocess.getoutput('foo a "b c"') + self.assertEqual(output, 'foo runs') + + self.assertEqual(fake_script_calls(self, clear=True), [['foo', 'a', 'b c']]) + + fake_script(self, 'bar', 'echo bar runs') + + output = subprocess.getoutput('bar "d e" f') + self.assertEqual(output, 'bar runs') + + self.assertEqual(fake_script_calls(self, clear=True), [['bar', 'd e', 'f']]) + + self.assertEqual(fake_script_calls(self, clear=True), []) + + +class BaseTestCase(unittest.TestCase): + + def create_framework(self, *, model=None, tmpdir=None): + """Create a Framework object. + + By default operate in-memory; pass a temporary directory via the 'tmpdir' + parameter if you whish to instantiate several frameworks sharing the + same dir (e.g. for storing state). + """ + if tmpdir is None: + data_fpath = ":memory:" + charm_dir = 'non-existant' + else: + data_fpath = tmpdir / "framework.data" + charm_dir = tmpdir + + framework = Framework(SQLiteStorage(data_fpath), charm_dir, meta=None, model=model) + self.addCleanup(framework.close) + return framework + + def create_model(self): + """Create a Model object.""" + backend = _ModelBackend(unit_name='myapp/0') + meta = CharmMeta() + model = Model(meta, backend) + return model diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_infra.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_infra.py new file mode 100644 index 0000000000000000000000000000000000000000..5133b35d11be89a702558d6f961facb8884553ec --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_infra.py @@ -0,0 +1,155 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
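+
+# Repository-level checks: flake8/autopep8 style, stray backslash quoting,
+# copyright headers, and setup.py metadata.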
+ +import io +import itertools +import os +import re +import subprocess +import sys +import tempfile +import unittest +from unittest.mock import patch + +import autopep8 +from flake8.api.legacy import get_style_guide + +import ops + + +def get_python_filepaths(): + """Helper to retrieve paths of Python files.""" + python_paths = ['setup.py'] + for root in ['ops', 'test']: + for dirpath, dirnames, filenames in os.walk(root): + for filename in filenames: + if filename.endswith(".py"): + python_paths.append(os.path.join(dirpath, filename)) + return python_paths + + +class InfrastructureTests(unittest.TestCase): + + def test_pep8(self): + # verify all files are nicely styled + python_filepaths = get_python_filepaths() + style_guide = get_style_guide() + fake_stdout = io.StringIO() + with patch('sys.stdout', fake_stdout): + report = style_guide.check_files(python_filepaths) + + # if flake8 didnt' report anything, we're done + if report.total_errors == 0: + return + + # grab on which files we have issues + flake8_issues = fake_stdout.getvalue().split('\n') + broken_filepaths = {item.split(':')[0] for item in flake8_issues if item} + + # give hints to the developer on how files' style could be improved + options = autopep8.parse_args(['']) + options.aggressive = 1 + options.diff = True + options.max_line_length = 99 + + issues = [] + for filepath in broken_filepaths: + diff = autopep8.fix_file(filepath, options=options) + if diff: + issues.append(diff) + + report = ["Please fix files as suggested by autopep8:"] + issues + report += ["\n-- Original flake8 reports:"] + flake8_issues + self.fail("\n".join(report)) + + def test_quote_backslashes(self): + # ensure we're not using unneeded backslash to escape strings + issues = [] + for filepath in get_python_filepaths(): + with open(filepath, "rt", encoding="utf8") as fh: + for idx, line in enumerate(fh, 1): + if (r'\"' in line or r"\'" in line) and "NOQA" not in line: + issues.append((filepath, idx, line.rstrip())) + if issues: + msgs = ["{}:{:d}:{}".format(*issue) for issue in issues] + self.fail("Spurious backslashes found, please fix these quotings:\n" + "\n".join(msgs)) + + def test_ensure_copyright(self): + # all non-empty Python files must have a proper copyright somewhere in the first 5 lines + issues = [] + regex = re.compile(r"# Copyright \d\d\d\d(-\d\d\d\d)? 
Canonical Ltd.\n") + for filepath in get_python_filepaths(): + if os.stat(filepath).st_size == 0: + continue + + with open(filepath, "rt", encoding="utf8") as fh: + for line in itertools.islice(fh, 5): + if regex.match(line): + break + else: + issues.append(filepath) + if issues: + self.fail("Please add copyright headers to the following files:\n" + "\n".join(issues)) + + def _run_setup(self, *args): + proc = subprocess.run( + (sys.executable, 'setup.py') + args, + stdout=subprocess.PIPE, + check=True) + return proc.stdout.strip().decode("utf8") + + def test_setup_version(self): + setup_version = self._run_setup('--version') + + self.assertEqual(setup_version, ops.__version__) + + def test_setup_description(self): + with open("README.md", "rt", encoding="utf8") as fh: + disk_readme = fh.read().strip() + + setup_readme = self._run_setup('--long-description') + + self.assertEqual(setup_readme, disk_readme) + + def test_check(self): + self._run_setup('check', '--strict') + + +class ImportersTestCase(unittest.TestCase): + + template = "from ops import {module_name}" + + def test_imports(self): + mod_names = [ + 'charm', + 'framework', + 'main', + 'model', + 'testing', + ] + + for name in mod_names: + with self.subTest(name=name): + self.check(name) + + def check(self, name): + """Helper function to run the test.""" + _, testfile = tempfile.mkstemp() + self.addCleanup(os.unlink, testfile) + + with open(testfile, 'wt', encoding='utf8') as fh: + fh.write(self.template.format(module_name=name)) + + proc = subprocess.run([sys.executable, testfile], env={'PYTHONPATH': os.getcwd()}) + self.assertEqual(proc.returncode, 0) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_jujuversion.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_jujuversion.py new file mode 100755 index 0000000000000000000000000000000000000000..1e3821017c22b1fb07c74a089b8f678d3b055fe6 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_jujuversion.py @@ -0,0 +1,145 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
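+
+# Tests for ops.jujuversion.JujuVersion: version-string parsing, detection from
+# the environment, and equality/ordering semantics.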
+ +import os +import unittest +import unittest.mock # in this file, importing just 'patch' would be confusing + +from ops.jujuversion import JujuVersion + + +class TestJujuVersion(unittest.TestCase): + + def test_parsing(self): + test_cases = [ + ("0.0.0", 0, 0, '', 0, 0), + ("0.0.2", 0, 0, '', 2, 0), + ("0.1.0", 0, 1, '', 0, 0), + ("0.2.3", 0, 2, '', 3, 0), + ("10.234.3456", 10, 234, '', 3456, 0), + ("10.234.3456.1", 10, 234, '', 3456, 1), + ("1.21-alpha12", 1, 21, 'alpha', 12, 0), + ("1.21-alpha1.34", 1, 21, 'alpha', 1, 34), + ("2.7", 2, 7, '', 0, 0) + ] + + for vs, major, minor, tag, patch, build in test_cases: + v = JujuVersion(vs) + self.assertEqual(v.major, major) + self.assertEqual(v.minor, minor) + self.assertEqual(v.tag, tag) + self.assertEqual(v.patch, patch) + self.assertEqual(v.build, build) + + @unittest.mock.patch('os.environ', new={}) + def test_from_environ(self): + with self.assertRaisesRegex(RuntimeError, 'environ has no JUJU_VERSION'): + JujuVersion.from_environ() + + os.environ['JUJU_VERSION'] = 'no' + with self.assertRaisesRegex(RuntimeError, 'not a valid Juju version'): + JujuVersion.from_environ() + + os.environ['JUJU_VERSION'] = '2.8.0' + v = JujuVersion.from_environ() + self.assertEqual(v, JujuVersion('2.8.0')) + + def test_has_app_data(self): + self.assertTrue(JujuVersion('2.8.0').has_app_data()) + self.assertTrue(JujuVersion('2.7.0').has_app_data()) + self.assertFalse(JujuVersion('2.6.9').has_app_data()) + + def test_parsing_errors(self): + invalid_versions = [ + "xyz", + "foo.bar", + "foo.bar.baz", + "dead.beef.ca.fe", + "1234567890.2.1", # The major version is too long. + "0.2..1", # Two periods next to each other. + "1.21.alpha1", # Tag comes after period. + "1.21-alpha", # No patch number but a tag is present. + "1.21-alpha1beta", # Non-numeric string after the patch number. + "1.21-alpha-dev", # Tag duplication. + "1.21-alpha_dev3", # Underscore in a tag. + "1.21-alpha123dev3", # Non-numeric string after the patch number. 
+ ] + for v in invalid_versions: + with self.assertRaises(RuntimeError): + JujuVersion(v) + + def test_equality(self): + test_cases = [ + ("1.0.0", "1.0.0", True), + ("01.0.0", "1.0.0", True), + ("10.0.0", "9.0.0", False), + ("1.0.0", "1.0.1", False), + ("1.0.1", "1.0.0", False), + ("1.0.0", "1.1.0", False), + ("1.1.0", "1.0.0", False), + ("1.0.0", "2.0.0", False), + ("1.2-alpha1", "1.2.0", False), + ("1.2-alpha2", "1.2-alpha1", False), + ("1.2-alpha2.1", "1.2-alpha2", False), + ("1.2-alpha2.2", "1.2-alpha2.1", False), + ("1.2-beta1", "1.2-alpha1", False), + ("1.2-beta1", "1.2-alpha2.1", False), + ("1.2-beta1", "1.2.0", False), + ("1.2.1", "1.2.0", False), + ("2.0.0", "1.0.0", False), + ("2.0.0.0", "2.0.0", True), + ("2.0.0.0", "2.0.0.0", True), + ("2.0.0.1", "2.0.0.0", False), + ("2.0.1.10", "2.0.0.0", False), + ] + + for a, b, expected in test_cases: + self.assertEqual(JujuVersion(a) == JujuVersion(b), expected) + self.assertEqual(JujuVersion(a) == b, expected) + + def test_comparison(self): + test_cases = [ + ("1.0.0", "1.0.0", False, True), + ("01.0.0", "1.0.0", False, True), + ("10.0.0", "9.0.0", False, False), + ("1.0.0", "1.0.1", True, True), + ("1.0.1", "1.0.0", False, False), + ("1.0.0", "1.1.0", True, True), + ("1.1.0", "1.0.0", False, False), + ("1.0.0", "2.0.0", True, True), + ("1.2-alpha1", "1.2.0", True, True), + ("1.2-alpha2", "1.2-alpha1", False, False), + ("1.2-alpha2.1", "1.2-alpha2", False, False), + ("1.2-alpha2.2", "1.2-alpha2.1", False, False), + ("1.2-beta1", "1.2-alpha1", False, False), + ("1.2-beta1", "1.2-alpha2.1", False, False), + ("1.2-beta1", "1.2.0", True, True), + ("1.2.1", "1.2.0", False, False), + ("2.0.0", "1.0.0", False, False), + ("2.0.0.0", "2.0.0", False, True), + ("2.0.0.0", "2.0.0.0", False, True), + ("2.0.0.1", "2.0.0.0", False, False), + ("2.0.1.10", "2.0.0.0", False, False), + ] + + for a, b, expected_strict, expected_weak in test_cases: + self.assertEqual(JujuVersion(a) < JujuVersion(b), expected_strict) + self.assertEqual(JujuVersion(a) <= JujuVersion(b), expected_weak) + self.assertEqual(JujuVersion(b) > JujuVersion(a), expected_strict) + self.assertEqual(JujuVersion(b) >= JujuVersion(a), expected_weak) + # Implicit conversion. + self.assertEqual(JujuVersion(a) < b, expected_strict) + self.assertEqual(JujuVersion(a) <= b, expected_weak) + self.assertEqual(b > JujuVersion(a), expected_strict) + self.assertEqual(b >= JujuVersion(a), expected_weak) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_lib.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..7e9af8565449b2ff1ef13c905d8e2e201a7cefad --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_lib.py @@ -0,0 +1,545 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
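+
+# Tests for ops.lib: library discovery (_find_all_specs), metadata parsing
+# (_parse_lib), ordering of _Lib entries, and the public ops.lib.use() API.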
+ +import os +import sys + +from importlib.machinery import ModuleSpec +from pathlib import Path +from tempfile import mkdtemp, mkstemp +from unittest import TestCase +from unittest.mock import patch +from random import shuffle +from shutil import rmtree +from textwrap import dedent + +import ops.lib + + +def _mklib(topdir: str, pkgname: str, libname: str) -> Path: + """Make a for-testing library. + + Args: + topdir: the toplevel directory in which the package will be created. + This directory must already exist. + pkgname: the name of the package to create in the toplevel directory. + this package will have an empty __init__.py. + libname: the name of the library directory to create under the package. + + Returns: + a :class:`Path` to the ``__init__.py`` of the created library. + This file will not have been created yet. + """ + pkg = Path(topdir) / pkgname + try: + pkg.mkdir() + except FileExistsError: + pass + else: + (pkg / '__init__.py').write_text('') + + lib = pkg / 'opslib' / libname + lib.mkdir(parents=True) + + return lib / '__init__.py' + + +def _flatten(specgen): + return sorted([os.path.dirname(spec.origin) for spec in specgen]) + + +class TestLibFinder(TestCase): + def _mkdtemp(self) -> str: + tmpdir = mkdtemp() + self.addCleanup(rmtree, tmpdir) + return tmpdir + + def test_single(self): + tmpdir = self._mkdtemp() + + self.assertEqual(list(ops.lib._find_all_specs([tmpdir])), []) + + _mklib(tmpdir, "foo", "bar").write_text("") + + self.assertEqual( + _flatten(ops.lib._find_all_specs([tmpdir])), + [tmpdir + '/foo/opslib/bar']) + + def test_multi(self): + tmpdirA = self._mkdtemp() + tmpdirB = self._mkdtemp() + + if tmpdirA > tmpdirB: + # keep sorting happy + tmpdirA, tmpdirB = tmpdirB, tmpdirA + + dirs = [tmpdirA, tmpdirB] + + for top in [tmpdirA, tmpdirB]: + for pkg in ["bar", "baz"]: + for lib in ["meep", "quux"]: + _mklib(top, pkg, lib).write_text("") + + expected = [ + os.path.join(tmpdirA, "bar", "opslib", "meep"), + os.path.join(tmpdirA, "bar", "opslib", "quux"), + os.path.join(tmpdirA, "baz", "opslib", "meep"), + os.path.join(tmpdirA, "baz", "opslib", "quux"), + os.path.join(tmpdirB, "bar", "opslib", "meep"), + os.path.join(tmpdirB, "bar", "opslib", "quux"), + os.path.join(tmpdirB, "baz", "opslib", "meep"), + os.path.join(tmpdirB, "baz", "opslib", "quux"), + ] + + self.assertEqual(_flatten(ops.lib._find_all_specs(dirs)), expected) + + def test_cwd(self): + tmpcwd = self._mkdtemp() + cwd = os.getcwd() + os.chdir(tmpcwd) + self.addCleanup(os.chdir, cwd) + + dirs = [""] + + self.assertEqual(list(ops.lib._find_all_specs(dirs)), []) + + _mklib(tmpcwd, "foo", "bar").write_text("") + + self.assertEqual( + _flatten(ops.lib._find_all_specs(dirs)), + ['./foo/opslib/bar']) + + def test_bogus_topdir(self): + """Check that having one bogus dir in sys.path doesn't cause the finder to abort.""" + tmpdir = self._mkdtemp() + + dirs = [tmpdir, "/bogus"] + + self.assertEqual(list(ops.lib._find_all_specs(dirs)), []) + + _mklib(tmpdir, "foo", "bar").write_text("") + + self.assertEqual( + _flatten(ops.lib._find_all_specs(dirs)), + [tmpdir + '/foo/opslib/bar']) + + def test_bogus_opsdir(self): + """Check that having one bogus opslib doesn't cause the finder to abort.""" + + tmpdir = self._mkdtemp() + + self.assertEqual(list(ops.lib._find_all_specs([tmpdir])), []) + + _mklib(tmpdir, "foo", "bar").write_text('') + + path = Path(tmpdir) / 'baz' + path.mkdir() + (path / 'opslib').write_text('') + + self.assertEqual( + _flatten(ops.lib._find_all_specs([tmpdir])), + [tmpdir + '/foo/opslib/bar']) + + 
def test_namespace(self): + """Check that namespace packages are ignored.""" + tmpdir = self._mkdtemp() + + self.assertEqual(list(ops.lib._find_all_specs([tmpdir])), []) + + _mklib(tmpdir, "foo", "bar") # no __init__.py => a namespace package + + self.assertEqual(list(ops.lib._find_all_specs([tmpdir])), []) + + +class TestLibParser(TestCase): + def _mkmod(self, name: str, content: str = None) -> ModuleSpec: + fd, fname = mkstemp(text=True) + self.addCleanup(os.unlink, fname) + if content is not None: + with os.fdopen(fd, mode='wt', closefd=False) as f: + f.write(dedent(content)) + os.close(fd) + return ModuleSpec(name=name, loader=None, origin=fname) + + def test_simple(self): + """Check that we can load a reasonably straightforward lib""" + m = self._mkmod('foo', ''' + LIBNAME = "foo" + LIBEACH = float('-inf') + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + LIBANANA = True + ''') + lib = ops.lib._parse_lib(m) + self.assertEqual(lib, ops.lib._Lib(None, "foo", "alice@example.com", 2, 42)) + # also check the repr while we're at it + self.assertEqual(repr(lib), '<_Lib foo by alice@example.com, API 2, patch 42>') + + def test_libauthor_has_dashes(self): + m = self._mkmod('foo', ''' + LIBNAME = "foo" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice-someone@example.com" + LIBANANA = True + ''') + lib = ops.lib._parse_lib(m) + self.assertEqual(lib, ops.lib._Lib(None, "foo", "alice-someone@example.com", 2, 42)) + # also check the repr while we're at it + self.assertEqual(repr(lib), '<_Lib foo by alice-someone@example.com, API 2, patch 42>') + + def test_lib_definitions_without_spaces(self): + m = self._mkmod('foo', ''' + LIBNAME="foo" + LIBAPI=2 + LIBPATCH=42 + LIBAUTHOR="alice@example.com" + LIBANANA=True + ''') + lib = ops.lib._parse_lib(m) + self.assertEqual(lib, ops.lib._Lib(None, "foo", "alice@example.com", 2, 42)) + # also check the repr while we're at it + self.assertEqual(repr(lib), '<_Lib foo by alice@example.com, API 2, patch 42>') + + def test_lib_definitions_trailing_comments(self): + m = self._mkmod('foo', ''' + LIBNAME = "foo" # comment style 1 + LIBAPI = 2 = comment style 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com"anything after the quote is a comment + LIBANANA = True + ''') + lib = ops.lib._parse_lib(m) + self.assertEqual(lib, ops.lib._Lib(None, "foo", "alice@example.com", 2, 42)) + # also check the repr while we're at it + self.assertEqual(repr(lib), '<_Lib foo by alice@example.com, API 2, patch 42>') + + def test_incomplete(self): + """Check that if anything is missing, nothing is returned""" + m = self._mkmod('foo', ''' + LIBNAME = "foo" + LIBAPI = 2 + LIBPATCH = 42 + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_too_long(self): + """Check that if the file is too long, nothing is returned""" + m = self._mkmod('foo', '\n' * ops.lib._MAX_LIB_LINES + ''' + LIBNAME = "foo" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_no_origin(self): + """Check that _parse_lib doesn't choke when given a spec with no origin""" + # 'just don't crash' + lib = ops.lib._parse_lib(ModuleSpec(name='hi', loader=None, origin=None)) + self.assertIsNone(lib) + + def test_bogus_origin(self): + """Check that if the origin is messed up, we don't crash""" + # 'just don't crash' + lib = ops.lib._parse_lib(ModuleSpec(name='hi', loader=None, origin='/')) + self.assertIsNone(lib) + + def test_bogus_lib(self): + """Check our behaviour when the lib is messed up""" + # note the syntax error (that is 
carefully chosen to pass the initial regexp) + m = self._mkmod('foo', ''' + LIBNAME = "1' + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_name_is_number(self): + """Check our behaviour when the name in the lib is a number""" + m = self._mkmod('foo', ''' + LIBNAME = 1 + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_api_is_string(self): + """Check our behaviour when the api in the lib is a string""" + m = self._mkmod('foo', ''' + LIBNAME = 'foo' + LIBAPI = '2' + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_patch_is_string(self): + """Check our behaviour when the patch in the lib is a string""" + m = self._mkmod('foo', ''' + LIBNAME = 'foo' + LIBAPI = 2 + LIBPATCH = '42' + LIBAUTHOR = "alice@example.com" + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_author_is_number(self): + """Check our behaviour when the author in the lib is a number""" + m = self._mkmod('foo', ''' + LIBNAME = 'foo' + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = 43 + ''') + self.assertIsNone(ops.lib._parse_lib(m)) + + def test_other_encoding(self): + """Check that we don't crash when a library is not UTF-8""" + m = self._mkmod('foo') + with open(m.origin, 'wt', encoding='latin-1') as f: + f.write(dedent(''' + LIBNAME = "foo" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + LIBANANA = "Ñoño" + ''')) + self.assertIsNone(ops.lib._parse_lib(m)) + + +class TestLib(TestCase): + + def test_lib_comparison(self): + self.assertNotEqual( + ops.lib._Lib(None, "foo", "alice@example.com", 1, 0), + ops.lib._Lib(None, "bar", "bob@example.com", 0, 1)) + self.assertEqual( + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1), + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1)) + + self.assertLess( + ops.lib._Lib(None, "foo", "alice@example.com", 1, 0), + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1)) + self.assertLess( + ops.lib._Lib(None, "foo", "alice@example.com", 0, 1), + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1)) + self.assertLess( + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1), + ops.lib._Lib(None, "foo", "bob@example.com", 1, 1)) + self.assertLess( + ops.lib._Lib(None, "bar", "alice@example.com", 1, 1), + ops.lib._Lib(None, "foo", "alice@example.com", 1, 1)) + + with self.assertRaises(TypeError): + 42 < ops.lib._Lib(None, "bar", "alice@example.com", 1, 1) + with self.assertRaises(TypeError): + ops.lib._Lib(None, "bar", "alice@example.com", 1, 1) < 42 + + # these two might be surprising in that they don't raise an exception, + # but they are correct: our __eq__ bailing means Python falls back to + # its default of checking object identity. 
+ self.assertNotEqual(ops.lib._Lib(None, "bar", "alice@example.com", 1, 1), 42) + self.assertNotEqual(42, ops.lib._Lib(None, "bar", "alice@example.com", 1, 1)) + + def test_lib_order(self): + a = ops.lib._Lib(None, "bar", "alice@example.com", 1, 0) + b = ops.lib._Lib(None, "bar", "alice@example.com", 1, 1) + c = ops.lib._Lib(None, "foo", "alice@example.com", 1, 0) + d = ops.lib._Lib(None, "foo", "alice@example.com", 1, 1) + e = ops.lib._Lib(None, "foo", "bob@example.com", 1, 1) + + for i in range(20): + with self.subTest(i): + libs = [a, b, c, d, e] + shuffle(libs) + self.assertEqual(sorted(libs), [a, b, c, d, e]) + + def test_use_bad_args_types(self): + with self.assertRaises(TypeError): + ops.lib.use(1, 2, 'bob@example.com') + with self.assertRaises(TypeError): + ops.lib.use('foo', '2', 'bob@example.com') + with self.assertRaises(TypeError): + ops.lib.use('foo', 2, ops.lib.use) + + def test_use_bad_args_values(self): + with self.assertRaises(ValueError): + ops.lib.use('--help', 2, 'alice@example.com') + with self.assertRaises(ValueError): + ops.lib.use('foo', -2, 'alice@example.com') + with self.assertRaises(ValueError): + ops.lib.use('foo', 1, 'example.com') + + +@patch('sys.path', new=()) +class TestLibFunctional(TestCase): + + def _mkdtemp(self) -> str: + tmpdir = mkdtemp() + self.addCleanup(rmtree, tmpdir) + return tmpdir + + def test_use_finds_subs(self): + """Test that ops.lib.use("baz") works when baz is inside a package in the python path.""" + tmpdir = self._mkdtemp() + sys.path = [tmpdir] + + _mklib(tmpdir, "foo", "bar").write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + """)) + + # autoimport to reset things + ops.lib.autoimport() + + # ops.lib.use done by charm author + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBNAME, 'baz') + self.assertEqual(baz.LIBAPI, 2) + self.assertEqual(baz.LIBPATCH, 42) + self.assertEqual(baz.LIBAUTHOR, 'alice@example.com') + + def test_use_finds_best_same_toplevel(self): + """Test that ops.lib.use("baz") works when there are two baz in the same toplevel.""" + + pkg_b = "foo" + lib_b = "bar" + patch_b = 40 + for pkg_a in ["foo", "fooA"]: + for lib_a in ["bar", "barA"]: + if (pkg_a, lib_a) == (pkg_b, lib_b): + # everything-is-the-same :-) + continue + for patch_a in [38, 42]: + desc = "A: {}/{}/{}; B: {}/{}/{}".format( + pkg_a, lib_a, patch_a, pkg_b, lib_b, patch_b) + with self.subTest(desc): + tmpdir = self._mkdtemp() + sys.path = [tmpdir] + + _mklib(tmpdir, pkg_a, lib_a).write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = {} + LIBAUTHOR = "alice@example.com" + """).format(patch_a)) + + _mklib(tmpdir, pkg_b, lib_b).write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = {} + LIBAUTHOR = "alice@example.com" + """).format(patch_b)) + + # autoimport to reset things + ops.lib.autoimport() + + # ops.lib.use done by charm author + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBNAME, 'baz') + self.assertEqual(baz.LIBAPI, 2) + self.assertEqual(baz.LIBPATCH, max(patch_a, patch_b)) + self.assertEqual(baz.LIBAUTHOR, 'alice@example.com') + + def test_use_finds_best_diff_toplevel(self): + """Test that ops.lib.use("baz") works when there are two baz in the different toplevels.""" + + pkg_b = "foo" + lib_b = "bar" + patch_b = 40 + for pkg_a in ["foo", "fooA"]: + for lib_a in ["bar", "barA"]: + for patch_a in [38, 42]: + desc = "A: {}/{}/{}; B: {}/{}/{}".format( + pkg_a, lib_a, patch_a, pkg_b, lib_b, patch_b) + with 
self.subTest(desc): + tmpdirA = self._mkdtemp() + tmpdirB = self._mkdtemp() + sys.path = [tmpdirA, tmpdirB] + + _mklib(tmpdirA, pkg_a, lib_a).write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = {} + LIBAUTHOR = "alice@example.com" + """).format(patch_a)) + + _mklib(tmpdirB, pkg_b, lib_b).write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = {} + LIBAUTHOR = "alice@example.com" + """).format(patch_b)) + + # autoimport to reset things + ops.lib.autoimport() + + # ops.lib.use done by charm author + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBNAME, 'baz') + self.assertEqual(baz.LIBAPI, 2) + self.assertEqual(baz.LIBPATCH, max(patch_a, patch_b)) + self.assertEqual(baz.LIBAUTHOR, 'alice@example.com') + + def test_none_found(self): + with self.assertRaises(ImportError): + ops.lib.use('foo', 1, 'alice@example.com') + + def test_from_scratch(self): + tmpdir = self._mkdtemp() + sys.path = [tmpdir] + + _mklib(tmpdir, "foo", "bar").write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + """)) + + # hard reset + ops.lib._libraries = None + + # sanity check that ops.lib.use works + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBAPI, 2) + + def test_others_found(self): + tmpdir = self._mkdtemp() + sys.path = [tmpdir] + + _mklib(tmpdir, "foo", "bar").write_text(dedent(""" + LIBNAME = "baz" + LIBAPI = 2 + LIBPATCH = 42 + LIBAUTHOR = "alice@example.com" + """)) + + # reload + ops.lib.autoimport() + + # sanity check that ops.lib.use works + baz = ops.lib.use('baz', 2, 'alice@example.com') + self.assertEqual(baz.LIBAPI, 2) + + with self.assertRaises(ImportError): + ops.lib.use('baz', 1, 'alice@example.com') + + with self.assertRaises(ImportError): + ops.lib.use('baz', 2, 'bob@example.com') diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_log.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_log.py new file mode 100644 index 0000000000000000000000000000000000000000..b7f74d5c901ffb7833c1e5523f9f69a1959999e1 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_log.py @@ -0,0 +1,140 @@ +#!/usr/bin/python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
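+
+# Tests for ops.log: routing standard-library logging records to juju-log via
+# the model backend.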
+ +import io +import unittest +from unittest.mock import patch +import importlib + +import logging +import ops.log + + +class FakeModelBackend: + + def __init__(self): + self._calls = [] + + def calls(self, clear=False): + calls = self._calls + if clear: + self._calls = [] + return calls + + def juju_log(self, message, level): + self._calls.append((message, level)) + + +def reset_logging(): + logging.shutdown() + importlib.reload(logging) + + +class TestLogging(unittest.TestCase): + + def setUp(self): + self.backend = FakeModelBackend() + + reset_logging() + self.addCleanup(reset_logging) + + def test_default_logging(self): + ops.log.setup_root_logging(self.backend) + + logger = logging.getLogger() + self.assertEqual(logger.level, logging.DEBUG) + self.assertIsInstance(logger.handlers[0], ops.log.JujuLogHandler) + + test_cases = [( + lambda: logger.critical('critical'), [('CRITICAL', 'critical')] + ), ( + lambda: logger.error('error'), [('ERROR', 'error')] + ), ( + lambda: logger.warning('warning'), [('WARNING', 'warning')] + ), ( + lambda: logger.info('info'), [('INFO', 'info')] + ), ( + lambda: logger.debug('debug'), [('DEBUG', 'debug')] + )] + + for do, res in test_cases: + do() + calls = self.backend.calls(clear=True) + self.assertEqual(calls, res) + + def test_handler_filtering(self): + logger = logging.getLogger() + logger.setLevel(logging.INFO) + logger.addHandler(ops.log.JujuLogHandler(self.backend, logging.WARNING)) + logger.info('foo') + self.assertEqual(self.backend.calls(), []) + logger.warning('bar') + self.assertEqual(self.backend.calls(), [('WARNING', 'bar')]) + + def test_no_stderr_without_debug(self): + buffer = io.StringIO() + with patch('sys.stderr', buffer): + ops.log.setup_root_logging(self.backend, debug=False) + logger = logging.getLogger() + logger.debug('debug message') + logger.info('info message') + logger.warning('warning message') + logger.critical('critical message') + self.assertEqual( + self.backend.calls(), + [('DEBUG', 'debug message'), + ('INFO', 'info message'), + ('WARNING', 'warning message'), + ('CRITICAL', 'critical message'), + ]) + self.assertEqual(buffer.getvalue(), "") + + def test_debug_logging(self): + buffer = io.StringIO() + with patch('sys.stderr', buffer): + ops.log.setup_root_logging(self.backend, debug=True) + logger = logging.getLogger() + logger.debug('debug message') + logger.info('info message') + logger.warning('warning message') + logger.critical('critical message') + self.assertEqual( + self.backend.calls(), + [('DEBUG', 'debug message'), + ('INFO', 'info message'), + ('WARNING', 'warning message'), + ('CRITICAL', 'critical message'), + ]) + self.assertRegex( + buffer.getvalue(), + r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d DEBUG debug message\n" + r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d INFO info message\n" + r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d WARNING warning message\n" + r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d CRITICAL critical message\n" + ) + + def test_reduced_logging(self): + ops.log.setup_root_logging(self.backend) + logger = logging.getLogger() + logger.setLevel(logging.WARNING) + logger.debug('debug') + logger.info('info') + logger.warning('warning') + self.assertEqual(self.backend.calls(), [('WARNING', 'warning')]) + + +if __name__ == '__main__': + unittest.main() diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_main.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_main.py new file mode 100755 index 
0000000000000000000000000000000000000000..f62c859332d672fb4140488f2c9f1ad3841b30eb --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_main.py @@ -0,0 +1,878 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import base64 +import logging +import os +import pickle +import shutil +import subprocess +import sys +import tempfile +import unittest +import importlib.util +import warnings +from pathlib import Path +from unittest.mock import patch + +from ops.charm import ( + CharmBase, + CharmEvents, + HookEvent, + InstallEvent, + StartEvent, + ConfigChangedEvent, + UpgradeCharmEvent, + UpdateStatusEvent, + LeaderSettingsChangedEvent, + RelationJoinedEvent, + RelationChangedEvent, + RelationDepartedEvent, + RelationBrokenEvent, + RelationEvent, + StorageAttachedEvent, + ActionEvent, + CollectMetricsEvent, +) +from ops.main import main +from ops.version import version + +from .test_helpers import fake_script, fake_script_calls + +# This relies on the expected repository structure to find a path to +# source of the charm under test. +TEST_CHARM_DIR = Path(__file__ + '/../charms/test_main').resolve() + +VERSION_LOGLINE = [ + 'juju-log', '--log-level', 'DEBUG', + 'Operator Framework {} up and running.'.format(version), +] +logger = logging.getLogger(__name__) + + +class SymlinkTargetError(Exception): + pass + + +class EventSpec: + def __init__(self, event_type, event_name, env_var=None, + relation_id=None, remote_app=None, remote_unit=None, + charm_config=None, model_name=None): + self.event_type = event_type + self.event_name = event_name + self.env_var = env_var + self.relation_id = relation_id + self.remote_app = remote_app + self.remote_unit = remote_unit + self.charm_config = charm_config + self.model_name = model_name + + +class CharmInitTestCase(unittest.TestCase): + + def _check(self, charm_class): + """Helper for below tests.""" + fake_environ = { + 'JUJU_UNIT_NAME': 'test_main/0', + 'JUJU_MODEL_NAME': 'mymodel', + } + with patch.dict(os.environ, fake_environ): + with patch('ops.main._emit_charm_event'): + with patch('ops.main._get_charm_dir') as mock_charmdir: + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = Path(tmpdirname) + fake_metadata = tmpdirname / 'metadata.yaml' + with fake_metadata.open('wb') as fh: + fh.write(b'name: test') + mock_charmdir.return_value = tmpdirname + + with warnings.catch_warnings(record=True) as warnings_cm: + main(charm_class) + + return warnings_cm + + def test_init_signature_passthrough(self): + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + + warn_cm = self._check(MyCharm) + self.assertFalse(warn_cm) + + def test_init_signature_both_arguments(self): + class MyCharm(CharmBase): + + def __init__(self, framework, somekey): + super().__init__(framework, somekey) + + warn_cm = self._check(MyCharm) + self.assertEqual(len(warn_cm), 1) + (warn,) = warn_cm + self.assertTrue(issubclass(warn.category, 
DeprecationWarning)) + self.assertEqual(str(warn.message), ( + "the second argument, 'key', has been deprecated and will be removed " + "after the 0.7 release")) + + def test_init_signature_only_framework(self): + class MyCharm(CharmBase): + + def __init__(self, framework): + super().__init__(framework) + + warn_cm = self._check(MyCharm) + self.assertFalse(warn_cm) + + +class _TestMain(abc.ABC): + + @abc.abstractmethod + def _setup_entry_point(self, directory, entry_point): + """Set up the given entry point in the given directory. + + If not using dispatch, that would be a symlink / + pointing at src/charm.py; if using dispatch that would be the dispatch + symlink. It could also not be a symlink... + """ + return NotImplemented + + @abc.abstractmethod + def _call_event(self, rel_path, env): + """Set up the environment and call (i.e. run) the given event.""" + return NotImplemented + + @abc.abstractmethod + def test_setup_event_links(self): + """Test auto-creation of symlinks caused by initial events. + + Depending on the combination of dispatch and non-dispatch, this should + be checking for the creation or the _lack_ of creation, as appropriate. + """ + return NotImplemented + + def setUp(self): + self._setup_charm_dir() + + _, tmp_file = tempfile.mkstemp(dir=str(self._tmpdir)) + self._state_file = Path(tmp_file) + + # Relations events are defined dynamically and modify the class attributes. + # We use a subclass temporarily to prevent these side effects from leaking. + class TestCharmEvents(CharmEvents): + pass + CharmBase.on = TestCharmEvents() + + def cleanup(): + CharmBase.on = CharmEvents() + self.addCleanup(cleanup) + + fake_script(self, 'juju-log', "exit 0") + + # set to something other than None for tests that care + self.stdout = None + self.stderr = None + + def _setup_charm_dir(self): + self._tmpdir = Path(tempfile.mkdtemp(prefix='tmp-ops-test-')) + self.addCleanup(shutil.rmtree, str(self._tmpdir)) + self.JUJU_CHARM_DIR = self._tmpdir / 'test_main' + self.hooks_dir = self.JUJU_CHARM_DIR / 'hooks' + charm_path = str(self.JUJU_CHARM_DIR / 'src/charm.py') + self.charm_exec_path = os.path.relpath(charm_path, + str(self.hooks_dir)) + shutil.copytree(str(TEST_CHARM_DIR), str(self.JUJU_CHARM_DIR)) + + charm_spec = importlib.util.spec_from_file_location("charm", charm_path) + self.charm_module = importlib.util.module_from_spec(charm_spec) + charm_spec.loader.exec_module(self.charm_module) + + self._prepare_initial_hooks() + + def _prepare_initial_hooks(self): + initial_hooks = ('install', 'start', 'upgrade-charm', 'disks-storage-attached') + self.hooks_dir.mkdir() + for hook in initial_hooks: + self._setup_entry_point(self.hooks_dir, hook) + + def _prepare_actions(self): + # TODO: jam 2020-06-16 this same work could be done just triggering the 'install' event + # of the charm, it might be cleaner to not set up entry points directly here. 
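+ # Until then, create a hook-style entry point for each action these tests invoke.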
+ actions_dir_name = 'actions' + actions_dir = self.JUJU_CHARM_DIR / actions_dir_name + actions_dir.mkdir() + for action_name in ('start', 'foo-bar', 'get-model-name', 'get-status'): + self._setup_entry_point(actions_dir, action_name) + + def _read_and_clear_state(self): + state = None + if self._state_file.stat().st_size: + with self._state_file.open('r+b') as state_file: + state = pickle.load(state_file) + state_file.truncate(0) + return state + + def _simulate_event(self, event_spec): + ppath = Path(__file__).parent + pypath = str(ppath.parent) + if 'PYTHONPATH' in os.environ: + pypath += ':' + os.environ['PYTHONPATH'] + env = { + 'PATH': "{}:{}".format(ppath / 'bin', os.environ['PATH']), + 'PYTHONPATH': pypath, + 'JUJU_CHARM_DIR': str(self.JUJU_CHARM_DIR), + 'JUJU_UNIT_NAME': 'test_main/0', + 'CHARM_CONFIG': event_spec.charm_config, + } + if issubclass(event_spec.event_type, RelationEvent): + rel_name = event_spec.event_name.split('_')[0] + env.update({ + 'JUJU_RELATION': rel_name, + 'JUJU_RELATION_ID': str(event_spec.relation_id), + }) + remote_app = event_spec.remote_app + # For juju < 2.7 app name is extracted from JUJU_REMOTE_UNIT. + if remote_app is not None: + env['JUJU_REMOTE_APP'] = remote_app + + remote_unit = event_spec.remote_unit + if remote_unit is None: + remote_unit = '' + + env['JUJU_REMOTE_UNIT'] = remote_unit + else: + env.update({ + 'JUJU_REMOTE_UNIT': '', + 'JUJU_REMOTE_APP': '', + }) + if issubclass(event_spec.event_type, ActionEvent): + event_filename = event_spec.event_name[:-len('_action')].replace('_', '-') + env.update({ + event_spec.env_var: event_filename, + }) + if event_spec.env_var == 'JUJU_ACTION_NAME': + event_dir = 'actions' + else: + raise RuntimeError('invalid envar name specified for a action event') + else: + event_filename = event_spec.event_name.replace('_', '-') + event_dir = 'hooks' + if event_spec.model_name is not None: + env['JUJU_MODEL_NAME'] = event_spec.model_name + + self._call_event(Path(event_dir, event_filename), env) + return self._read_and_clear_state() + + def test_event_reemitted(self): + # base64 encoding is used to avoid null bytes. + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + # First run "install" to make sure all hooks are set up. + state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + state = self._simulate_event(EventSpec(ConfigChangedEvent, 'config-changed', + charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [ConfigChangedEvent]) + + # Re-emit should pick the deferred config-changed. + state = self._simulate_event(EventSpec(UpdateStatusEvent, 'update-status', + charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [ConfigChangedEvent, UpdateStatusEvent]) + + def test_no_reemission_on_collect_metrics(self): + # base64 encoding is used to avoid null bytes. + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + fake_script(self, 'add-metric', 'exit 0') + + # First run "install" to make sure all hooks are set up. 
+ state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + state = self._simulate_event(EventSpec(ConfigChangedEvent, 'config-changed', + charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [ConfigChangedEvent]) + + # Re-emit should not pick the deferred config-changed because + # collect-metrics runs in a restricted context. + state = self._simulate_event(EventSpec(CollectMetricsEvent, 'collect-metrics', + charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [CollectMetricsEvent]) + + def test_multiple_events_handled(self): + self._prepare_actions() + + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + actions_charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'USE_ACTIONS': True, + })) + + fake_script(self, 'action-get', "echo '{}'") + + # Sample events with a different amount of dashes used + # and with endpoints from different sections of metadata.yaml + events_under_test = [( + EventSpec(InstallEvent, 'install', + charm_config=charm_config), + {}, + ), ( + EventSpec(StartEvent, 'start', + charm_config=charm_config), + {}, + ), ( + EventSpec(UpdateStatusEvent, 'update_status', + charm_config=charm_config), + {}, + ), ( + EventSpec(LeaderSettingsChangedEvent, 'leader_settings_changed', + charm_config=charm_config), + {}, + ), ( + EventSpec(RelationJoinedEvent, 'db_relation_joined', + relation_id=1, + remote_app='remote', remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'db', + 'relation_id': 1, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationChangedEvent, 'mon_relation_changed', + relation_id=2, + remote_app='remote', remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationChangedEvent, 'mon_relation_changed', + relation_id=2, + remote_app='remote', remote_unit=None, + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': None}, + ), ( + EventSpec(RelationDepartedEvent, 'mon_relation_departed', + relation_id=2, + remote_app='remote', remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationBrokenEvent, 'ha_relation_broken', + relation_id=3, + charm_config=charm_config), + {'relation_name': 'ha', + 'relation_id': 3}, + ), ( + # Events without a remote app specified (for Juju < 2.7). 
+ EventSpec(RelationJoinedEvent, 'db_relation_joined', + relation_id=1, + remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'db', + 'relation_id': 1, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationChangedEvent, 'mon_relation_changed', + relation_id=2, + remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationDepartedEvent, 'mon_relation_departed', + relation_id=2, + remote_unit='remote/0', + charm_config=charm_config), + {'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0'}, + ), ( + EventSpec(ActionEvent, 'start_action', + env_var='JUJU_ACTION_NAME', + charm_config=actions_charm_config), + {}, + ), ( + EventSpec(ActionEvent, 'foo_bar_action', + env_var='JUJU_ACTION_NAME', + charm_config=actions_charm_config), + {}, + )] + + logger.debug('Expected events %s', events_under_test) + + # First run "install" to make sure all hooks are set up. + self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + + # Simulate hook executions for every event. + for event_spec, expected_event_data in events_under_test: + state = self._simulate_event(event_spec) + + state_key = 'on_' + event_spec.event_name + handled_events = state.get(state_key, []) + + # Make sure that a handler for that event was called once. + self.assertEqual(len(handled_events), 1) + # Make sure the event handled by the Charm has the right type. + handled_event_type = handled_events[0] + self.assertEqual(handled_event_type, event_spec.event_type) + + self.assertEqual(state['observed_event_types'], [event_spec.event_type]) + + if event_spec.event_name in expected_event_data: + self.assertEqual(state[event_spec.event_name + '_data'], + expected_event_data[event_spec.event_name]) + + def test_event_not_implemented(self): + """Make sure events without implementation do not cause non-zero exit. + """ + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + # Simulate a scenario where there is a symlink for an event that + # a charm does not know how to handle. + hook_path = self.JUJU_CHARM_DIR / 'hooks/not-implemented-event' + # This will be cleared up in tearDown. 
+ hook_path.symlink_to('install') + + try: + self._simulate_event(EventSpec(HookEvent, 'not-implemented-event', + charm_config=charm_config)) + except subprocess.CalledProcessError: + self.fail('Event simulation for an unsupported event' + ' results in a non-zero exit code returned') + + def test_collect_metrics(self): + indicator_file = self.JUJU_CHARM_DIR / 'indicator' + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'INDICATOR_FILE': indicator_file + })) + fake_script(self, 'add-metric', 'exit 0') + fake_script(self, 'juju-log', 'exit 0') + self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + # Clear the calls during 'install' + fake_script_calls(self, clear=True) + self._simulate_event(EventSpec(CollectMetricsEvent, 'collect_metrics', + charm_config=charm_config)) + + expected = [ + VERSION_LOGLINE, + ['juju-log', '--log-level', 'DEBUG', 'Emitting Juju event collect_metrics.'], + ['add-metric', '--labels', 'bar=4.2', 'foo=42'], + ] + calls = fake_script_calls(self) + + if self.has_dispatch: + expected.insert(1, [ + 'juju-log', '--log-level', 'DEBUG', + 'Legacy hooks/collect-metrics does not exist.']) + + self.assertEqual(calls, expected) + + def test_logger(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'USE_LOG_ACTIONS': True, + })) + fake_script(self, 'action-get', "echo '{}'") + + test_cases = [( + EventSpec(ActionEvent, 'log_critical_action', env_var='JUJU_ACTION_NAME', + charm_config=charm_config), + ['juju-log', '--log-level', 'CRITICAL', 'super critical'], + ), ( + EventSpec(ActionEvent, 'log_error_action', + env_var='JUJU_ACTION_NAME', + charm_config=charm_config), + ['juju-log', '--log-level', 'ERROR', 'grave error'], + ), ( + EventSpec(ActionEvent, 'log_warning_action', + env_var='JUJU_ACTION_NAME', + charm_config=charm_config), + ['juju-log', '--log-level', 'WARNING', 'wise warning'], + ), ( + EventSpec(ActionEvent, 'log_info_action', + env_var='JUJU_ACTION_NAME', + charm_config=charm_config), + ['juju-log', '--log-level', 'INFO', 'useful info'], + )] + + # Set up action symlinks. 
+ self._simulate_event(EventSpec(InstallEvent, 'install', + charm_config=charm_config)) + + for event_spec, calls in test_cases: + self._simulate_event(event_spec) + self.assertIn(calls, fake_script_calls(self, clear=True)) + + def test_excepthook(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'TRY_EXCEPTHOOK': True, + })) + with self.assertRaises(subprocess.CalledProcessError): + self._simulate_event(EventSpec(InstallEvent, 'install', + charm_config=charm_config)) + + calls = [' '.join(i) for i in fake_script_calls(self)] + + self.assertEqual(calls.pop(0), ' '.join(VERSION_LOGLINE)) + + if self.has_dispatch: + self.assertEqual( + calls.pop(0), + 'juju-log --log-level DEBUG Legacy hooks/install does not exist.') + + self.maxDiff = None + self.assertRegex( + calls[0], + '(?ms)juju-log --log-level ERROR Uncaught exception while in charm code:\n' + 'Traceback .most recent call last.:\n' + ' .*' + ' raise RuntimeError."failing as requested".\n' + 'RuntimeError: failing as requested' + ) + self.assertEqual(len(calls), 1, "expected 1 call, but got extra: {}".format(calls[1:])) + + def test_sets_model_name(self): + self._prepare_actions() + + actions_charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'USE_ACTIONS': True, + })) + + fake_script(self, 'action-get', "echo '{}'") + state = self._simulate_event(EventSpec( + ActionEvent, 'get_model_name_action', + env_var='JUJU_ACTION_NAME', + model_name='test-model-name', + charm_config=actions_charm_config)) + self.assertIsNotNone(state) + self.assertEqual(state['_on_get_model_name_action'], ['test-model-name']) + + def test_has_valid_status(self): + self._prepare_actions() + + actions_charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'USE_ACTIONS': True, + })) + + fake_script(self, 'action-get', "echo '{}'") + fake_script(self, 'status-get', """echo '{"status": "unknown", "message": ""}'""") + state = self._simulate_event(EventSpec( + ActionEvent, 'get_status_action', + env_var='JUJU_ACTION_NAME', + charm_config=actions_charm_config)) + self.assertIsNotNone(state) + self.assertEqual(state['status_name'], 'unknown') + self.assertEqual(state['status_message'], '') + fake_script( + self, 'status-get', """echo '{"status": "blocked", "message": "help meeee"}'""") + state = self._simulate_event(EventSpec( + ActionEvent, 'get_status_action', + env_var='JUJU_ACTION_NAME', + charm_config=actions_charm_config)) + self.assertIsNotNone(state) + self.assertEqual(state['status_name'], 'blocked') + self.assertEqual(state['status_message'], 'help meeee') + + def test_foo(self): + # base64 encoding is used to avoid null bytes. + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + # First run "install" to make sure all hooks are set up. + state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + +class TestMainWithNoDispatch(_TestMain, unittest.TestCase): + has_dispatch = False + hooks_are_symlinks = True + + def _setup_entry_point(self, directory, entry_point): + path = directory / entry_point + path.symlink_to(self.charm_exec_path) + + def _call_event(self, rel_path, env): + event_file = self.JUJU_CHARM_DIR / rel_path + # Note that sys.executable is used to make sure we are using the same + # interpreter for the child process to support virtual environments. 
+ subprocess.run( + [sys.executable, str(event_file)], + check=True, env=env, cwd=str(self.JUJU_CHARM_DIR)) + + def test_setup_event_links(self): + """Test auto-creation of symlinks caused by initial events. + """ + all_event_hooks = ['hooks/' + e.replace("_", "-") + for e in self.charm_module.Charm.on.events().keys()] + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + initial_events = { + EventSpec(InstallEvent, 'install', charm_config=charm_config), + EventSpec(StorageAttachedEvent, 'disks-storage-attached', charm_config=charm_config), + EventSpec(StartEvent, 'start', charm_config=charm_config), + EventSpec(UpgradeCharmEvent, 'upgrade-charm', charm_config=charm_config), + } + + def _assess_event_links(event_spec): + self.assertTrue(self.hooks_dir / event_spec.event_name in self.hooks_dir.iterdir()) + for event_hook in all_event_hooks: + hook_path = self.JUJU_CHARM_DIR / event_hook + self.assertTrue(hook_path.exists(), 'Missing hook: ' + event_hook) + if self.hooks_are_symlinks: + self.assertEqual(os.readlink(str(hook_path)), self.charm_exec_path) + + for initial_event in initial_events: + self._setup_charm_dir() + + self._simulate_event(initial_event) + _assess_event_links(initial_event) + # Make sure it is idempotent. + self._simulate_event(initial_event) + _assess_event_links(initial_event) + + def test_setup_action_links(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + # foo-bar is one of the actions defined in actions.yaml + action_hook = self.JUJU_CHARM_DIR / 'actions' / 'foo-bar' + self.assertTrue(action_hook.exists()) + + +class TestMainWithNoDispatchButJujuIsDispatchAware(TestMainWithNoDispatch): + def _call_event(self, rel_path, env): + env["JUJU_DISPATCH_PATH"] = str(rel_path) + super()._call_event(rel_path, env) + + +class TestMainWithNoDispatchButScriptsAreCopies(TestMainWithNoDispatch): + hooks_are_symlinks = False + + def _setup_entry_point(self, directory, entry_point): + charm_path = str(self.JUJU_CHARM_DIR / 'src/charm.py') + path = directory / entry_point + shutil.copy(charm_path, str(path)) + + +class TestMainWithDispatch(_TestMain, unittest.TestCase): + has_dispatch = True + + def _setup_entry_point(self, directory, entry_point): + path = self.JUJU_CHARM_DIR / 'dispatch' + if not path.exists(): + path.symlink_to('src/charm.py') + + def _call_event(self, rel_path, env): + env["JUJU_DISPATCH_PATH"] = str(rel_path) + dispatch = self.JUJU_CHARM_DIR / 'dispatch' + subprocess.run( + [sys.executable, str(dispatch)], + stdout=self.stdout, + stderr=self.stderr, + check=True, env=env, cwd=str(self.JUJU_CHARM_DIR)) + + def test_setup_event_links(self): + """Test auto-creation of symlinks caused by initial events does _not_ happen when using dispatch. 
+ """ + all_event_hooks = ['hooks/' + e.replace("_", "-") + for e in self.charm_module.Charm.on.events().keys()] + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + initial_events = { + EventSpec(InstallEvent, 'install', charm_config=charm_config), + EventSpec(StorageAttachedEvent, 'disks-storage-attached', charm_config=charm_config), + EventSpec(StartEvent, 'start', charm_config=charm_config), + EventSpec(UpgradeCharmEvent, 'upgrade-charm', charm_config=charm_config), + } + + def _assess_event_links(event_spec): + self.assertNotIn(self.hooks_dir / event_spec.event_name, self.hooks_dir.iterdir()) + for event_hook in all_event_hooks: + self.assertFalse((self.JUJU_CHARM_DIR / event_hook).exists(), + 'Spurious hook: ' + event_hook) + + for initial_event in initial_events: + self._setup_charm_dir() + + self._simulate_event(initial_event) + _assess_event_links(initial_event) + + def test_hook_and_dispatch(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + old_path = self.fake_script_path + self.fake_script_path = self.hooks_dir + fake_script(self, 'install', 'exit 0') + state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + + # the script was called, *and*, the .on. was called + self.assertEqual(fake_script_calls(self), [['install', '']]) + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + self.fake_script_path = old_path + self.assertEqual(fake_script_calls(self), [ + VERSION_LOGLINE, + ['juju-log', '--log-level', 'INFO', 'Running legacy hooks/install.'], + ['juju-log', '--log-level', 'DEBUG', 'Legacy hooks/install exited with status 0.'], + ['juju-log', '--log-level', 'DEBUG', 'Emitting Juju event install.'], + ]) + + def test_non_executable_hook_and_dispatch(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + (self.hooks_dir / "install").write_text("") + state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + self.assertEqual(fake_script_calls(self), [ + VERSION_LOGLINE, + ['juju-log', '--log-level', 'WARNING', + 'Legacy hooks/install exists but is not executable.'], + ['juju-log', '--log-level', 'DEBUG', 'Emitting Juju event install.'], + ]) + + def test_hook_and_dispatch_with_failing_hook(self): + self.stdout = self.stderr = tempfile.TemporaryFile() + self.addCleanup(self.stdout.close) + + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + old_path = self.fake_script_path + self.fake_script_path = self.hooks_dir + fake_script(self, 'install', 'exit 42') + event = EventSpec(InstallEvent, 'install', charm_config=charm_config) + with self.assertRaises(subprocess.CalledProcessError): + self._simulate_event(event) + self.fake_script_path = old_path + + self.stdout.seek(0) + self.assertEqual(self.stdout.read(), b'') + calls = fake_script_calls(self) + expected = [ + VERSION_LOGLINE, + ['juju-log', '--log-level', 'INFO', 'Running legacy hooks/install.'], + ['juju-log', '--log-level', 'WARNING', 'Legacy hooks/install exited with status 42.'], + ] + self.assertEqual(calls, expected) + + def test_hook_and_dispatch_but_hook_is_dispatch(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + event = EventSpec(InstallEvent, 'install', charm_config=charm_config) + hook_path = self.hooks_dir / 'install' + for ((rel, ind), path) 
in { + # relative and indirect + (True, True): Path('../dispatch'), + # relative and direct + (True, False): Path(self.charm_exec_path), + # absolute and direct + (False, False): (self.hooks_dir / self.charm_exec_path).resolve(), + # absolute and indirect + (False, True): self.JUJU_CHARM_DIR / 'dispatch', + }.items(): + with self.subTest(path=path, rel=rel, ind=ind): + # sanity check + self.assertEqual(path.is_absolute(), not rel) + self.assertEqual(path.name == 'dispatch', ind) + try: + hook_path.symlink_to(path) + + state = self._simulate_event(event) + + # the .on. was only called once + self.assertEqual(state['observed_event_types'], [InstallEvent]) + self.assertEqual(state['on_install'], [InstallEvent]) + finally: + hook_path.unlink() + + def test_hook_and_dispatch_but_hook_is_dispatch_copy(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + hook_path = self.hooks_dir / 'install' + path = (self.hooks_dir / self.charm_exec_path).resolve() + shutil.copy(str(path), str(hook_path)) + fake_script(self, 'juju-log', 'exit 0') + + event = EventSpec(InstallEvent, 'install', charm_config=charm_config) + state = self._simulate_event(event) + + # the .on. was only called once + self.assertEqual(state['observed_event_types'], [InstallEvent]) + self.assertEqual(state['on_install'], [InstallEvent]) + self.assertEqual(fake_script_calls(self), [ + VERSION_LOGLINE, + ['juju-log', '--log-level', 'INFO', 'Running legacy hooks/install.'], + VERSION_LOGLINE, # because it called itself + ['juju-log', '--log-level', 'DEBUG', 'Charm called itself via hooks/install.'], + ['juju-log', '--log-level', 'DEBUG', 'Legacy hooks/install exited with status 0.'], + ['juju-log', '--log-level', 'DEBUG', 'Emitting Juju event install.'], + ]) + + +class TestMainWithDispatchAsScript(TestMainWithDispatch): + """Here dispatch is a script that execs the charm.py instead of a symlink. + """ + + has_dispatch = True + + def _setup_entry_point(self, directory, entry_point): + path = self.JUJU_CHARM_DIR / 'dispatch' + if not path.exists(): + path.write_text('#!/bin/sh\nexec "{}" "{}"\n'.format( + sys.executable, + self.JUJU_CHARM_DIR / 'src/charm.py')) + path.chmod(0o755) + + def _call_event(self, rel_path, env): + env["JUJU_DISPATCH_PATH"] = str(rel_path) + dispatch = self.JUJU_CHARM_DIR / 'dispatch' + subprocess.check_call([str(dispatch)], + env=env, cwd=str(self.JUJU_CHARM_DIR)) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_model.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_model.py new file mode 100755 index 0000000000000000000000000000000000000000..8415efa76a3b85a1bfe3ef20497ca00987c1d6a2 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_model.py @@ -0,0 +1,1329 @@ +#!/usr/bin/python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import OrderedDict +import json +import ipaddress +import os +import pathlib +from textwrap import dedent +import unittest + +import ops.model +import ops.charm +import ops.testing +from ops.charm import RelationMeta, RelationRole + +from test.test_helpers import fake_script, fake_script_calls + + +class TestModel(unittest.TestCase): + + def setUp(self): + self.harness = ops.testing.Harness(ops.charm.CharmBase, meta=''' + name: myapp + provides: + db0: + interface: db0 + requires: + db1: + interface: db1 + peers: + db2: + interface: db2 + ''') + self.relation_id_db0 = self.harness.add_relation('db0', 'db') + self.model = self.harness.model + + def test_model_attributes(self): + self.assertIs(self.model.app, self.model.unit.app) + self.assertIsNone(self.model.name) + + def test_model_name_from_backend(self): + self.harness.set_model_name('default') + m = ops.model.Model(ops.charm.CharmMeta(), self.harness._backend) + self.assertEqual(m.name, 'default') + with self.assertRaises(AttributeError): + m.name = "changes-disallowed" + + def test_relations_keys(self): + rel_app1 = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(rel_app1, 'remoteapp1/0') + self.harness.add_relation_unit(rel_app1, 'remoteapp1/1') + rel_app2 = self.harness.add_relation('db1', 'remoteapp2') + self.harness.add_relation_unit(rel_app2, 'remoteapp2/0') + + # We invalidate db1 so that it causes us to reload it + self.model.relations._invalidate('db1') + self.resetBackendCalls() + for relation in self.model.relations['db1']: + self.assertIn(self.model.unit, relation.data) + unit_from_rel = next(filter(lambda u: u.name == 'myapp/0', relation.data.keys())) + self.assertIs(self.model.unit, unit_from_rel) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', rel_app1), + ('relation_list', rel_app2), + ]) + + def test_get_relation(self): + # one relation on db1 + # two relations on db0 + # no relations on db2 + relation_id_db1 = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id_db1, 'remoteapp1/0') + relation_id_db0_b = self.harness.add_relation('db0', 'another') + self.resetBackendCalls() + + with self.assertRaises(ops.model.ModelError): + # You have to specify it by just the integer ID + self.model.get_relation('db1', 'db1:{}'.format(relation_id_db1)) + rel_db1 = self.model.get_relation('db1', relation_id_db1) + self.assertIsInstance(rel_db1, ops.model.Relation) + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id_db1), + ]) + dead_rel = self.model.get_relation('db1', 7) + self.assertIsInstance(dead_rel, ops.model.Relation) + self.assertEqual(set(dead_rel.data.keys()), {self.model.unit, self.model.unit.app}) + self.assertEqual(dead_rel.data[self.model.unit], {}) + self.assertBackendCalls([ + ('relation_list', 7), + ('relation_get', 7, 'myapp/0', False), + ]) + + self.assertIsNone(self.model.get_relation('db2')) + self.assertBackendCalls([ + ('relation_ids', 'db2'), + ]) + self.assertIs(self.model.get_relation('db1'), rel_db1) + with self.assertRaises(ops.model.TooManyRelatedAppsError): + self.model.get_relation('db0') + + self.assertBackendCalls([ + ('relation_ids', 'db0'), + ('relation_list', self.relation_id_db0), + ('relation_list', relation_id_db0_b), + ]) + + def test_peer_relation_app(self): + self.harness.add_relation('db2', 'myapp') + rel_dbpeer = self.model.get_relation('db2') + self.assertIs(rel_dbpeer.app, self.model.app) + + def test_remote_units_is_our(self): + 
relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.add_relation_unit(relation_id, 'remoteapp1/1') + self.resetBackendCalls() + + for u in self.model.get_relation('db1').units: + self.assertFalse(u._is_our_unit) + self.assertFalse(u.app._is_our_app) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id) + ]) + + def test_our_unit_is_our(self): + self.assertTrue(self.model.unit._is_our_unit) + self.assertTrue(self.model.unit.app._is_our_app) + + def test_unit_relation_data(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.update_relation_data( + relation_id, + 'remoteapp1/0', + {'host': 'remoteapp1-0'}) + self.model.relations._invalidate('db1') + self.resetBackendCalls() + + random_unit = self.model.get_unit('randomunit/0') + with self.assertRaises(KeyError): + self.model.get_relation('db1').data[random_unit] + remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0', + self.model.get_relation('db1').units)) + self.assertEqual(self.model.get_relation('db1').data[remoteapp1_0], + {'host': 'remoteapp1-0'}) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'remoteapp1/0', False), + ]) + + def test_remote_app_relation_data(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'remoteapp1', {'secret': 'cafedeadbeef'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.add_relation_unit(relation_id, 'remoteapp1/1') + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + # Try to get relation data for an invalid remote application. + random_app = self.model._cache.get(ops.model.Application, 'randomapp') + with self.assertRaises(KeyError): + rel_db1.data[random_app] + + remoteapp1 = rel_db1.app + self.assertEqual(remoteapp1.name, 'remoteapp1') + self.assertEqual(rel_db1.data[remoteapp1], + {'secret': 'cafedeadbeef'}) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'remoteapp1', True), + ]) + + def test_relation_data_modify_remote(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'remoteapp1', {'secret': 'cafedeadbeef'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.update_relation_data(relation_id, 'remoteapp1/0', {'host': 'remoteapp1/0'}) + self.model.relations._invalidate('db1') + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0', + self.model.get_relation('db1').units)) + # Force memory cache to be loaded. 
+ self.assertIn('host', rel_db1.data[remoteapp1_0]) + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[remoteapp1_0]['foo'] = 'bar' + self.assertNotIn('foo', rel_db1.data[remoteapp1_0]) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'remoteapp1/0', False), + ]) + + def test_relation_data_modify_our(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'nothing'}) + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + # Force memory cache to be loaded. + self.assertIn('host', rel_db1.data[self.model.unit]) + rel_db1.data[self.model.unit]['host'] = 'bar' + self.assertEqual(rel_db1.data[self.model.unit]['host'], 'bar') + + self.assertBackendCalls([ + ('relation_get', relation_id, 'myapp/0', False), + ('relation_set', relation_id, 'host', 'bar', False), + ]) + + def test_app_relation_data_modify_local_as_leader(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp', {'password': 'deadbeefcafe'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.set_leader(True) + self.resetBackendCalls() + + local_app = self.model.unit.app + + rel_db1 = self.model.get_relation('db1') + self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'}) + + rel_db1.data[local_app]['password'] = 'foo' + + self.assertEqual(rel_db1.data[local_app]['password'], 'foo') + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp', True), + ('is_leader',), + ('relation_set', relation_id, 'password', 'foo', True), + ]) + + def test_app_relation_data_modify_local_as_minion(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp', {'password': 'deadbeefcafe'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.set_leader(False) + self.resetBackendCalls() + + local_app = self.model.unit.app + + rel_db1 = self.model.get_relation('db1') + self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'}) + + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[local_app]['password'] = 'foobar' + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp', True), + ('is_leader',), + ]) + + def test_relation_data_del_key(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'bar'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + # Force memory cache to be loaded. 
+ self.assertIn('host', rel_db1.data[self.model.unit]) + del rel_db1.data[self.model.unit]['host'] + self.assertNotIn('host', rel_db1.data[self.model.unit]) + self.assertEqual({}, self.harness.get_relation_data(relation_id, 'myapp/0')) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ('relation_set', relation_id, 'host', '', False), + ]) + + def test_relation_set_fail(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'myapp-0'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.resetBackendCalls() + + backend = self.harness._backend + # TODO: jam 2020-03-06 This is way too much information about relation_set + # The original test forced 'relation-set' to return exit code 2, + # but there was nothing illegal about the data that was being set, + # for us to properly test the side effects of relation-set failing. + + def broken_relation_set(relation_id, key, value, is_app): + backend._calls.append(('relation_set', relation_id, key, value, is_app)) + raise ops.model.ModelError() + backend.relation_set = broken_relation_set + + rel_db1 = self.model.get_relation('db1') + # Force memory cache to be loaded. + self.assertIn('host', rel_db1.data[self.model.unit]) + with self.assertRaises(ops.model.ModelError): + rel_db1.data[self.model.unit]['host'] = 'bar' + self.assertEqual(rel_db1.data[self.model.unit]['host'], 'myapp-0') + with self.assertRaises(ops.model.ModelError): + del rel_db1.data[self.model.unit]['host'] + self.assertIn('host', rel_db1.data[self.model.unit]) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ('relation_set', relation_id, 'host', 'bar', False), + ('relation_set', relation_id, 'host', '', False), + ]) + + def test_relation_data_type_check(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.update_relation_data(relation_id, 'myapp/0', {'host': 'myapp-0'}) + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.resetBackendCalls() + + rel_db1 = self.model.get_relation('db1') + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[self.model.unit]['foo'] = 1 + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[self.model.unit]['foo'] = {'foo': 'bar'} + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[self.model.unit]['foo'] = None + # No data has actually been changed + self.assertEqual(dict(rel_db1.data[self.model.unit]), {'host': 'myapp-0'}) + + self.assertBackendCalls([ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ]) + + def test_config(self): + self.harness.update_config({'foo': 'foo', 'bar': 1, 'qux': True}) + self.assertEqual(self.model.config, { + 'foo': 'foo', + 'bar': 1, + 'qux': True, + }) + with self.assertRaises(TypeError): + # Confirm that we cannot modify config values. + self.model.config['foo'] = 'bar' + + self.assertBackendCalls([('config_get',)]) + + def test_is_leader(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.set_leader(True) + self.resetBackendCalls() + + def check_remote_units(): + # Cannot determine leadership for remote units. 
+ for u in self.model.get_relation('db1').units: + with self.assertRaises(RuntimeError): + u.is_leader() + + self.assertTrue(self.model.unit.is_leader()) + + check_remote_units() + + # Create a new model and backend to drop a cached is-leader output. + self.harness.set_leader(False) + self.assertFalse(self.model.unit.is_leader()) + + check_remote_units() + + self.assertBackendCalls([ + ('is_leader',), + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('is_leader',), + ]) + + def test_workload_version(self): + self.model.unit.set_workload_version('1.2.3') + self.assertBackendCalls([ + ('application_version_set', '1.2.3'), + ]) + + def test_workload_version_invalid(self): + with self.assertRaises(TypeError) as cm: + self.model.unit.set_workload_version(5) + self.assertEqual(str(cm.exception), "workload version must be a str, not int: 5") + self.assertBackendCalls([]) + + def test_resources(self): + # TODO: (jam) 2020-05-07 Harness doesn't yet support resource-get issue #262 + meta = ops.charm.CharmMeta() + meta.resources = {'foo': None, 'bar': None} + model = ops.model.Model(meta, ops.model._ModelBackend('myapp/0')) + + with self.assertRaises(RuntimeError): + model.resources.fetch('qux') + + fake_script(self, 'resource-get', 'exit 1') + with self.assertRaises(ops.model.ModelError): + model.resources.fetch('foo') + + fake_script(self, 'resource-get', + 'echo /var/lib/juju/agents/unit-test-0/resources/$1/$1.tgz') + self.assertEqual(model.resources.fetch('foo').name, 'foo.tgz') + self.assertEqual(model.resources.fetch('bar').name, 'bar.tgz') + + def test_pod_spec(self): + # TODO: (jam) 2020-05-07 Harness doesn't yet expose pod-spec-set issue #261 + meta = ops.charm.CharmMeta.from_yaml(''' + name: myapp + ''') + model = ops.model.Model(meta, ops.model._ModelBackend('myapp/0')) + fake_script(self, 'pod-spec-set', """ + cat $2 > $(dirname $0)/spec.json + [ -n "$4" ] && cat "$4" > $(dirname $0)/k8s_res.json || true + """) + fake_script(self, 'is-leader', 'echo true') + spec_path = self.fake_script_path / 'spec.json' + k8s_res_path = self.fake_script_path / 'k8s_res.json' + + def check_calls(calls): + # There may 1 or 2 calls because of is-leader. + self.assertLessEqual(len(fake_calls), 2) + pod_spec_call = next(filter(lambda c: c[0] == 'pod-spec-set', calls)) + self.assertEqual(pod_spec_call[:2], ['pod-spec-set', '--file']) + + # 8 bytes are used as of python 3.4.0, see Python bug #12015. + # Other characters are from POSIX 3.282 (Portable Filename + # Character Set) a subset of which Python's mkdtemp uses. + self.assertRegex(pod_spec_call[2], '.*/tmp[A-Za-z0-9._-]{8}-pod-spec-set') + + model.pod.set_spec({'foo': 'bar'}) + self.assertEqual(spec_path.read_text(), '{"foo": "bar"}') + self.assertFalse(k8s_res_path.exists()) + + fake_calls = fake_script_calls(self, clear=True) + check_calls(fake_calls) + + model.pod.set_spec({'bar': 'foo'}, {'qux': 'baz'}) + self.assertEqual(spec_path.read_text(), '{"bar": "foo"}') + self.assertEqual(k8s_res_path.read_text(), '{"qux": "baz"}') + + fake_calls = fake_script_calls(self, clear=True) + check_calls(fake_calls) + + # Create a new model to drop is-leader caching result. 
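+        # (_ModelBackend caches the is-leader answer, so a fresh backend is
+        # required for the 'echo false' script above to take effect.)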
+ self.backend = ops.model._ModelBackend('myapp/0') + meta = ops.charm.CharmMeta() + model = ops.model.Model(meta, self.backend) + fake_script(self, 'is-leader', 'echo false') + with self.assertRaises(ops.model.ModelError): + model.pod.set_spec({'foo': 'bar'}) + + def test_base_status_instance_raises(self): + with self.assertRaises(TypeError): + ops.model.StatusBase('test') + + class NoNameStatus(ops.model.StatusBase): + pass + + with self.assertRaises(AttributeError): + ops.model.StatusBase.register_status(NoNameStatus) + + def test_status_repr(self): + test_cases = { + "ActiveStatus('Seashell')": ops.model.ActiveStatus('Seashell'), + "MaintenanceStatus('Red')": ops.model.MaintenanceStatus('Red'), + "BlockedStatus('Magenta')": ops.model.BlockedStatus('Magenta'), + "WaitingStatus('Thistle')": ops.model.WaitingStatus('Thistle'), + 'UnknownStatus()': ops.model.UnknownStatus(), + } + for expected, status in test_cases.items(): + self.assertEqual(repr(status), expected) + + def test_status_eq(self): + status_types = [ + ops.model.ActiveStatus, + ops.model.MaintenanceStatus, + ops.model.BlockedStatus, + ops.model.WaitingStatus, + ] + + self.assertEqual(ops.model.UnknownStatus(), ops.model.UnknownStatus()) + for (i, t1) in enumerate(status_types): + self.assertNotEqual(t1(''), ops.model.UnknownStatus()) + for (j, t2) in enumerate(status_types): + self.assertNotEqual(t1('one'), t2('two')) + if i == j: + self.assertEqual(t1('one'), t2('one')) + else: + self.assertNotEqual(t1('one'), t2('one')) + + def test_active_message_default(self): + self.assertEqual(ops.model.ActiveStatus().message, '') + + def test_local_set_valid_unit_status(self): + test_cases = [( + 'active', + ops.model.ActiveStatus('Green'), + ('status_set', 'active', 'Green', {'is_app': False}), + ), ( + 'maintenance', + ops.model.MaintenanceStatus('Yellow'), + ('status_set', 'maintenance', 'Yellow', {'is_app': False}), + ), ( + 'blocked', + ops.model.BlockedStatus('Red'), + ('status_set', 'blocked', 'Red', {'is_app': False}), + ), ( + 'waiting', + ops.model.WaitingStatus('White'), + ('status_set', 'waiting', 'White', {'is_app': False}), + )] + + for test_case, target_status, backend_call in test_cases: + with self.subTest(test_case): + self.model.unit.status = target_status + self.assertEqual(self.model.unit.status, target_status) + self.model.unit._invalidate() + self.assertEqual(self.model.unit.status, target_status) + self.assertBackendCalls([backend_call, ('status_get', {'is_app': False})]) + + def test_local_set_valid_app_status(self): + self.harness.set_leader(True) + test_cases = [( + 'active', + ops.model.ActiveStatus('Green'), + ('status_set', 'active', 'Green', {'is_app': True}), + ), ( + 'maintenance', + ops.model.MaintenanceStatus('Yellow'), + ('status_set', 'maintenance', 'Yellow', {'is_app': True}), + ), ( + 'blocked', + ops.model.BlockedStatus('Red'), + ('status_set', 'blocked', 'Red', {'is_app': True}), + ), ( + 'waiting', + ops.model.WaitingStatus('White'), + ('status_set', 'waiting', 'White', {'is_app': True}), + )] + + for test_case, target_status, backend_call in test_cases: + with self.subTest(test_case): + self.model.app.status = target_status + self.assertEqual(self.model.app.status, target_status) + self.model.app._invalidate() + self.assertEqual(self.model.app.status, target_status) + # There is a backend call to check if we can set the value, + # and then another check each time we assert the status above + expected_calls = [ + ('is_leader',), backend_call, + ('is_leader',), + ('is_leader',), ('status_get', 
{'is_app': True}), + ] + self.assertBackendCalls(expected_calls) + + def test_set_app_status_non_leader_raises(self): + self.harness.set_leader(False) + with self.assertRaises(RuntimeError): + self.model.app.status + + with self.assertRaises(RuntimeError): + self.model.app.status = ops.model.ActiveStatus() + + def test_set_unit_status_invalid(self): + with self.assertRaises(ops.model.InvalidStatusError): + self.model.unit.status = 'blocked' + + def test_set_app_status_invalid(self): + with self.assertRaises(ops.model.InvalidStatusError): + self.model.app.status = 'blocked' + + def test_remote_unit_status(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.add_relation_unit(relation_id, 'remoteapp1/1') + remote_unit = next(filter(lambda u: u.name == 'remoteapp1/0', + self.model.get_relation('db1').units)) + self.resetBackendCalls() + + # Remote unit status is always unknown. + self.assertEqual(remote_unit.status, ops.model.UnknownStatus()) + + test_statuses = ( + ops.model.UnknownStatus(), + ops.model.ActiveStatus('Green'), + ops.model.MaintenanceStatus('Yellow'), + ops.model.BlockedStatus('Red'), + ops.model.WaitingStatus('White'), + ) + + for target_status in test_statuses: + with self.subTest(target_status.name): + with self.assertRaises(RuntimeError): + remote_unit.status = target_status + self.assertBackendCalls([]) + + def test_remote_app_status(self): + relation_id = self.harness.add_relation('db1', 'remoteapp1') + self.harness.add_relation_unit(relation_id, 'remoteapp1/0') + self.harness.add_relation_unit(relation_id, 'remoteapp1/1') + remoteapp1 = self.model.get_relation('db1').app + self.resetBackendCalls() + + # Remote application status is always unknown. 
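+        # (The backend is never queried for this; assertBackendCalls([]) below
+        # confirms that no status-get call is issued for a remote application.)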
+ self.assertIsInstance(remoteapp1.status, ops.model.UnknownStatus) + + test_statuses = ( + ops.model.UnknownStatus(), + ops.model.ActiveStatus(), + ops.model.MaintenanceStatus('Upgrading software'), + ops.model.BlockedStatus('Awaiting manual resolution'), + ops.model.WaitingStatus('Awaiting related app updates'), + ) + for target_status in test_statuses: + with self.subTest(target_status.name): + with self.assertRaises(RuntimeError): + remoteapp1.status = target_status + self.assertBackendCalls([]) + + def test_storage(self): + # TODO: (jam) 2020-05-07 Harness doesn't yet expose storage-get issue #263 + meta = ops.charm.CharmMeta() + meta.storages = {'disks': None, 'data': None} + model = ops.model.Model(meta, ops.model._ModelBackend('myapp/0')) + + fake_script(self, 'storage-list', ''' + if [ "$1" = disks ]; then + echo '["disks/0", "disks/1"]' + else + echo '[]' + fi + ''') + fake_script(self, 'storage-get', ''' + if [ "$2" = disks/0 ]; then + echo '"/var/srv/disks/0"' + elif [ "$2" = disks/1 ]; then + echo '"/var/srv/disks/1"' + else + exit 2 + fi + ''') + fake_script(self, 'storage-add', '') + + self.assertEqual(len(model.storages), 2) + self.assertEqual(model.storages.keys(), meta.storages.keys()) + self.assertIn('disks', model.storages) + test_cases = { + 0: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/0')}, + 1: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/1')}, + } + for storage in model.storages['disks']: + self.assertEqual(storage.name, 'disks') + self.assertIn(storage.id, test_cases) + self.assertEqual(storage.name, test_cases[storage.id]['name']) + self.assertEqual(storage.location, test_cases[storage.id]['location']) + + self.assertEqual(fake_script_calls(self, clear=True), [ + ['storage-list', 'disks', '--format=json'], + ['storage-get', '-s', 'disks/0', 'location', '--format=json'], + ['storage-get', '-s', 'disks/1', 'location', '--format=json'], + ]) + + self.assertSequenceEqual(model.storages['data'], []) + model.storages.request('data', count=3) + self.assertEqual(fake_script_calls(self), [ + ['storage-list', 'data', '--format=json'], + ['storage-add', 'data=3'], + ]) + + # Try to add storage not present in charm metadata. + with self.assertRaises(ops.model.ModelError): + model.storages.request('deadbeef') + + # Invalid count parameter types. 
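+        # (False is included because bool is rejected even though it subclasses int.)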
+ for count_v in [None, False, 2.0, 'a', b'beef', object]: + with self.assertRaises(TypeError): + model.storages.request('data', count_v) + + def resetBackendCalls(self): + self.harness._get_backend_calls(reset=True) + + def assertBackendCalls(self, expected, *, reset=True): + self.assertEqual(expected, self.harness._get_backend_calls(reset=reset)) + + +class TestModelBindings(unittest.TestCase): + + def setUp(self): + meta = ops.charm.CharmMeta() + meta.relations = { + 'db0': RelationMeta( + RelationRole.provides, 'db0', {'interface': 'db0', 'scope': 'global'}), + 'db1': RelationMeta( + RelationRole.requires, 'db1', {'interface': 'db1', 'scope': 'global'}), + 'db2': RelationMeta( + RelationRole.peer, 'db2', {'interface': 'db2', 'scope': 'global'}), + } + self.backend = ops.model._ModelBackend('myapp/0') + self.model = ops.model.Model(meta, self.backend) + + fake_script(self, 'relation-ids', + """([ "$1" = db0 ] && echo '["db0:4"]') || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""") + self.network_get_out = '''{ + "bind-addresses": [ + { + "mac-address": "de:ad:be:ef:ca:fe", + "interface-name": "lo", + "addresses": [ + { + "hostname": "", + "value": "192.0.2.2", + "cidr": "192.0.2.0/24" + }, + { + "hostname": "deadbeef.example", + "value": "dead:beef::1", + "cidr": "dead:beef::/64" + } + ] + }, + { + "mac-address": "", + "interface-name": "tun", + "addresses": [ + { + "hostname": "", + "value": "192.0.3.3", + "cidr": "" + }, + { + "hostname": "", + "value": "2001:db8::3", + "cidr": "" + }, + { + "hostname": "deadbeef.local", + "value": "fe80::1:1", + "cidr": "fe80::/64" + } + ] + } + ], + "egress-subnets": [ + "192.0.2.2/32", + "192.0.3.0/24", + "dead:beef::/64", + "2001:db8::3/128" + ], + "ingress-addresses": [ + "192.0.2.2", + "192.0.3.3", + "dead:beef::1", + "2001:db8::3" + ] +}''' + + def _check_binding_data(self, binding_name, binding): + self.assertEqual(binding.name, binding_name) + self.assertEqual(binding.network.bind_address, ipaddress.ip_address('192.0.2.2')) + self.assertEqual(binding.network.ingress_address, ipaddress.ip_address('192.0.2.2')) + # /32 and /128 CIDRs are valid one-address networks for IPv{4,6}Network types respectively. + self.assertEqual(binding.network.egress_subnets, [ipaddress.ip_network('192.0.2.2/32'), + ipaddress.ip_network('192.0.3.0/24'), + ipaddress.ip_network('dead:beef::/64'), + ipaddress.ip_network('2001:db8::3/128')]) + + for (i, (name, address, subnet)) in enumerate([ + ('lo', '192.0.2.2', '192.0.2.0/24'), + ('lo', 'dead:beef::1', 'dead:beef::/64'), + ('tun', '192.0.3.3', '192.0.3.3/32'), + ('tun', '2001:db8::3', '2001:db8::3/128'), + ('tun', 'fe80::1:1', 'fe80::/64')]): + self.assertEqual(binding.network.interfaces[i].name, name) + self.assertEqual(binding.network.interfaces[i].address, ipaddress.ip_address(address)) + self.assertEqual(binding.network.interfaces[i].subnet, ipaddress.ip_network(subnet)) + + def test_invalid_keys(self): + # Basic validation for passing invalid keys. + for name in (object, 0): + with self.assertRaises(ops.model.ModelError): + self.model.get_binding(name) + + def test_dead_relations(self): + fake_script( + self, + 'network-get', + ''' + if [ "$1" = db0 ] && [ "$2" = --format=json ]; then + echo '{}' + else + echo ERROR invalid value "$2" for option -r: relation not found >&2 + exit 2 + fi + '''.format(self.network_get_out)) + # Validate the behavior for dead relations. 
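+        # Relation id 42 does not exist, so the first network-get (with -r 42)
+        # fails and the binding falls back to querying the endpoint without a
+        # relation id, matching the two calls asserted below.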
+ binding = ops.model.Binding('db0', 42, self.model._backend) + self.assertEqual(binding.network.bind_address, ipaddress.ip_address('192.0.2.2')) + self.assertEqual(fake_script_calls(self, clear=True), [ + ['network-get', 'db0', '-r', '42', '--format=json'], + ['network-get', 'db0', '--format=json'], + ]) + + def test_binding_by_relation_name(self): + fake_script(self, 'network-get', + '''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(self.network_get_out)) + binding_name = 'db0' + expected_calls = [['network-get', 'db0', '--format=json']] + + binding = self.model.get_binding(binding_name) + self._check_binding_data(binding_name, binding) + self.assertEqual(fake_script_calls(self, clear=True), expected_calls) + + def test_binding_by_relation(self): + fake_script(self, 'network-get', + '''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(self.network_get_out)) + binding_name = 'db0' + expected_calls = [ + ['relation-ids', 'db0', '--format=json'], + # The two invocations below are due to the get_relation call. + ['relation-list', '-r', '4', '--format=json'], + ['network-get', 'db0', '-r', '4', '--format=json'], + ] + binding = self.model.get_binding(self.model.get_relation(binding_name)) + self._check_binding_data(binding_name, binding) + self.assertEqual(fake_script_calls(self, clear=True), expected_calls) + + +class TestModelBackend(unittest.TestCase): + + def setUp(self): + self._backend = None + + @property + def backend(self): + if self._backend is None: + self._backend = ops.model._ModelBackend('myapp/0') + return self._backend + + def test_relation_get_set_is_app_arg(self): + # No is_app provided. + with self.assertRaises(TypeError): + self.backend.relation_set(1, 'fookey', 'barval') + + with self.assertRaises(TypeError): + self.backend.relation_get(1, 'fooentity') + + # Invalid types for is_app. + for is_app_v in [None, 1, 2.0, 'a', b'beef']: + with self.assertRaises(TypeError): + self.backend.relation_set(1, 'fookey', 'barval', is_app=is_app_v) + + with self.assertRaises(TypeError): + self.backend.relation_get(1, 'fooentity', is_app=is_app_v) + + def test_is_leader_refresh(self): + meta = ops.charm.CharmMeta.from_yaml(''' + name: myapp + ''') + model = ops.model.Model(meta, self.backend) + fake_script(self, 'is-leader', 'echo false') + self.assertFalse(model.unit.is_leader()) + + # Change the leadership status + fake_script(self, 'is-leader', 'echo true') + # If you don't force it, we don't check, so we won't see the change + self.assertFalse(model.unit.is_leader()) + # If we force a recheck, then we notice + self.backend._leader_check_time = None + self.assertTrue(model.unit.is_leader()) + + # Force a recheck without changing the leadership status. 
+ fake_script(self, 'is-leader', 'echo true') + self.backend._leader_check_time = None + self.assertTrue(model.unit.is_leader()) + + def test_relation_tool_errors(self): + self.addCleanup(os.environ.pop, 'JUJU_VERSION', None) + os.environ['JUJU_VERSION'] = '2.8.0' + err_msg = 'ERROR invalid value "$2" for option -r: relation not found' + + test_cases = [( + lambda: fake_script(self, 'relation-list', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_list(3), + ops.model.ModelError, + [['relation-list', '-r', '3', '--format=json']], + ), ( + lambda: fake_script(self, 'relation-list', 'echo {} >&2 ; exit 2'.format(err_msg)), + lambda: self.backend.relation_list(3), + ops.model.RelationNotFoundError, + [['relation-list', '-r', '3', '--format=json']], + ), ( + lambda: fake_script(self, 'relation-set', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False), + ops.model.ModelError, + [['relation-set', '-r', '3', 'foo=bar']], + ), ( + lambda: fake_script(self, 'relation-set', 'echo {} >&2 ; exit 2'.format(err_msg)), + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False), + ops.model.RelationNotFoundError, + [['relation-set', '-r', '3', 'foo=bar']], + ), ( + lambda: None, + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=True), + ops.model.RelationNotFoundError, + [['relation-set', '-r', '3', 'foo=bar', '--app']], + ), ( + lambda: fake_script(self, 'relation-get', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_get(3, 'remote/0', is_app=False), + ops.model.ModelError, + [['relation-get', '-r', '3', '-', 'remote/0', '--format=json']], + ), ( + lambda: fake_script(self, 'relation-get', 'echo {} >&2 ; exit 2'.format(err_msg)), + lambda: self.backend.relation_get(3, 'remote/0', is_app=False), + ops.model.RelationNotFoundError, + [['relation-get', '-r', '3', '-', 'remote/0', '--format=json']], + ), ( + lambda: None, + lambda: self.backend.relation_get(3, 'remote/0', is_app=True), + ops.model.RelationNotFoundError, + [['relation-get', '-r', '3', '-', 'remote/0', '--app', '--format=json']], + )] + + for i, (do_fake, run, exception, calls) in enumerate(test_cases): + with self.subTest(i): + do_fake() + with self.assertRaises(exception): + run() + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_relation_get_juju_version_quirks(self): + self.addCleanup(os.environ.pop, 'JUJU_VERSION', None) + + fake_script(self, 'relation-get', '''echo '{"foo": "bar"}' ''') + + # on 2.7.0+, things proceed as expected + for v in ['2.8.0', '2.7.0']: + with self.subTest(v): + os.environ['JUJU_VERSION'] = v + rel_data = self.backend.relation_get(1, 'foo/0', is_app=True) + self.assertEqual(rel_data, {"foo": "bar"}) + calls = [' '.join(i) for i in fake_script_calls(self, clear=True)] + self.assertEqual(calls, ['relation-get -r 1 - foo/0 --app --format=json']) + + # before 2.7.0, it just fails (no --app support) + os.environ['JUJU_VERSION'] = '2.6.9' + with self.assertRaisesRegex(RuntimeError, 'not supported on Juju version 2.6.9'): + self.backend.relation_get(1, 'foo/0', is_app=True) + self.assertEqual(fake_script_calls(self), []) + + def test_relation_set_juju_version_quirks(self): + self.addCleanup(os.environ.pop, 'JUJU_VERSION', None) + + fake_script(self, 'relation-set', 'exit 0') + + # on 2.7.0+, things proceed as expected + for v in ['2.8.0', '2.7.0']: + with self.subTest(v): + os.environ['JUJU_VERSION'] = v + self.backend.relation_set(1, 'foo', 'bar', is_app=True) + calls = [' '.join(i) for i in 
fake_script_calls(self, clear=True)] + self.assertEqual(calls, ['relation-set -r 1 foo=bar --app']) + + # before 2.7.0, it just fails always (no --app support) + os.environ['JUJU_VERSION'] = '2.6.9' + with self.assertRaisesRegex(RuntimeError, 'not supported on Juju version 2.6.9'): + self.backend.relation_set(1, 'foo', 'bar', is_app=True) + self.assertEqual(fake_script_calls(self), []) + + def test_status_get(self): + # taken from actual Juju output + content = '{"message": "", "status": "unknown", "status-data": {}}' + fake_script(self, 'status-get', "echo '{}'".format(content)) + s = self.backend.status_get(is_app=False) + self.assertEqual(s['status'], "unknown") + self.assertEqual(s['message'], "") + # taken from actual Juju output + content = dedent(""" + { + "application-status": { + "message": "installing", + "status": "maintenance", + "status-data": {}, + "units": { + "uo/0": { + "message": "", + "status": "active", + "status-data": {} + } + } + } + } + """) + fake_script(self, 'status-get', "echo '{}'".format(content)) + s = self.backend.status_get(is_app=True) + self.assertEqual(s['status'], "maintenance") + self.assertEqual(s['message'], "installing") + self.assertEqual(fake_script_calls(self, clear=True), [ + ['status-get', '--include-data', '--application=False', '--format=json'], + ['status-get', '--include-data', '--application=True', '--format=json'], + ]) + + def test_status_is_app_forced_kwargs(self): + fake_script(self, 'status-get', 'exit 1') + fake_script(self, 'status-set', 'exit 1') + + test_cases = ( + lambda: self.backend.status_get(False), + lambda: self.backend.status_get(True), + lambda: self.backend.status_set('active', '', False), + lambda: self.backend.status_set('active', '', True), + ) + + for case in test_cases: + with self.assertRaises(TypeError): + case() + + def test_local_set_invalid_status(self): + # juju return exit code 1 if you ask to set status to 'unknown' + meta = ops.charm.CharmMeta.from_yaml(''' + name: myapp + ''') + model = ops.model.Model(meta, self.backend) + fake_script(self, 'status-set', 'exit 1') + fake_script(self, 'is-leader', 'echo true') + + with self.assertRaises(ops.model.ModelError): + model.unit.status = ops.model.UnknownStatus() + + self.assertEqual(fake_script_calls(self, True), [ + ['status-set', '--application=False', 'unknown', ''], + ]) + + with self.assertRaises(ops.model.ModelError): + model.app.status = ops.model.UnknownStatus() + + # A leadership check is needed for application status. 
+ self.assertEqual(fake_script_calls(self, True), [ + ['is-leader', '--format=json'], + ['status-set', '--application=True', 'unknown', ''], + ]) + + def test_status_set_is_app_not_bool_raises(self): + for is_app_v in [None, 1, 2.0, 'a', b'beef', object]: + with self.assertRaises(TypeError): + self.backend.status_set(ops.model.ActiveStatus, is_app=is_app_v) + + def test_storage_tool_errors(self): + test_cases = [( + lambda: fake_script(self, 'storage-list', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_list('foobar'), + ops.model.ModelError, + [['storage-list', 'foobar', '--format=json']], + ), ( + lambda: fake_script(self, 'storage-get', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_get('foobar', 'someattr'), + ops.model.ModelError, + [['storage-get', '-s', 'foobar', 'someattr', '--format=json']], + ), ( + lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_add('foobar', count=2), + ops.model.ModelError, + [['storage-add', 'foobar=2']], + ), ( + lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_add('foobar', count=object), + TypeError, + [], + ), ( + lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_add('foobar', count=True), + TypeError, + [], + )] + for do_fake, run, exception, calls in test_cases: + do_fake() + with self.assertRaises(exception): + run() + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_network_get(self): + network_get_out = '''{ + "bind-addresses": [ + { + "mac-address": "", + "interface-name": "", + "addresses": [ + { + "hostname": "", + "value": "192.0.2.2", + "cidr": "" + } + ] + } + ], + "egress-subnets": [ + "192.0.2.2/32" + ], + "ingress-addresses": [ + "192.0.2.2" + ] +}''' + fake_script(self, 'network-get', + '''[ "$1" = deadbeef ] && echo '{}' || exit 1'''.format(network_get_out)) + network_info = self.backend.network_get('deadbeef') + self.assertEqual(network_info, json.loads(network_get_out)) + self.assertEqual(fake_script_calls(self, clear=True), + [['network-get', 'deadbeef', '--format=json']]) + + network_info = self.backend.network_get('deadbeef', 1) + self.assertEqual(network_info, json.loads(network_get_out)) + self.assertEqual(fake_script_calls(self, clear=True), + [['network-get', 'deadbeef', '-r', '1', '--format=json']]) + + def test_network_get_errors(self): + err_no_endpoint = 'ERROR no network config found for binding "$2"' + err_no_rel = 'ERROR invalid value "$3" for option -r: relation not found' + + test_cases = [( + lambda: fake_script(self, 'network-get', + 'echo {} >&2 ; exit 1'.format(err_no_endpoint)), + lambda: self.backend.network_get("deadbeef"), + ops.model.ModelError, + [['network-get', 'deadbeef', '--format=json']], + ), ( + lambda: fake_script(self, 'network-get', 'echo {} >&2 ; exit 2'.format(err_no_rel)), + lambda: self.backend.network_get("deadbeef", 3), + ops.model.RelationNotFoundError, + [['network-get', 'deadbeef', '-r', '3', '--format=json']], + )] + for do_fake, run, exception, calls in test_cases: + do_fake() + with self.assertRaises(exception): + run() + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_get_error(self): + fake_script(self, 'action-get', '') + fake_script(self, 'action-get', 'echo fooerror >&2 ; exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.action_get() + calls = [['action-get', '--format=json']] + self.assertEqual(fake_script_calls(self, 
clear=True), calls) + + def test_action_set_error(self): + fake_script(self, 'action-get', '') + fake_script(self, 'action-set', 'echo fooerror >&2 ; exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.action_set(OrderedDict([('foo', 'bar'), ('dead', 'beef cafe')])) + calls = [["action-set", "foo=bar", "dead=beef cafe"]] + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_log_error(self): + fake_script(self, 'action-get', '') + fake_script(self, 'action-log', 'echo fooerror >&2 ; exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.action_log('log-message') + calls = [["action-log", "log-message"]] + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_get(self): + fake_script(self, 'action-get', """echo '{"foo-name": "bar", "silent": false}'""") + params = self.backend.action_get() + self.assertEqual(params['foo-name'], 'bar') + self.assertEqual(params['silent'], False) + self.assertEqual(fake_script_calls(self), [['action-get', '--format=json']]) + + def test_action_set(self): + fake_script(self, 'action-get', 'exit 1') + fake_script(self, 'action-set', 'exit 0') + self.backend.action_set(OrderedDict([('x', 'dead beef'), ('y', 1)])) + self.assertEqual(fake_script_calls(self), [['action-set', 'x=dead beef', 'y=1']]) + + def test_action_fail(self): + fake_script(self, 'action-get', 'exit 1') + fake_script(self, 'action-fail', 'exit 0') + self.backend.action_fail('error 42') + self.assertEqual(fake_script_calls(self), [['action-fail', 'error 42']]) + + def test_action_log(self): + fake_script(self, 'action-get', 'exit 1') + fake_script(self, 'action-log', 'exit 0') + self.backend.action_log('progress: 42%') + self.assertEqual(fake_script_calls(self), [['action-log', 'progress: 42%']]) + + def test_application_version_set(self): + fake_script(self, 'application-version-set', 'exit 0') + self.backend.application_version_set('1.2b3') + self.assertEqual(fake_script_calls(self), [['application-version-set', '--', '1.2b3']]) + + def test_application_version_set_invalid(self): + fake_script(self, 'application-version-set', 'exit 0') + with self.assertRaises(TypeError): + self.backend.application_version_set(2) + with self.assertRaises(TypeError): + self.backend.application_version_set() + self.assertEqual(fake_script_calls(self), []) + + def test_juju_log(self): + fake_script(self, 'juju-log', 'exit 0') + self.backend.juju_log('WARNING', 'foo') + self.assertEqual(fake_script_calls(self, clear=True), + [['juju-log', '--log-level', 'WARNING', 'foo']]) + + with self.assertRaises(TypeError): + self.backend.juju_log('DEBUG') + self.assertEqual(fake_script_calls(self, clear=True), []) + + fake_script(self, 'juju-log', 'exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.juju_log('BAR', 'foo') + self.assertEqual(fake_script_calls(self, clear=True), + [['juju-log', '--log-level', 'BAR', 'foo']]) + + def test_valid_metrics(self): + fake_script(self, 'add-metric', 'exit 0') + test_cases = [( + OrderedDict([('foo', 42), ('b-ar', 4.5), ('ba_-z', 4.5), ('a', 1)]), + OrderedDict([('de', 'ad'), ('be', 'ef_ -')]), + [['add-metric', '--labels', 'de=ad,be=ef_ -', + 'foo=42', 'b-ar=4.5', 'ba_-z=4.5', 'a=1']] + ), ( + OrderedDict([('foo1', 0), ('b2r', 4.5)]), + OrderedDict([('d3', 'aд'), ('b33f', '3_ -')]), + [['add-metric', '--labels', 'd3=aд,b33f=3_ -', 'foo1=0', 'b2r=4.5']], + )] + for metrics, labels, expected_calls in test_cases: + self.backend.add_metrics(metrics, labels) + 
self.assertEqual(fake_script_calls(self, clear=True), expected_calls) + + def test_invalid_metric_names(self): + invalid_inputs = [ + ({'': 4.2}, {}), + ({'1': 4.2}, {}), + ({'1': -4.2}, {}), + ({'123': 4.2}, {}), + ({'1foo': 4.2}, {}), + ({'-foo': 4.2}, {}), + ({'_foo': 4.2}, {}), + ({'foo-': 4.2}, {}), + ({'foo_': 4.2}, {}), + ({'a-': 4.2}, {}), + ({'a_': 4.2}, {}), + ({'BAЯ': 4.2}, {}), + ] + for metrics, labels in invalid_inputs: + with self.assertRaises(ops.model.ModelError): + self.backend.add_metrics(metrics, labels) + + def test_invalid_metric_values(self): + invalid_inputs = [ + ({'a': float('+inf')}, {}), + ({'a': float('-inf')}, {}), + ({'a': float('nan')}, {}), + ({'foo': 'bar'}, {}), + ({'foo': '1O'}, {}), + ] + for metrics, labels in invalid_inputs: + with self.assertRaises(ops.model.ModelError): + self.backend.add_metrics(metrics, labels) + + def test_invalid_metric_labels(self): + invalid_inputs = [ + ({'foo': 4.2}, {'': 'baz'}), + ({'foo': 4.2}, {',bar': 'baz'}), + ({'foo': 4.2}, {'b=a=r': 'baz'}), + ({'foo': 4.2}, {'BAЯ': 'baz'}), + ] + for metrics, labels in invalid_inputs: + with self.assertRaises(ops.model.ModelError): + self.backend.add_metrics(metrics, labels) + + def test_invalid_metric_label_values(self): + invalid_inputs = [ + ({'foo': 4.2}, {'bar': ''}), + ({'foo': 4.2}, {'bar': 'b,az'}), + ({'foo': 4.2}, {'bar': 'b=az'}), + ] + for metrics, labels in invalid_inputs: + with self.assertRaises(ops.model.ModelError): + self.backend.add_metrics(metrics, labels) + + +class TestLazyMapping(unittest.TestCase): + + def test_invalidate(self): + loaded = [] + + class MyLazyMap(ops.model.LazyMapping): + def _load(self): + loaded.append(1) + return {'foo': 'bar'} + + map = MyLazyMap() + self.assertEqual(map['foo'], 'bar') + self.assertEqual(loaded, [1]) + self.assertEqual(map['foo'], 'bar') + self.assertEqual(loaded, [1]) + map._invalidate() + self.assertEqual(map['foo'], 'bar') + self.assertEqual(loaded, [1, 1]) + + +if __name__ == "__main__": + unittest.main() diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_storage.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_storage.py new file mode 100755 index 0000000000000000000000000000000000000000..3410999bd5f045cd3d65396cc5be546a3c4a94e0 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_storage.py @@ -0,0 +1,412 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import gc +import io +import os +import pathlib +import sys +import tempfile +from textwrap import dedent + +import yaml + +from ops import ( + framework, + storage, +) +from test.test_helpers import ( + BaseTestCase, + fake_script, + fake_script_calls, +) + + +class StoragePermutations(abc.ABC): + + def create_framework(self) -> framework.Framework: + """Create a Framework that we can use to test the backend storage. 
+ """ + return framework.Framework(self.create_storage(), None, None, None) + + @abc.abstractmethod + def create_storage(self) -> storage.SQLiteStorage: + """Create a Storage backend that we can interact with""" + return NotImplemented + + def test_save_and_load_snapshot(self): + f = self.create_framework() + + class Sample(framework.Object): + + def __init__(self, parent, key, content): + super().__init__(parent, key) + self.content = content + + def snapshot(self): + return {'content': self.content} + + def restore(self, snapshot): + self.__dict__.update(snapshot) + + f.register_type(Sample, None, Sample.handle_kind) + data = { + 'str': 'string', + 'bytes': b'bytes', + 'int': 1, + 'float': 3.0, + 'dict': {'a': 'b'}, + 'set': {'a', 'b'}, + 'list': [1, 2], + } + s = Sample(f, 'test', data) + handle = s.handle + f.save_snapshot(s) + del s + gc.collect() + res = f.load_snapshot(handle) + self.assertEqual(data, res.content) + + def test_emit_event(self): + f = self.create_framework() + + class Evt(framework.EventBase): + def __init__(self, handle, content): + super().__init__(handle) + self.content = content + + def snapshot(self): + return self.content + + def restore(self, content): + self.content = content + + class Events(framework.ObjectEvents): + event = framework.EventSource(Evt) + + class Sample(framework.Object): + + on = Events() + + def __init__(self, parent, key): + super().__init__(parent, key) + self.observed_content = None + self.framework.observe(self.on.event, self._on_event) + + def _on_event(self, event: Evt): + self.observed_content = event.content + + s = Sample(f, 'key') + f.register_type(Sample, None, Sample.handle_kind) + s.on.event.emit('foo') + self.assertEqual('foo', s.observed_content) + s.on.event.emit(1) + self.assertEqual(1, s.observed_content) + s.on.event.emit(None) + self.assertEqual(None, s.observed_content) + + def test_save_and_overwrite_snapshot(self): + store = self.create_storage() + store.save_snapshot('foo', {1: 2}) + self.assertEqual({1: 2}, store.load_snapshot('foo')) + store.save_snapshot('foo', {'three': 4}) + self.assertEqual({'three': 4}, store.load_snapshot('foo')) + + def test_drop_snapshot(self): + store = self.create_storage() + store.save_snapshot('foo', {1: 2}) + self.assertEqual({1: 2}, store.load_snapshot('foo')) + store.drop_snapshot('foo') + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('foo') + + def test_save_snapshot_empty_string(self): + store = self.create_storage() + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('foo') + store.save_snapshot('foo', '') + self.assertEqual('', store.load_snapshot('foo')) + store.drop_snapshot('foo') + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('foo') + + def test_save_snapshot_none(self): + store = self.create_storage() + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('bar') + store.save_snapshot('bar', None) + self.assertEqual(None, store.load_snapshot('bar')) + store.drop_snapshot('bar') + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('bar') + + def test_save_snapshot_zero(self): + store = self.create_storage() + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('zero') + store.save_snapshot('zero', 0) + self.assertEqual(0, store.load_snapshot('zero')) + store.drop_snapshot('zero') + with self.assertRaises(storage.NoSnapshotError): + store.load_snapshot('zero') + + def test_save_notice(self): + store = self.create_storage() + store.save_notice('event', 
'observer', 'method') + self.assertEqual( + list(store.notices('event')), + [('event', 'observer', 'method')]) + + def test_load_notices(self): + store = self.create_storage() + self.assertEqual(list(store.notices('path')), []) + + def test_save_one_load_another_notice(self): + store = self.create_storage() + store.save_notice('event', 'observer', 'method') + self.assertEqual(list(store.notices('other')), []) + + def test_save_load_drop_load_notices(self): + store = self.create_storage() + store.save_notice('event', 'observer', 'method') + store.save_notice('event', 'observer', 'method2') + self.assertEqual( + list(store.notices('event')), + [('event', 'observer', 'method'), + ('event', 'observer', 'method2'), + ]) + + +class TestSQLiteStorage(StoragePermutations, BaseTestCase): + + def create_storage(self): + return storage.SQLiteStorage(':memory:') + + +def setup_juju_backend(test_case, state_file): + """Create fake scripts for pretending to be state-set and state-get""" + template_args = { + 'executable': sys.executable, + 'pthpth': os.path.dirname(pathlib.__file__), + 'state_file': str(state_file), + } + + fake_script(test_case, 'state-set', dedent('''\ + {executable} -c ' + import sys + if "{pthpth}" not in sys.path: + sys.path.append("{pthpth}") + import sys, yaml, pathlib, pickle + assert sys.argv[1:] == ["--file", "-"] + request = yaml.load(sys.stdin, Loader=getattr(yaml, "CSafeLoader", yaml.SafeLoader)) + state_file = pathlib.Path("{state_file}") + if state_file.exists() and state_file.stat().st_size > 0: + with state_file.open("rb") as f: + state = pickle.load(f) + else: + state = {{}} + for k, v in request.items(): + state[k] = v + with state_file.open("wb") as f: + pickle.dump(state, f) + ' "$@" + ''').format(**template_args)) + + fake_script(test_case, 'state-get', dedent('''\ + {executable} -Sc ' + import sys + if "{pthpth}" not in sys.path: + sys.path.append("{pthpth}") + import sys, pathlib, pickle + assert len(sys.argv) == 2 + state_file = pathlib.Path("{state_file}") + if state_file.exists() and state_file.stat().st_size > 0: + with state_file.open("rb") as f: + state = pickle.load(f) + else: + state = {{}} + result = state.get(sys.argv[1], "\\n") + sys.stdout.write(result) + ' "$@" + ''').format(**template_args)) + + fake_script(test_case, 'state-delete', dedent('''\ + {executable} -Sc ' + import sys + if "{pthpth}" not in sys.path: + sys.path.append("{pthpth}") + import sys, pathlib, pickle + assert len(sys.argv) == 2 + state_file = pathlib.Path("{state_file}") + if state_file.exists() and state_file.stat().st_size > 0: + with state_file.open("rb") as f: + state = pickle.load(f) + else: + state = {{}} + state.pop(sys.argv[1], None) + with state_file.open("wb") as f: + pickle.dump(state, f) + ' "$@" + ''').format(**template_args)) + + +class TestJujuStorage(StoragePermutations, BaseTestCase): + + def create_storage(self): + state_file = pathlib.Path(tempfile.mkstemp(prefix='tmp-ops-test-state-')[1]) + self.addCleanup(state_file.unlink) + setup_juju_backend(self, state_file) + return storage.JujuStorage() + + +class TestSimpleLoader(BaseTestCase): + + def test_is_c_loader(self): + loader = storage._SimpleLoader(io.StringIO('')) + if getattr(yaml, 'CSafeLoader', None) is not None: + self.assertIsInstance(loader, yaml.CSafeLoader) + else: + self.assertIsInstance(loader, yaml.SafeLoader) + + def test_is_c_dumper(self): + dumper = storage._SimpleDumper(io.StringIO('')) + if getattr(yaml, 'CSafeDumper', None) is not None: + self.assertIsInstance(dumper, yaml.CSafeDumper) + 
else: + self.assertIsInstance(dumper, yaml.SafeDumper) + + def test_handles_tuples(self): + raw = yaml.dump((1, 'tuple'), Dumper=storage._SimpleDumper) + parsed = yaml.load(raw, Loader=storage._SimpleLoader) + self.assertEqual(parsed, (1, 'tuple')) + + def assertRefused(self, obj): + # We shouldn't allow them to be written + with self.assertRaises(yaml.representer.RepresenterError): + yaml.dump(obj, Dumper=storage._SimpleDumper) + # If they did somehow end up written, we shouldn't be able to load them + raw = yaml.dump(obj, Dumper=yaml.Dumper) + with self.assertRaises(yaml.constructor.ConstructorError): + yaml.load(raw, Loader=storage._SimpleLoader) + + def test_forbids_some_types(self): + self.assertRefused(1 + 2j) + self.assertRefused({'foo': 1 + 2j}) + self.assertRefused(frozenset(['foo', 'bar'])) + self.assertRefused(bytearray(b'foo')) + self.assertRefused(object()) + + class Foo: + pass + f = Foo() + self.assertRefused(f) + + +class TestJujuStateBackend(BaseTestCase): + + def test_is_not_available(self): + self.assertFalse(storage._JujuStorageBackend.is_available()) + + def test_is_available(self): + fake_script(self, 'state-get', 'echo ""') + self.assertTrue(storage._JujuStorageBackend.is_available()) + self.assertEqual(fake_script_calls(self, clear=True), []) + + def test_set_encodes_args(self): + t = tempfile.NamedTemporaryFile() + fake_script(self, 'state-set', dedent(""" + cat >> {} + """).format(t.name)) + backend = storage._JujuStorageBackend() + backend.set('key', {'foo': 2}) + self.assertEqual(fake_script_calls(self, clear=True), [ + ['state-set', '--file', '-'], + ]) + t.seek(0) + content = t.read() + self.assertEqual(content.decode('utf-8'), dedent("""\ + "key": | + {foo: 2} + """)) + + def test_get(self): + fake_script(self, 'state-get', dedent(""" + echo 'foo: "bar"' + """)) + backend = storage._JujuStorageBackend() + value = backend.get('key') + self.assertEqual(value, {'foo': 'bar'}) + self.assertEqual(fake_script_calls(self, clear=True), [ + ['state-get', 'key'], + ]) + + def test_set_and_get_complex_value(self): + t = tempfile.NamedTemporaryFile() + fake_script(self, 'state-set', dedent(""" + cat >> {} + """).format(t.name)) + backend = storage._JujuStorageBackend() + complex_val = { + 'foo': 2, + 3: [1, 2, '3'], + 'four': {2, 3}, + 'five': {'a': 2, 'b': 3.0}, + 'six': ('a', 'b'), + 'seven': b'1234', + } + backend.set('Class[foo]/_stored', complex_val) + self.assertEqual(fake_script_calls(self, clear=True), [ + ['state-set', '--file', '-'], + ]) + t.seek(0) + content = t.read() + outer = yaml.safe_load(content) + key = 'Class[foo]/_stored' + self.assertEqual(list(outer.keys()), [key]) + inner = yaml.load(outer[key], Loader=storage._SimpleLoader) + self.assertEqual(complex_val, inner) + if sys.version_info >= (3, 6): + # In Python 3.5 dicts are not ordered by default, and PyYAML only + # iterates the dict. So we read and assert the content is valid, + # but we don't assert the serialized form. + self.assertEqual(content.decode('utf-8'), dedent("""\ + "Class[foo]/_stored": | + foo: 2 + 3: [1, 2, '3'] + four: !!set {2: null, 3: null} + five: {a: 2, b: 3.0} + six: !!python/tuple [a, b] + seven: !!binary | + MTIzNA== + """)) + # Note that the content is yaml in a string, embedded inside YAML to declare the Key: + # Value of where to store the entry. 
+ fake_script(self, 'state-get', dedent(""" + echo "foo: 2 + 3: [1, 2, '3'] + four: !!set {2: null, 3: null} + five: {a: 2, b: 3.0} + six: !!python/tuple [a, b] + seven: !!binary | + MTIzNA== + " + """)) + out = backend.get('Class[foo]/_stored') + self.assertEqual(out, complex_val) + + # TODO: Add tests for things we don't want to support. eg, YAML that has custom types should + # be properly forbidden. + # TODO: Tests for state-set/get/delete and how they handle if you ask to delete something + # that doesn't exist, or get something that doesn't exist, etc. diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_testing.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_testing.py new file mode 100644 index 0000000000000000000000000000000000000000..8f9e4eb17f7a1f746834357987976343e216ee2d --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/mod/operator/test/test_testing.py @@ -0,0 +1,931 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import pathlib +import shutil +import sys +import tempfile +import textwrap +import unittest +import yaml + +from ops.charm import ( + CharmBase, + RelationEvent, +) +from ops.framework import ( + Object, +) +from ops.model import ( + ActiveStatus, + MaintenanceStatus, + UnknownStatus, + ModelError, + RelationNotFoundError, +) +from ops.testing import Harness + + +class TestHarness(unittest.TestCase): + + def test_add_relation(self): + harness = Harness(CharmBase, meta=''' + name: test-app + requires: + db: + interface: pgsql + ''') + rel_id = harness.add_relation('db', 'postgresql') + self.assertIsInstance(rel_id, int) + backend = harness._backend + self.assertEqual(backend.relation_ids('db'), [rel_id]) + self.assertEqual(backend.relation_list(rel_id), []) + # Make sure the initial data bags for our app and unit are empty. 
+ self.assertEqual(backend.relation_get(rel_id, 'test-app', is_app=True), {}) + self.assertEqual(backend.relation_get(rel_id, 'test-app/0', is_app=False), {}) + + def test_add_relation_and_unit(self): + harness = Harness(CharmBase, meta=''' + name: test-app + requires: + db: + interface: pgsql + ''') + rel_id = harness.add_relation('db', 'postgresql') + self.assertIsInstance(rel_id, int) + harness.add_relation_unit(rel_id, 'postgresql/0') + harness.update_relation_data(rel_id, 'postgresql/0', {'foo': 'bar'}) + backend = harness._backend + self.assertEqual(backend.relation_ids('db'), [rel_id]) + self.assertEqual(backend.relation_list(rel_id), ['postgresql/0']) + self.assertEqual( + backend.relation_get(rel_id, 'postgresql/0', is_app=False), + {'foo': 'bar'}) + + def test_add_relation_with_remote_app_data(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: test-app + requires: + db: + interface: pgsql + ''') + remote_app = 'postgresql' + rel_id = harness.add_relation('db', remote_app) + harness.update_relation_data(rel_id, 'postgresql', {'app': 'data'}) + self.assertIsInstance(rel_id, int) + backend = harness._backend + self.assertEqual([rel_id], backend.relation_ids('db')) + self.assertEqual({'app': 'data'}, backend.relation_get(rel_id, remote_app, is_app=True)) + + def test_add_relation_with_our_initial_data(self): + + class InitialDataTester(CharmBase): + """Record the relation-changed events.""" + + def __init__(self, framework): + super().__init__(framework) + self.observed_events = [] + self.framework.observe(self.on.db_relation_changed, self._on_db_relation_changed) + + def _on_db_relation_changed(self, event): + self.observed_events.append(event) + + # language=YAML + harness = Harness(InitialDataTester, meta=''' + name: test-app + requires: + db: + interface: pgsql + ''') + rel_id = harness.add_relation('db', 'postgresql') + harness.update_relation_data(rel_id, 'test-app', {'k': 'v1'}) + harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.1'}) + backend = harness._backend + self.assertEqual({'k': 'v1'}, backend.relation_get(rel_id, 'test-app', is_app=True)) + self.assertEqual({'ingress-address': '192.0.2.1'}, + backend.relation_get(rel_id, 'test-app/0', is_app=False)) + + harness.begin() + self.assertEqual({'k': 'v1'}, backend.relation_get(rel_id, 'test-app', is_app=True)) + self.assertEqual({'ingress-address': '192.0.2.1'}, + backend.relation_get(rel_id, 'test-app/0', is_app=False)) + # Make sure no relation-changed events are emitted for our own data bags. + self.assertEqual([], harness.charm.observed_events) + + # A remote unit can still update our app relation data bag since our unit is not a leader. + harness.update_relation_data(rel_id, 'test-app', {'k': 'v2'}) + # And we get an event + self.assertEqual([], harness.charm.observed_events) + # We can also update our own relation data, even if it is a bit 'cheaty' + harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'}) + # But no event happens + + # Updating our data app relation data bag and our unit data bag does not generate events. 
+ harness.set_leader(True) + harness.update_relation_data(rel_id, 'test-app', {'k': 'v3'}) + harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'}) + self.assertEqual([], harness.charm.observed_events) + + def test_add_peer_relation_with_initial_data_leader(self): + + class InitialDataTester(CharmBase): + """Record the relation-changed events.""" + + def __init__(self, framework): + super().__init__(framework) + self.observed_events = [] + self.framework.observe(self.on.cluster_relation_changed, + self._on_cluster_relation_changed) + + def _on_cluster_relation_changed(self, event): + self.observed_events.append(event) + + # language=YAML + harness = Harness(InitialDataTester, meta=''' + name: test-app + peers: + cluster: + interface: cluster + ''') + # TODO: dmitriis 2020-04-07 test a minion unit and initial peer relation app data + # events when the harness begins to emit events for initial data. + harness.set_leader(is_leader=True) + rel_id = harness.add_relation('cluster', 'test-app') + harness.update_relation_data(rel_id, 'test-app', {'k': 'v'}) + harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.1'}) + backend = harness._backend + self.assertEqual({'k': 'v'}, backend.relation_get(rel_id, 'test-app', is_app=True)) + self.assertEqual({'ingress-address': '192.0.2.1'}, + backend.relation_get(rel_id, 'test-app/0', is_app=False)) + + harness.begin() + self.assertEqual({'k': 'v'}, backend.relation_get(rel_id, 'test-app', is_app=True)) + self.assertEqual({'ingress-address': '192.0.2.1'}, + backend.relation_get(rel_id, 'test-app/0', is_app=False)) + # Make sure no relation-changed events are emitted for our own data bags. + self.assertEqual([], harness.charm.observed_events) + + # Updating our app relation data bag and our unit data bag does not trigger events + harness.update_relation_data(rel_id, 'test-app', {'k': 'v2'}) + harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'}) + self.assertEqual([], harness.charm.observed_events) + + # If our unit becomes a minion, updating app relation data indirectly becomes possible + # and our charm gets notifications. 
+ harness.set_leader(False) + harness.update_relation_data(rel_id, 'test-app', {'k': 'v3'}) + self.assertEqual({'k': 'v3'}, backend.relation_get(rel_id, 'test-app', is_app=True)) + self.assertTrue(len(harness.charm.observed_events), 1) + self.assertIsInstance(harness.charm.observed_events[0], RelationEvent) + + def test_relation_events(self): + harness = Harness(RelationEventCharm, meta=''' + name: test-app + requires: + db: + interface: pgsql + ''') + harness.begin() + harness.charm.observe_relation_events('db') + self.assertEqual(harness.charm.get_changes(), []) + rel_id = harness.add_relation('db', 'postgresql') + self.assertEqual( + harness.charm.get_changes(), + [{'name': 'relation-created', + 'data': { + 'app': 'postgresql', + 'unit': None, + 'relation_id': rel_id, + }}]) + harness.add_relation_unit(rel_id, 'postgresql/0') + self.assertEqual( + harness.charm.get_changes(), + [{'name': 'relation-joined', + 'data': { + 'app': 'postgresql', + 'unit': 'postgresql/0', + 'relation_id': rel_id, + }}]) + harness.update_relation_data(rel_id, 'postgresql', {'foo': 'bar'}) + self.assertEqual( + harness.charm.get_changes(), + [{'name': 'relation-changed', + 'data': { + 'app': 'postgresql', + 'unit': None, + 'relation_id': rel_id, + }}]) + harness.update_relation_data(rel_id, 'postgresql/0', {'baz': 'bing'}) + self.assertEqual( + harness.charm.get_changes(), + [{'name': 'relation-changed', + 'data': { + 'app': 'postgresql', + 'unit': 'postgresql/0', + 'relation_id': rel_id, + }}]) + + def test_get_relation_data(self): + harness = Harness(CharmBase, meta=''' + name: test-app + requires: + db: + interface: pgsql + ''') + rel_id = harness.add_relation('db', 'postgresql') + harness.update_relation_data(rel_id, 'postgresql', {'remote': 'data'}) + self.assertEqual(harness.get_relation_data(rel_id, 'test-app'), {}) + self.assertEqual(harness.get_relation_data(rel_id, 'test-app/0'), {}) + self.assertEqual(harness.get_relation_data(rel_id, 'test-app/1'), None) + self.assertEqual(harness.get_relation_data(rel_id, 'postgresql'), {'remote': 'data'}) + with self.assertRaises(KeyError): + # unknown relation id + harness.get_relation_data(99, 'postgresql') + + def test_create_harness_twice(self): + metadata = ''' + name: my-charm + requires: + db: + interface: pgsql + ''' + harness1 = Harness(CharmBase, meta=metadata) + harness2 = Harness(CharmBase, meta=metadata) + harness1.begin() + harness2.begin() + helper1 = DBRelationChangedHelper(harness1.charm, "helper1") + helper2 = DBRelationChangedHelper(harness2.charm, "helper2") + rel_id = harness2.add_relation('db', 'postgresql') + harness2.update_relation_data(rel_id, 'postgresql', {'key': 'value'}) + # Helper2 should see the event triggered by harness2, but helper1 should see no events. 
+ self.assertEqual(helper1.changes, []) + self.assertEqual(helper2.changes, [(rel_id, 'postgresql')]) + + def test_begin_twice(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: test-app + requires: + db: + interface: pgsql + ''') + harness.begin() + with self.assertRaises(RuntimeError): + harness.begin() + + def test_update_relation_exposes_new_data(self): + harness = Harness(CharmBase, meta=''' + name: my-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + viewer = RelationChangedViewer(harness.charm, 'db') + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + harness.update_relation_data(rel_id, 'postgresql/0', {'initial': 'data'}) + self.assertEqual(viewer.changes, [{'initial': 'data'}]) + harness.update_relation_data(rel_id, 'postgresql/0', {'new': 'value'}) + self.assertEqual(viewer.changes, [{'initial': 'data'}, + {'initial': 'data', 'new': 'value'}]) + + def test_update_relation_no_local_unit_change_event(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: my-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + helper = DBRelationChangedHelper(harness.charm, "helper") + rel_id = harness.add_relation('db', 'postgresql') + rel = harness.charm.model.get_relation('db') + rel.data[harness.charm.model.unit]['key'] = 'value' + # there should be no event for updating our own data + harness.update_relation_data(rel_id, 'my-charm/0', {'new': 'other'}) + # but the data will be updated. + self.assertEqual({'key': 'value', 'new': 'other'}, rel.data[harness.charm.model.unit]) + + rel.data[harness.charm.model.unit]['new'] = 'value' + # Our unit data bag got updated. + self.assertEqual(rel.data[harness.charm.model.unit]['new'], 'value') + # But there were no changed events registered by our unit. + self.assertEqual([], helper.changes) + + def test_update_peer_relation_no_local_unit_change_event(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: postgresql + peers: + db: + interface: pgsql + ''') + harness.begin() + helper = DBRelationChangedHelper(harness.charm, "helper") + rel_id = harness.add_relation('db', 'postgresql') + + rel = harness.charm.model.get_relation('db') + rel.data[harness.charm.model.unit]['key'] = 'value' + rel = harness.charm.model.get_relation('db') + harness.update_relation_data(rel_id, 'postgresql/0', {'key': 'v1'}) + self.assertEqual({'key': 'v1'}, rel.data[harness.charm.model.unit]) + # Make sure there was no event + self.assertEqual([], helper.changes) + + rel.data[harness.charm.model.unit]['key'] = 'v2' + # Our unit data bag got updated. + self.assertEqual({'key': 'v2'}, dict(rel.data[harness.charm.model.unit])) + # But there were no changed events registered by our unit. + self.assertEqual([], helper.changes) + + # Same for when our unit is a leader. 
+ harness.set_leader(is_leader=True) + harness.update_relation_data(rel_id, 'postgresql/0', {'key': 'v3'}) + self.assertEqual({'key': 'v3'}, dict(rel.data[harness.charm.model.unit])) + self.assertEqual([], helper.changes) + + rel.data[harness.charm.model.unit]['key'] = 'v4' + self.assertEqual(rel.data[harness.charm.model.unit]['key'], 'v4') + self.assertEqual([], helper.changes) + + def test_update_peer_relation_app_data(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: postgresql + peers: + db: + interface: pgsql + ''') + harness.begin() + harness.set_leader(is_leader=True) + helper = DBRelationChangedHelper(harness.charm, "helper") + rel_id = harness.add_relation('db', 'postgresql') + rel = harness.charm.model.get_relation('db') + rel.data[harness.charm.app]['key'] = 'value' + harness.update_relation_data(rel_id, 'postgresql', {'key': 'v1'}) + self.assertEqual({'key': 'v1'}, rel.data[harness.charm.app]) + self.assertEqual([], helper.changes) + + rel.data[harness.charm.app]['key'] = 'v2' + # Our unit data bag got updated. + self.assertEqual(rel.data[harness.charm.model.app]['key'], 'v2') + # But there were no changed events registered by our unit. + self.assertEqual([], helper.changes) + + # If our unit is not a leader unit we get an update about peer app relation data changes. + harness.set_leader(is_leader=False) + harness.update_relation_data(rel_id, 'postgresql', {'k2': 'v2'}) + self.assertEqual(rel.data[harness.charm.model.app]['k2'], 'v2') + self.assertEqual(helper.changes, [(0, 'postgresql')]) + + def test_update_relation_no_local_app_change_event(self): + # language=YAML + harness = Harness(CharmBase, meta=''' + name: my-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + harness.set_leader(False) + helper = DBRelationChangedHelper(harness.charm, "helper") + rel_id = harness.add_relation('db', 'postgresql') + # TODO: remove this as soon as https://github.com/canonical/operator/issues/175 is fixed. + harness.add_relation_unit(rel_id, 'postgresql/0') + self.assertEqual(helper.changes, []) + + harness.update_relation_data(rel_id, 'my-charm', {'new': 'value'}) + rel = harness.charm.model.get_relation('db') + self.assertEqual(rel.data[harness.charm.app]['new'], 'value') + + # Our app data bag got updated. + self.assertEqual(rel.data[harness.charm.model.app]['new'], 'value') + # But there were no changed events registered by our unit. 
+ self.assertEqual(helper.changes, []) + + def test_update_relation_remove_data(self): + harness = Harness(CharmBase, meta=''' + name: my-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + viewer = RelationChangedViewer(harness.charm, 'db') + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + harness.update_relation_data(rel_id, 'postgresql/0', {'initial': 'data'}) + harness.update_relation_data(rel_id, 'postgresql/0', {'initial': ''}) + self.assertEqual(viewer.changes, [{'initial': 'data'}, {}]) + + def test_update_config(self): + harness = Harness(RecordingCharm) + harness.begin() + harness.update_config(key_values={'a': 'foo', 'b': 2}) + self.assertEqual( + harness.charm.changes, + [{'name': 'config', 'data': {'a': 'foo', 'b': 2}}]) + harness.update_config(key_values={'b': 3}) + self.assertEqual( + harness.charm.changes, + [{'name': 'config', 'data': {'a': 'foo', 'b': 2}}, + {'name': 'config', 'data': {'a': 'foo', 'b': 3}}]) + # you can set config values to the empty string, you can use unset to actually remove items + harness.update_config(key_values={'a': ''}, unset=set('b')) + self.assertEqual( + harness.charm.changes, + [{'name': 'config', 'data': {'a': 'foo', 'b': 2}}, + {'name': 'config', 'data': {'a': 'foo', 'b': 3}}, + {'name': 'config', 'data': {'a': ''}}, + ]) + + def test_set_leader(self): + harness = Harness(RecordingCharm) + # No event happens here + harness.set_leader(False) + harness.begin() + self.assertFalse(harness.charm.model.unit.is_leader()) + harness.set_leader(True) + self.assertEqual(harness.charm.get_changes(reset=True), [{'name': 'leader-elected'}]) + self.assertTrue(harness.charm.model.unit.is_leader()) + harness.set_leader(False) + self.assertFalse(harness.charm.model.unit.is_leader()) + # No hook event when you lose leadership. + # TODO: verify if Juju always triggers `leader-settings-changed` if you + # lose leadership. + self.assertEqual(harness.charm.get_changes(reset=True), []) + harness.disable_hooks() + harness.set_leader(True) + # No hook event if you have disabled them + self.assertEqual(harness.charm.get_changes(reset=True), []) + + def test_relation_set_app_not_leader(self): + harness = Harness(RecordingCharm, meta=''' + name: test-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + harness.set_leader(False) + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + rel = harness.charm.model.get_relation('db') + with self.assertRaises(ModelError): + rel.data[harness.charm.app]['foo'] = 'bar' + # The data has not actually been changed + self.assertEqual(harness.get_relation_data(rel_id, 'test-charm'), {}) + harness.set_leader(True) + rel.data[harness.charm.app]['foo'] = 'bar' + self.assertEqual(harness.get_relation_data(rel_id, 'test-charm'), {'foo': 'bar'}) + + def test_hooks_enabled_and_disabled(self): + harness = Harness(RecordingCharm, meta=''' + name: test-charm + ''') + # Before begin() there are no events. + harness.update_config({'value': 'first'}) + # By default, after begin the charm is set up to receive events. 
+ harness.begin() + harness.update_config({'value': 'second'}) + self.assertEqual( + harness.charm.get_changes(reset=True), + [{'name': 'config', 'data': {'value': 'second'}}]) + # Once disabled, we won't see config-changed when we make an update + harness.disable_hooks() + harness.update_config({'third': '3'}) + self.assertEqual(harness.charm.get_changes(reset=True), []) + harness.enable_hooks() + harness.update_config({'value': 'fourth'}) + self.assertEqual( + harness.charm.get_changes(reset=True), + [{'name': 'config', 'data': {'value': 'fourth', 'third': '3'}}]) + + def test_metadata_from_directory(self): + tmp = pathlib.Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(tmp)) + metadata_filename = tmp / 'metadata.yaml' + with metadata_filename.open('wt') as metadata: + metadata.write(textwrap.dedent(''' + name: my-charm + requires: + db: + interface: pgsql + ''')) + harness = self._get_dummy_charm_harness(tmp) + harness.begin() + self.assertEqual(list(harness.model.relations), ['db']) + # The charm_dir also gets set + self.assertEqual(harness.framework.charm_dir, tmp) + + def test_set_model_name(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + ''') + harness.set_model_name('foo') + self.assertEqual('foo', harness.model.name) + + def test_set_model_name_after_begin(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + ''') + harness.set_model_name('bar') + harness.begin() + with self.assertRaises(RuntimeError): + harness.set_model_name('foo') + self.assertEqual(harness.model.name, 'bar') + + def test_actions_from_directory(self): + tmp = pathlib.Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, str(tmp)) + actions_filename = tmp / 'actions.yaml' + with actions_filename.open('wt') as actions: + actions.write(textwrap.dedent(''' + test: + description: a dummy action + ''')) + harness = self._get_dummy_charm_harness(tmp) + harness.begin() + self.assertEqual(list(harness.framework.meta.actions), ['test']) + # The charm_dir also gets set + self.assertEqual(harness.framework.charm_dir, tmp) + + def _get_dummy_charm_harness(self, tmp): + self._write_dummy_charm(tmp) + charm_mod = importlib.import_module('charm') + harness = Harness(charm_mod.MyTestingCharm) + return harness + + def _write_dummy_charm(self, tmp): + srcdir = tmp / 'src' + srcdir.mkdir(0o755) + charm_filename = srcdir / 'charm.py' + with charm_filename.open('wt') as charmpy: + # language=Python + charmpy.write(textwrap.dedent(''' + from ops.charm import CharmBase + class MyTestingCharm(CharmBase): + pass + ''')) + orig = sys.path[:] + sys.path.append(str(srcdir)) + + def cleanup(): + sys.path = orig + sys.modules.pop('charm') + + self.addCleanup(cleanup) + + def test_actions_passed_in(self): + harness = Harness( + CharmBase, + meta=''' + name: test-app + ''', + actions=''' + test-action: + description: a dummy test action + ''') + self.assertEqual(list(harness.framework.meta.actions), ['test-action']) + + def test_relation_set_deletes(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + harness.set_leader(False) + rel_id = harness.add_relation('db', 'postgresql') + harness.update_relation_data(rel_id, 'test-charm/0', {'foo': 'bar'}) + harness.add_relation_unit(rel_id, 'postgresql/0') + rel = harness.charm.model.get_relation('db', rel_id) + del rel.data[harness.charm.model.unit]['foo'] + self.assertEqual({}, harness.get_relation_data(rel_id, 'test-charm/0')) + + def test_set_workload_version(self): + 
harness = Harness(CharmBase, meta=''' + name: app + ''') + harness.begin() + self.assertIsNone(harness.get_workload_version()) + harness.charm.model.unit.set_workload_version('1.2.3') + self.assertEqual(harness.get_workload_version(), '1.2.3') + + def test_get_backend_calls(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + # No calls to the backend yet + self.assertEqual(harness._get_backend_calls(), []) + rel_id = harness.add_relation('db', 'postgresql') + # update_relation_data ensures the cached data for the relation is wiped + harness.update_relation_data(rel_id, 'test-charm/0', {'foo': 'bar'}) + self.assertEqual( + harness._get_backend_calls(reset=True), [ + ('relation_ids', 'db'), + ('relation_list', rel_id), + ]) + # add_relation_unit resets the relation_list, but doesn't trigger backend calls + harness.add_relation_unit(rel_id, 'postgresql/0') + self.assertEqual([], harness._get_backend_calls(reset=False)) + # however, update_relation_data does, because we are preparing relation-changed + harness.update_relation_data(rel_id, 'postgresql/0', {'foo': 'bar'}) + self.assertEqual( + harness._get_backend_calls(reset=False), [ + ('relation_ids', 'db'), + ('relation_list', rel_id), + ]) + # If we check again, they are still there, but now we reset it + self.assertEqual( + harness._get_backend_calls(reset=True), [ + ('relation_ids', 'db'), + ('relation_list', rel_id), + ]) + # And the calls are gone + self.assertEqual(harness._get_backend_calls(), []) + + def test_get_backend_calls_with_kwargs(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + requires: + db: + interface: pgsql + ''') + harness.begin() + unit = harness.charm.model.unit + # Reset the list, because we don't care what it took to get here + harness._get_backend_calls(reset=True) + unit.status = ActiveStatus() + self.assertEqual( + [('status_set', 'active', '', {'is_app': False})], harness._get_backend_calls()) + harness.set_leader(True) + app = harness.charm.model.app + harness._get_backend_calls(reset=True) + app.status = ActiveStatus('message') + self.assertEqual( + [('is_leader',), + ('status_set', 'active', 'message', {'is_app': True})], + harness._get_backend_calls()) + + def test_unit_status(self): + harness = Harness(CharmBase, meta='name: test-app') + harness.set_leader(True) + harness.begin() + # default status + self.assertEqual(harness.model.unit.status, MaintenanceStatus('')) + status = ActiveStatus('message') + harness.model.unit.status = status + self.assertEqual(harness.model.unit.status, status) + + def test_app_status(self): + harness = Harness(CharmBase, meta='name: test-app') + harness.set_leader(True) + harness.begin() + # default status + self.assertEqual(harness.model.app.status, UnknownStatus()) + status = ActiveStatus('message') + harness.model.app.status = status + self.assertEqual(harness.model.app.status, status) + + +class DBRelationChangedHelper(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.changes = [] + parent.framework.observe(parent.on.db_relation_changed, self.on_relation_changed) + + def on_relation_changed(self, event): + if event.unit is not None: + self.changes.append((event.relation.id, event.unit.name)) + else: + self.changes.append((event.relation.id, event.app.name)) + + +class RelationChangedViewer(Object): + """Track relation_changed events and saves the data seen in the relation bucket.""" + + def __init__(self, charm, relation_name): + 
super().__init__(charm, relation_name) + self.changes = [] + charm.framework.observe(charm.on[relation_name].relation_changed, self.on_relation_changed) + + def on_relation_changed(self, event): + if event.unit is not None: + data = event.relation.data[event.unit] + else: + data = event.relation.data[event.app] + self.changes.append(dict(data)) + + +class RecordingCharm(CharmBase): + """Record the events that we see, and any associated data.""" + + def __init__(self, framework): + super().__init__(framework) + self.changes = [] + self.framework.observe(self.on.config_changed, self.on_config_changed) + self.framework.observe(self.on.leader_elected, self.on_leader_elected) + + def get_changes(self, reset=True): + changes = self.changes + if reset: + self.changes = [] + return changes + + def on_config_changed(self, _): + self.changes.append(dict(name='config', data=dict(self.framework.model.config))) + + def on_leader_elected(self, _): + self.changes.append(dict(name='leader-elected')) + + +class RelationEventCharm(RecordingCharm): + """Record events related to relation lifecycles.""" + + def __init__(self, framework): + super().__init__(framework) + + def observe_relation_events(self, relation_name): + self.framework.observe(self.on[relation_name].relation_created, self._on_relation_created) + self.framework.observe(self.on[relation_name].relation_joined, self._on_relation_joined) + self.framework.observe(self.on[relation_name].relation_changed, self._on_relation_changed) + self.framework.observe(self.on[relation_name].relation_departed, + self._on_relation_departed) + self.framework.observe(self.on[relation_name].relation_broken, self._on_relation_broken) + + def _on_relation_created(self, event): + self._observe_relation_event('relation-created', event) + + def _on_relation_joined(self, event): + self._observe_relation_event('relation-joined', event) + + def _on_relation_changed(self, event): + self._observe_relation_event('relation-changed', event) + + def _on_relation_departed(self, event): + self._observe_relation_event('relation-departed', event) + + def _on_relation_broken(self, event): + self._observe_relation_event('relation-broken', event) + + def _observe_relation_event(self, event_name, event): + unit_name = None + if event.unit is not None: + unit_name = event.unit.name + app_name = None + if event.app is not None: + app_name = event.app.name + self.changes.append( + dict(name=event_name, + data=dict(app=app_name, unit=unit_name, relation_id=event.relation.id))) + + +class TestTestingModelBackend(unittest.TestCase): + + def test_status_set_get_unit(self): + harness = Harness(CharmBase, meta=''' + name: app + ''') + backend = harness._backend + backend.status_set('blocked', 'message', is_app=False) + self.assertEqual( + backend.status_get(is_app=False), + {'status': 'blocked', 'message': 'message'}) + self.assertEqual( + backend.status_get(is_app=True), + {'status': 'unknown', 'message': ''}) + + def test_status_set_get_app(self): + harness = Harness(CharmBase, meta=''' + name: app + ''') + backend = harness._backend + backend.status_set('blocked', 'message', is_app=True) + self.assertEqual( + backend.status_get(is_app=True), + {'status': 'blocked', 'message': 'message'}) + self.assertEqual( + backend.status_get(is_app=False), + {'status': 'maintenance', 'message': ''}) + + def test_relation_ids_unknown_relation(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + provides: + db: + interface: mydb + ''') + backend = harness._backend + # With no relations added, we 
just get an empty list for the interface + self.assertEqual(backend.relation_ids('db'), []) + # But an unknown interface raises a ModelError + with self.assertRaises(ModelError): + backend.relation_ids('unknown') + + def test_relation_get_unknown_relation_id(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + ''') + backend = harness._backend + with self.assertRaises(RelationNotFoundError): + backend.relation_get(1234, 'unit/0', False) + + def test_relation_list_unknown_relation_id(self): + harness = Harness(CharmBase, meta=''' + name: test-charm + ''') + backend = harness._backend + with self.assertRaises(RelationNotFoundError): + backend.relation_list(1234) + + def test_populate_oci_resources(self): + harness = Harness(CharmBase, meta=''' + name: test-app + resources: + image: + type: oci-image + description: "Image to deploy." + image2: + type: oci-image + description: "Another image." + ''') + harness.populate_oci_resources() + resource = harness._resource_dir / "image" / "contents.yaml" + with resource.open('r') as resource_file: + contents = yaml.safe_load(resource_file.read()) + self.assertEqual(contents['registrypath'], 'registrypath') + self.assertEqual(contents['username'], 'username') + self.assertEqual(contents['password'], 'password') + self.assertEqual(len(harness._backend._resources_map), 2) + + def test_resource_folder_cleanup(self): + harness = Harness(CharmBase, meta=''' + name: test-app + resources: + image: + type: oci-image + description: "Image to deploy." + ''') + harness.populate_oci_resources() + resource = harness._resource_dir / "image" / "contents.yaml" + del harness + with self.assertRaises(FileNotFoundError): + with resource.open('r') as resource_file: + print("This shouldn't be here: {}".format(resource_file)) + + def test_add_oci_resource_custom(self): + harness = Harness(CharmBase, meta=''' + name: test-app + resources: + image: + type: oci-image + description: "Image to deploy." + ''') + custom = { + "registrypath": "custompath", + "username": "custom_username", + "password": "custom_password", + } + harness.add_oci_resource('image', custom) + resource = harness._resource_dir / "image" / "contents.yaml" + with resource.open('r') as resource_file: + contents = yaml.safe_load(resource_file.read()) + self.assertEqual(contents['registrypath'], 'custompath') + self.assertEqual(contents['username'], 'custom_username') + self.assertEqual(contents['password'], 'custom_password') + self.assertEqual(len(harness._backend._resources_map), 1) + + def test_add_oci_resource_no_image(self): + harness = Harness(CharmBase, meta=''' + name: test-app + resources: + image: + type: file + description: "Image to deploy." + ''') + with self.assertRaises(RuntimeError): + harness.add_oci_resource("image") + with self.assertRaises(RuntimeError): + harness.add_oci_resource("missing-resource") + self.assertEqual(len(harness._backend._resources_map), 0) diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/src/charm.py b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/src/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..436e5a77627487fcccfa5aeb68e72a7082341a42 --- /dev/null +++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/charms/simple/src/charm.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +## +# Copyright 2020 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+import sys
+
+sys.path.append("lib")
+
+from charms.osm.sshproxy import SSHProxyCharm
+from ops.main import main
+
+class MySSHProxyCharm(SSHProxyCharm):
+
+    def __init__(self, framework, key):
+        super().__init__(framework, key)
+
+        # Listen to charm events
+        self.framework.observe(self.on.config_changed, self.on_config_changed)
+        self.framework.observe(self.on.install, self.on_install)
+        self.framework.observe(self.on.start, self.on_start)
+
+        # Listen to the touch and mkdir action events
+        self.framework.observe(self.on.touch_action, self.on_touch_action)
+        self.framework.observe(self.on.mkdir_action, self.on_mkdir_action)
+
+    def on_config_changed(self, event):
+        """Handle changes in configuration"""
+        super().on_config_changed(event)
+
+    def on_install(self, event):
+        """Called when the charm is being installed"""
+        super().on_install(event)
+
+    def on_start(self, event):
+        """Called when the charm is being started"""
+        super().on_start(event)
+
+    def on_touch_action(self, event):
+        """Touch a file."""
+
+        if self.model.unit.is_leader():
+            filename = event.params["filename"]
+            proxy = self.get_ssh_proxy()
+            stdout, stderr = proxy.run("touch {}".format(filename))
+            event.set_results({"output": stdout})
+        else:
+            event.fail("Unit is not leader")
+            return
+
+    def on_mkdir_action(self, event):
+        """Create a directory."""
+
+        if self.model.unit.is_leader():
+            foldername = event.params["foldername"]
+            proxy = self.get_ssh_proxy()
+            stdout, stderr = proxy.run("mkdir {}".format(foldername))
+            event.set_results({"output": stdout})
+        else:
+            event.fail("Unit is not leader")
+            return
+
+if __name__ == "__main__":
+    main(MySSHProxyCharm)
+
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/cloud_init/cloud-config.txt b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/cloud_init/cloud-config.txt
new file mode 100755
index 0000000000000000000000000000000000000000..36c8d1bf2cdebbc4e50d1e8348003f64f419cd0b
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/cloud_init/cloud-config.txt
@@ -0,0 +1,12 @@
+#cloud-config
+password: osm4u
+chpasswd: { expire: False }
+ssh_pwauth: True
+
+write_files:
+- content: |
+    # My new helloworld file
+
+  owner: root:root
+  permissions: '0644'
+  path: /root/helloworld.txt
diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/ha_proxy_charm_vnfd.yaml b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/ha_proxy_charm_vnfd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..adcd073202a6df04a70de1d0a304ceda902638ea
--- /dev/null
+++ b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/ha_proxy_charm_vnfd.yaml
@@ -0,0 +1,119 @@
+vnfd:
+  id: ha_proxy_charm-vnf
+  mgmt-cp: vnf-mgmt-ext
+  product-name: ha_proxy_charm-vnf
+  description: A VNF consisting of 1 VDU connected to two external VL, and one for
+    data and another one for management
+  version: 1.0
+  df:
+  - id: default-df
+    instantiation-level:
+    - id: default-instantiation-level
+      vdu-level:
+      - number-of-instances: 1
+        vdu-id: mgmtVM
+    vdu-profile:
+    - id: mgmtVM
+      min-number-of-instances: 1
+    lcm-operations-configuration:
+      operate-vnf-op-config:
+        day1-2:
+        - id: 
ha_proxy_charm-vnf + execution-environment-list: + - id: simple-ee + juju: + charm: simple + config-access: + ssh-access: + default-user: ubuntu + required: true + config-primitive: + - name: touch + execution-environment-ref: simple-ee + parameter: + - data-type: STRING + default-value: /home/ubuntu/touched + name: filename + - name: mkdir + execution-environment-ref: simple-ee + parameter: + - data-type: STRING + default-value: /home/ubuntu/newfolder + name: foldername + initial-config-primitive: + - name: config + execution-environment-ref: simple-ee + parameter: + - name: ssh-hostname + value: + - name: ssh-username + value: ubuntu + - name: ssh-password + value: osm4u + seq: 1 + - name: touch + execution-environment-ref: simple-ee + parameter: + - data-type: STRING + name: filename + value: /home/ubuntu/first-touch + seq: 2 + ext-cpd: + - id: vnf-mgmt-ext + int-cpd: + cpd: mgmtVM-eth0-int + vdu-id: mgmtVM + - id: vnf-data-ext + int-cpd: + cpd: dataVM-xe0-int + vdu-id: mgmtVM + sw-image-desc: + - id: ubuntu18.04 + image: ubuntu18.04 + name: ubuntu18.04 + - id: ubuntu18.04-aws + name: ubuntu18.04-aws + image: ubuntu/images/hvm-ssd/ubuntu-artful-17.10-amd64-server-20180509 + vim-type: aws + - id: ubuntu18.04-azure + name: ubuntu18.04-azure + image: Canonical:UbuntuServer:18.04-LTS:latest + vim-type: azure + - id: ubuntu18.04-gcp + name: ubuntu18.04-gcp + image: ubuntu-os-cloud:image-family:ubuntu-1804-lts + vim-type: gcp + vdu: + - cloud-init-file: cloud-config.txt + id: mgmtVM + int-cpd: + - id: mgmtVM-eth0-int + virtual-network-interface-requirement: + - name: mgmtVM-eth0 + position: 1 + virtual-interface: + type: PARAVIRT + - id: dataVM-xe0-int + virtual-network-interface-requirement: + - name: dataVM-xe0 + position: 2 + virtual-interface: + type: PARAVIRT + name: mgmtVM + sw-image-desc: ubuntu18.04 + alternative-sw-image-desc: + - ubuntu18.04-aws + - ubuntu18.04-azure + - ubuntu18.04-gcp + virtual-compute-desc: mgmtVM-compute + virtual-storage-desc: + - mgmtVM-storage + virtual-compute-desc: + - id: mgmtVM-compute + virtual-cpu: + num-virtual-cpu: 1 + virtual-memory: + size: 1.0 + virtual-storage-desc: + - id: mgmtVM-storage + size-of-storage: 10 diff --git a/charm-packages/updated_vnfds/ha_proxy_charm_vnf/icons/osm.png b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/icons/osm.png new file mode 100644 index 0000000000000000000000000000000000000000..62012d2a2b491bdcd536d62c3c3c863c0d8c1b33 Binary files /dev/null and b/charm-packages/updated_vnfds/ha_proxy_charm_vnf/icons/osm.png differ
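
A minimal sketch of the Harness APIs exercised in test_testing.py above; this is not part of the charm package, and it assumes only that the same `ops` library bundled with these charms is importable:

# harness_sketch.py - illustrative only; uses only calls shown in test_testing.py.
from ops.charm import CharmBase
from ops.testing import Harness

# Build an in-memory harness from inline metadata, exactly as the tests do.
harness = Harness(CharmBase, meta='''
name: demo-app
requires:
  db:
    interface: pgsql
''')
harness.begin()

# Simulate the remote application joining the relation and publishing data.
rel_id = harness.add_relation('db', 'postgresql')
harness.add_relation_unit(rel_id, 'postgresql/0')
harness.update_relation_data(rel_id, 'postgresql/0', {'host': '10.0.0.1'})

# The data is visible both through the harness and through the charm's model.
assert harness.get_relation_data(rel_id, 'postgresql/0') == {'host': '10.0.0.1'}
rel = harness.charm.model.get_relation('db')
print(dict(rel.data[list(rel.units)[0]]))  # {'host': '10.0.0.1'}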