From: Adam Israel
Date: Thu, 11 Oct 2018 16:25:22 +0000 (+0200)
Subject: Merge changes Iacd2f028,I43a6d573,Ibb6c93bb
X-Git-Tag: v5.0.0~8
X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2FN2VC.git;a=commitdiff_plain;h=ea14f890f054d5eb85416a5b6c8c7713e2506ad8;hp=f32b6f276457966347ed0d80ec7673ebc1357dc0

Merge changes Iacd2f028,I43a6d573,Ibb6c93bb

* changes:
  Secure Key Management
  Improved integration tests
  [WIP] Multi-vdu, multi-charm support
---

diff --git a/.gitignore b/.gitignore
index 8d35cb3..543898d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,9 @@
 __pycache__
 *.pyc
+.tox/
+tests/charms/builds
+tests/charms/deps
+dist/
+.cache/
+.local/
+N2VC.egg-info/
diff --git a/Makefile b/Makefile
index c63a6ba..abc2c39 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ clean:
 	find . -name __pycache__ -type d -exec rm -r {} +
 	find . -name *.pyc -delete
 	rm -rf .tox
-	rm -rf tests/charms/tmp*
+	rm -rf tests/charms/builds/*
 .tox:
 	tox -r --notest
 test: lint
diff --git a/n2vc/vnf.py b/n2vc/vnf.py
index df3ec00..a1fcfe3 100644
--- a/n2vc/vnf.py
+++ b/n2vc/vnf.py
@@ -3,7 +3,9 @@ import logging
 import os
 import os.path
 import re
+import shlex
 import ssl
+import subprocess
 import sys

 # import time
@@ -87,20 +89,14 @@ class VCAMonitor(ModelObserver):
                 self.applications[application_name]['callback_args']

             if old and new:
-                old_status = old.workload_status
-                new_status = new.workload_status
-
-                if old_status == new_status:
-                    """The workload status may fluctuate around certain
-                    events, so wait until the status has stabilized before
-                    triggering the callback."""
-                    if callback:
-                        callback(
-                            self.ns_name,
-                            delta.data['application'],
-                            new_status,
-                            new.workload_status_message,
-                            *callback_args)
+                # Fire off a callback with the application state
+                if callback:
+                    callback(
+                        self.ns_name,
+                        delta.data['application'],
+                        new.workload_status,
+                        new.workload_status_message,
+                        *callback_args)

             if old and not new:
                 # This is a charm being removed
@@ -173,6 +169,12 @@ class N2VC:
         self.connecting = False
         self.authenticated = False

+        # For debugging
+        self.refcount = {
+            'controller': 0,
+            'model': 0,
+        }
+
         self.models = {}
         self.default_model = None
@@ -333,15 +335,14 @@ class N2VC:
             ########################################################
             to = ""
             if machine_spec.keys():
-                # TODO: This needs to be tested.
-                # if all(k in machine_spec for k in ['hostname', 'username']):
-                #     # Enlist the existing machine in Juju
-                #     machine = await self.model.add_machine(spec='ssh:%@%'.format(
-                #         specs['host'],
-                #         specs['user'],
-                #     ))
-                #     to = machine.id
-                pass
+                if all(k in machine_spec for k in ['host', 'user']):
+                    # Enlist an existing machine as a Juju unit
+                    machine = await model.add_machine(spec='ssh:{}@{}:{}'.format(
+                        machine_spec['user'],
+                        machine_spec['host'],
+                        self.GetPrivateKeyPath(),
+                    ))
+                    to = machine.id

             #######################################
             # Get the initial charm configuration #
@@ -351,9 +352,6 @@ class N2VC:
             if 'rw_mgmt_ip' in params:
                 rw_mgmt_ip = params['rw_mgmt_ip']

-            # initial_config = {}
-            # self.log.debug(type(params))
-            # self.log.debug("Params: {}".format(params))
             if 'initial-config-primitive' not in params:
                 params['initial-config-primitive'] = {}
@@ -382,8 +380,8 @@ class N2VC:
                 series='xenial',
                 # Apply the initial 'config' primitive during deployment
                 config=initial_config,
-                # TBD: Where to deploy the charm to.
-                to=None,
+                # Where to deploy the charm to.
+                to=to,
             )

             # #######################################
@@ -487,6 +485,77 @@ class N2VC:

         return results

+    # async def ProvisionMachine(self, model_name, hostname, username):
+    #     """Provision machine for usage with Juju.
+    #
+    #     Provisions a previously instantiated machine for use with Juju.
+    #     """
+    #     try:
+    #         if not self.authenticated:
+    #             await self.login()
+    #
+    #         # FIXME: This is hard-coded until model-per-ns is added
+    #         model_name = 'default'
+    #
+    #         model = await self.get_model(model_name)
+    #         model.add_machine(spec={})
+    #
+    #         machine = await model.add_machine(spec='ssh:{}@{}:{}'.format(
+    #             "ubuntu",
+    #             host['address'],
+    #             private_key_path,
+    #         ))
+    #         return machine.id
+    #
+    #     except Exception as e:
+    #         self.log.debug(
+    #             "Caught exception while getting primitive status: {}".format(e)
+    #         )
+    #         raise N2VCPrimitiveExecutionFailed(e)
+
+    def GetPrivateKeyPath(self):
+        homedir = os.environ['HOME']
+        sshdir = "{}/.ssh".format(homedir)
+        private_key_path = "{}/id_n2vc_rsa".format(sshdir)
+        return private_key_path
+
+    async def GetPublicKey(self):
+        """Get the N2VC SSH public key.
+
+        Returns the SSH public key, to be injected into virtual machines to
+        be managed by the VCA.
+
+        The first time this is run, an SSH keypair will be created. The public
+        key is injected into a VM so that we can provision the machine with
+        Juju, after which Juju will communicate with the VM directly via the
+        juju agent.
+        """
+        public_key = ""
+
+        # Find the path to where we expect our key to live.
+        homedir = os.environ['HOME']
+        sshdir = "{}/.ssh".format(homedir)
+        if not os.path.exists(sshdir):
+            os.mkdir(sshdir)
+
+        private_key_path = "{}/id_n2vc_rsa".format(sshdir)
+        public_key_path = "{}.pub".format(private_key_path)
+
+        # If we don't have a key generated, generate it.
+        if not os.path.exists(private_key_path):
+            cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format(
+                "rsa",
+                "4096",
+                private_key_path
+            )
+            subprocess.check_output(shlex.split(cmd))
+
+        # Read the public key
+        with open(public_key_path, "r") as f:
+            public_key = f.readline()
+
+        return public_key
+
     async def ExecuteInitialPrimitives(self, model_name, application_name,
                                        params, callback=None, *callback_args):
         """Execute multiple primitives.
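A sketch of how a caller might consume GetPublicKey(): the returned key is
injected into a new machine's cloud-init user-data so that N2VC can later
reach it over SSH. The user-data layout below mirrors the LXD profile used by
the integration tests; the `n2vc` client object and the hand-off to the VIM
are assumptions for illustration, not part of this change.

    # Assumes an authenticated N2VC client, inside an async context.
    public_key = await n2vc.GetPublicKey()
    user_data = "#cloud-config\nssh_authorized_keys:\n- {}".format(public_key)
    # user_data is then handed to whatever boots the VM (an LXD profile,
    # a cloud server create call, etc.) before the machine is enlisted.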
@@ -650,6 +719,13 @@ class N2VC:

         return metrics

+    async def HasApplication(self, model_name, application_name):
+        model = await self.get_model(model_name)
+        app = await self.get_application(model, application_name)
+        if app:
+            return True
+        return False
+
     # Non-public methods
     async def add_relation(self, a, b, via=None):
         """
@@ -811,6 +887,7 @@ class N2VC:
             self.models[model_name] = await self.controller.get_model(
                 model_name,
             )
+            self.refcount['model'] += 1

             # Create an observer for this model
             self.monitors[model_name] = VCAMonitor(model_name)
@@ -846,6 +923,7 @@ class N2VC:
                 password=self.secret,
                 cacert=cacert,
             )
+            self.refcount['controller'] += 1
         else:
             # current_controller no longer exists
             # self.log.debug("Connecting to current controller...")
@@ -860,8 +938,6 @@ class N2VC:
         self.authenticated = True
         self.log.debug("JujuApi: Logged into controller")

-        # self.default_model = await self.controller.get_model("default")
-
     async def logout(self):
         """Logout of the Juju controller."""
         if not self.authenticated:
@@ -873,20 +949,26 @@ class N2VC:
                     self.default_model
                 ))
                 await self.default_model.disconnect()
+                self.refcount['model'] -= 1
                 self.default_model = None

             for model in self.models:
                 await self.models[model].disconnect()
-                model = None
+                self.refcount['model'] -= 1
+                self.models[model] = None

             if self.controller:
                 self.log.debug("Disconnecting controller {}".format(
                     self.controller
                 ))
                 await self.controller.disconnect()
+                self.refcount['controller'] -= 1
                 self.controller = None

             self.authenticated = False
+
+            self.log.debug(self.refcount)
+
         except Exception as e:
             self.log.fatal(
                 "Fatal error logging out of Juju Controller: {}".format(e)
             )
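The new HasApplication() helper makes it possible to block until Juju has
actually removed an application, rather than assuming RemoveCharms() takes
effect immediately. A minimal sketch of that wait loop (the 10-second
interval is an arbitrary choice, matching what stop() in tests/base.py uses):

    # Assumes an authenticated N2VC client, inside an async context.
    await n2vc.RemoveCharms(model_name, application)
    while await n2vc.HasApplication(model_name, application):
        await asyncio.sleep(10)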
diff --git a/tests/base.py b/tests/base.py
index a02ab7e..0959059 100644
--- a/tests/base.py
+++ b/tests/base.py
@@ -1,16 +1,13 @@
 #!/usr/bin/env python3

 import asyncio
-import functools
-
+import datetime
 import logging
 import n2vc.vnf
 import pylxd
 import pytest
 import os
 import shlex
-import shutil
 import subprocess
-import tempfile
 import time
 import uuid
 import yaml
@@ -57,6 +54,22 @@ class CleanController():
             await self._controller.disconnect()


+def debug(msg):
+    """Format debug messages in a consistent way."""
+    now = datetime.datetime.now()
+
+    # TODO: Decide on the best way to log. Output from `logging.debug` shows up
+    # when a test fails, but print() will always show up when running tox with
+    # `-s`, which is really useful for debugging single tests without having to
+    # insert a False assert to see the log.
+    logging.debug(
+        "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
+    )
+    # print(
+    #     "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
+    # )
+
+
 def get_charm_path():
     return "{}/charms".format(here)

@@ -65,20 +78,6 @@ def get_layer_path():
     return "{}/charms/layers".format(here)


-def parse_metrics(application, results):
-    """Parse the returned metrics into a dict."""
-
-    # We'll receive the results for all units, to look for the one we want
-    # Caveat: we're grabbing results from the first unit of the application,
-    # which is enough for testing, since we're only deploying a single unit.
-    retval = {}
-    for unit in results:
-        if unit.startswith(application):
-            for result in results[unit]:
-                retval[result['key']] = result['value']
-    return retval
-
-
 def collect_metrics(application):
     """Invoke Juju's metrics collector.

@@ -125,8 +124,6 @@ def get_n2vc(loop=None):
     log = logging.getLogger()
     log.level = logging.DEBUG

-    # Running under tox/pytest makes getting env variables harder.
-    # Extract parameters from the environment in order to run our test
     vca_host = os.getenv('VCA_HOST', '127.0.0.1')
     vca_port = os.getenv('VCA_PORT', 17070)
@@ -166,42 +163,51 @@ def create_lxd_container(public_key=None, name="test_name"):
         name,
     )

-    private_key_path, public_key_path = find_juju_ssh_keys()
-
-    # create profile w/cloud-init and juju ssh key
-    if not public_key:
-        public_key = ""
-        with open(public_key_path, "r") as f:
-            public_key = f.readline()
-
-    client.profiles.create(
-        test_machine,
-        config={
-            'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
-        devices={
-            'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
-            'eth0': {
-                'nictype': 'bridged',
-                'parent': 'lxdbr0',
-                'type': 'nic'
-            }
-        }
-    )
+    private_key_path, public_key_path = find_n2vc_ssh_keys()
+
+    try:
+        # create profile w/cloud-init and juju ssh key
+        if not public_key:
+            public_key = ""
+            with open(public_key_path, "r") as f:
+                public_key = f.readline()
+
+        client.profiles.create(
+            test_machine,
+            config={
+                'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
+            devices={
+                'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
+                'eth0': {
+                    'nictype': 'bridged',
+                    'parent': 'lxdbr0',
+                    'type': 'nic'
+                }
+            }
+        )
+    except Exception as ex:
+        debug("Error creating lxd profile {}: {}".format(test_machine, ex))
+        raise ex

-    # create lxc machine
-    config = {
-        'name': test_machine,
-        'source': {
-            'type': 'image',
-            'alias': 'xenial',
-            'mode': 'pull',
-            'protocol': 'simplestreams',
-            'server': 'https://cloud-images.ubuntu.com/releases',
-        },
-        'profiles': [test_machine],
-    }
-    container = client.containers.create(config, wait=True)
-    container.start(wait=True)
+    try:
+        # create lxc machine
+        config = {
+            'name': test_machine,
+            'source': {
+                'type': 'image',
+                'alias': 'xenial',
+                'mode': 'pull',
+                'protocol': 'simplestreams',
+                'server': 'https://cloud-images.ubuntu.com/releases',
+            },
+            'profiles': [test_machine],
+        }
+        container = client.containers.create(config, wait=True)
+        container.start(wait=True)
+    except Exception as ex:
+        debug("Error creating lxd container {}: {}".format(test_machine, ex))
+        # This is a test-ending failure.
+        raise ex

     def wait_for_network(container, timeout=30):
         """Wait for eth0 to have an ipv4 address."""
@@ -215,20 +221,47 @@ def create_lxd_container(public_key=None, name="test_name"):
                     return addresses[0]
         return None

-    wait_for_network(container)
+    try:
+        wait_for_network(container)
+    except Exception as ex:
+        debug(
+            "Error waiting for container {} network: {}".format(
+                test_machine,
+                ex,
+            )
+        )

     # HACK: We need to give sshd a chance to bind to the interface,
     # and pylxd's container.execute seems to be broken and fails and/or
     # hangs trying to properly check if the service is up.
-    time.sleep(5)
-    client = None
+    (exit_code, stdout, stderr) = container.execute([
+        'ping',
+        '-c', '5',   # Wait for 5 ECHO_REPLY
+        '8.8.8.8',   # Ping Google's public DNS
+        '-W', '15',  # Set a 15 second deadline
+    ])
+    if exit_code > 0:
+        # The network failed
+        raise Exception("Unable to verify container network")

     return container


 def destroy_lxd_container(container):
-    """Stop and delete a LXD container."""
+    """Stop and delete a LXD container.
+
+    Sometimes we see errors talking to LXD -- ephemeral issues like
+    load or a bug that's killed the API. We'll do our best to clean
+    up here, and we should run a cleanup after all tests are finished
+    to remove any extra containers and profiles belonging to us.
+ """ + + if type(container) is bool: + return + name = container.name + debug("Destroying container {}".format(name)) + client = get_lxd_client() def wait_for_stop(timeout=30): @@ -246,16 +279,40 @@ def destroy_lxd_container(container): if client.containers.exists(name) is False: return - container.stop(wait=False) - wait_for_stop() + try: + container.stop(wait=False) + wait_for_stop() + except Exception as ex: + debug( + "Error stopping container {}: {}".format( + name, + ex, + ) + ) - container.delete(wait=False) - wait_for_delete() + try: + container.delete(wait=False) + wait_for_delete() + except Exception as ex: + debug( + "Error deleting container {}: {}".format( + name, + ex, + ) + ) - # Delete the profile created for this container - profile = client.profiles.get(name) - if profile: - profile.delete() + try: + # Delete the profile created for this container + profile = client.profiles.get(name) + if profile: + profile.delete() + except Exception as ex: + debug( + "Error deleting profile {}: {}".format( + name, + ex, + ) + ) def find_lxd_config(): @@ -273,6 +330,21 @@ def find_lxd_config(): return (None, None) +def find_n2vc_ssh_keys(): + """Find the N2VC ssh keys.""" + + paths = [] + paths.append(os.path.expanduser("~/.ssh/")) + + for path in paths: + if os.path.exists(path): + private = os.path.expanduser("{}/id_n2vc_rsa".format(path)) + public = os.path.expanduser("{}/id_n2vc_rsa.pub".format(path)) + if os.path.exists(private) and os.path.exists(public): + return (private, public) + return (None, None) + + def find_juju_ssh_keys(): """Find the Juju ssh keys.""" @@ -307,6 +379,7 @@ def get_lxd_client(host="127.0.0.1", port="8443", verify=False): return client + # TODO: This is marked serial but can be run in parallel with work, including: # - Fixing an event loop issue; seems that all tests stop when one test stops? @@ -321,13 +394,36 @@ class TestN2VC(object): We need to have a flag (instance variable) that controls this behavior. It may be necessary to skip validation and run against a descriptor implementing features that have not yet been released in the Information Model. """ + """ + The six phases of integration testing, for the test itself and each charm?: + + setup/teardown_class: + 1. Prepare - Verify the environment and create a new model + 2. Deploy - Mark the test as ready to execute + 3. Configure - Configuration to reach Active state + 4. Test - Execute primitive(s) to verify success + 5. Collect - Collect any useful artifacts for debugging (charm, logs) + 6. Destroy - Destroy the model + + + 1. Prepare - Building of charm + 2. Deploy - Deploying charm + 3. Configure - Configuration to reach Active state + 4. Test - Execute primitive(s) to verify success + 5. Collect - Collect any useful artifacts for debugging (charm, logs) + 6. Destroy - Destroy the charm + + """ @classmethod def setup_class(self): """ setup any state specific to the execution of the given class (which usually contains tests). """ # Initialize instance variable(s) - self.container = None + self.n2vc = None + + # Track internal state for each test run + self.state = {} # Parse the test's descriptors self.nsd = get_descriptor(self.NSD_YAML) @@ -336,6 +432,9 @@ class TestN2VC(object): self.ns_name = self.nsd['name'] self.vnf_name = self.vnfd['name'] + # Hard-coded to default for now, but this may change in the future. + self.model = "default" + self.charms = {} self.parse_vnf_descriptor() assert self.charms is not {} @@ -351,35 +450,93 @@ class TestN2VC(object): # to pytest. 
         # to pytest. Test(s) should wait for this flag to change to False
         # before returning.
         self._running = True
+        self._stopping = False

     @classmethod
     def teardown_class(self):
         """ teardown any state that was previously setup with a call to setup_class.
         """
-        if self.container:
-            destroy_lxd_container(self.container)
+        debug("Running teardown_class...")
+        try:
+
+            debug("Destroying LXD containers...")
+            for application in self.state:
+                if self.state[application]['container']:
+                    destroy_lxd_container(self.state[application]['container'])
+            debug("Destroying LXD containers...done.")
+
+            # Logout of N2VC
+            if self.n2vc:
+                debug("teardown_class(): Logging out of N2VC...")
+                yield from self.n2vc.logout()
+                debug("teardown_class(): Logging out of N2VC...done.")
+
+            debug("Running teardown_class...done.")
+        except Exception as ex:
+            debug("Exception in teardown_class: {}".format(ex))
+
+    @classmethod
+    def all_charms_active(self):
+        """Determine if all of the deployed charms are active."""
+        active = 0
+
+        for application in self.state:
+            if 'status' in self.state[application]:
+                debug("status of {} is '{}'".format(
+                    application,
+                    self.state[application]['status'],
+                ))
+                if self.state[application]['status'] == 'active':
+                    active += 1
+
+        debug("Active charms: {}/{}".format(
+            active,
+            len(self.charms),
+        ))
+
+        if active == len(self.charms):
+            return True
+
+        return False
+
+    @classmethod
+    def are_tests_finished(self):
+        appcount = len(self.state)
+
+        # If we don't have state yet, keep running.
+        if appcount == 0:
+            debug("No applications")
+            return False

-        # Clean up any artifacts created during the test
-        logging.debug("Artifacts: {}".format(self.artifacts))
-        for charm in self.artifacts:
-            artifact = self.artifacts[charm]
-            if os.path.exists(artifact['tmpdir']):
-                logging.debug("Removing directory '{}'".format(artifact))
-                shutil.rmtree(artifact['tmpdir'])
+        if self._stopping:
+            debug("_stopping is True")
+            return True

-        # Logout of N2VC
-        asyncio.ensure_future(self.n2vc.logout())
+        appdone = 0
+        for application in self.state:
+            if self.state[application]['done']:
+                appdone += 1
+
+        debug("{}/{} charms tested".format(appdone, appcount))
+
+        if appcount == appdone:
+            return True
+
+        return False

     @classmethod
-    def running(self, timeout=600):
+    async def running(self, timeout=600):
         """Returns if the test is still running.

         @param timeout The time, in seconds, to wait for the test to complete.
         """
+        if self.are_tests_finished():
+            await self.stop()
+            return False
+
+        await asyncio.sleep(30)

-        # if start + now > start > timeout:
-        #     self.stop_test()
         return self._running

     @classmethod
@@ -401,19 +558,19 @@ class TestN2VC(object):

         if charm not in self.artifacts:
             try:
-                # Note: This builds the charm under N2VC/tests/charms/
-                # The snap-installed command only has write access to the user's $HOME
-                # so writing to /tmp isn't possible at the moment.
-                builds = tempfile.mkdtemp(dir=get_charm_path())
-
-                cmd = "charm build {}/{} -o {}/".format(
-                    get_layer_path(),
-                    charm,
-                    builds,
-                )
-                logging.debug(cmd)
-
-                subprocess.check_call(shlex.split(cmd))
+                # Note: This builds the charm under N2VC/tests/charms/builds/
+                # Currently, the snap-installed command only has write access
+                # to the user's $HOME (changing in an upcoming release), so
+                # writing to /tmp isn't possible at the moment.
+                builds = get_charm_path()
+
+                if not os.path.exists("{}/builds/{}".format(builds, charm)):
+                    cmd = "charm build {}/{} -o {}/".format(
+                        get_layer_path(),
+                        charm,
+                        builds,
+                    )
+                    subprocess.check_call(shlex.split(cmd))

                 self.artifacts[charm] = {
                     'tmpdir': builds,
@@ -430,23 +587,54 @@ class TestN2VC(object):
         either a vdu or vnf.
         """
-        self.n2vc = get_n2vc(loop=loop)
+        if not self.n2vc:
+            self.n2vc = get_n2vc(loop=loop)

-        vnf_name = self.n2vc.FormatApplicationName(
+        application = self.n2vc.FormatApplicationName(
             self.ns_name,
             self.vnf_name,
             str(vnf_index),
         )
-        logging.debug("Deploying charm at {}".format(self.artifacts[charm]))
+
+        # Initialize the state of the application
+        self.state[application] = {
+            'status': None,     # Juju status
+            'container': None,  # lxd container, for proxy charms
+            'actions': {},      # Actions we've executed
+            'done': False,      # Are we done testing this charm?
+            'phase': "deploy",  # What phase is this application in?
+        }
+
+        debug("Deploying charm at {}".format(self.artifacts[charm]))
+
+        # If this is a native charm, we need to provision the underlying
+        # machine, a la an LXC container.
+        machine_spec = {}
+
+        if not self.isproxy(application):
+            debug("Creating container for native charm")
+            # args = ("default", application, None, None)
+            self.state[application]['container'] = create_lxd_container(
+                name=os.path.basename(__file__)
+            )
+
+            hostname = self.get_container_ip(
+                self.state[application]['container'],
+            )
+
+            machine_spec = {
+                'host': hostname,
+                'user': 'ubuntu',
+            }

         await self.n2vc.DeployCharms(
             self.ns_name,
-            vnf_name,
+            application,
             self.vnfd,
             self.get_charm(charm),
             params,
-            {},
-            self.n2vc_callback
+            machine_spec,
+            self.n2vc_callback,
         )

     @classmethod
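For native charms, deploy() provisions its own LXD container and passes a
machine_spec through DeployCharms(); n2vc/vnf.py (above) then enlists that
machine with model.add_machine(spec='ssh:<user>@<host>:<key>') using the
N2VC private key. A sketch of the contract, with an illustrative address:

    machine_spec = {
        'host': '10.0.8.10',  # illustrative management IP of the machine
        'user': 'ubuntu',     # account that accepts the N2VC public key
    }
    # Passing an empty dict ({}) instead leaves placement to Juju,
    # which is what proxy charms use.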
{}".format(application)) + + if self.state[application]['done']: + debug("Trying to execute tests against finished charm...aborting") + return False + + try: + phase = self.get_phase(application) + # We enter the test phase when after deploy (for native charms) or + # configure, for proxy charms. + if phase in ["deploy", "configure"]: + self.set_phase(application, "test") + if self.are_tests_finished(): + raise Exception("Trying to execute init-config on finished test") + + if await self.execute_initial_config_primitives(application): + # check for metrics + await self.check_metrics(application) + + debug("Done testing {}".format(application)) + self.state[application]['done'] = True + + except Exception as ex: + debug("Exception in execute_charm_tests: {}".format(ex)) + finally: + await asyncio.sleep(0.1) + + return True + @classmethod async def CreateContainer(self, *args): """Create a LXD container for use with a proxy charm.abs 1. Get the public key from the charm via `get-ssh-public-key` action 2. Create container with said key injected for the ubuntu user - """ - if self.container is None: - # logging.debug("CreateContainer called.") - # HACK: Set this so the n2vc_callback knows - # there's a container being created - self.container = True + Returns a Container object + """ + # Create and configure a LXD container for use with a proxy charm. + (model, application, _, _) = args - # Create and configure a LXD container for use with a proxy charm. - (model_name, application_name, _, _) = args + debug("[CreateContainer] {}".format(args)) + container = None + try: # Execute 'get-ssh-public-key' primitive and get returned value uuid = await self.n2vc.ExecutePrimitive( - model_name, - application_name, + model, + application, "get-ssh-public-key", None, ) - # logging.debug("Action UUID: {}".format(uuid)) - result = await self.n2vc.GetPrimitiveOutput(model_name, uuid) - # logging.debug("Action result: {}".format(result)) + + result = await self.n2vc.GetPrimitiveOutput(model, uuid) pubkey = result['pubkey'] - self.container = create_lxd_container( + container = create_lxd_container( public_key=pubkey, name=os.path.basename(__file__) ) - return self.container + return container + except Exception as ex: + debug("Error creating container: {}".format(ex)) + pass + + return None + + @classmethod + async def stop(self): + """Stop the test. 
+
+        - Remove charms
+        - Stop and delete containers
+        - Logout of N2VC
+
+        TODO: Clean up duplicate code between teardown_class() and stop()
+        """
+        debug("stop() called")
+
+        if self.n2vc and self._running and not self._stopping:
+            self._running = False
+            self._stopping = True
+
+            for application in self.charms:
+                try:
+                    await self.n2vc.RemoveCharms(self.model, application)
+
+                    while True:
+                        # Wait for the application to be removed
+                        await asyncio.sleep(10)
+                        if not await self.n2vc.HasApplication(
+                            self.model,
+                            application,
+                        ):
+                            break
+
+                    # Need to wait for the charm to finish, because native charms
+                    if self.state[application]['container']:
+                        debug("Deleting LXD container...")
+                        destroy_lxd_container(
+                            self.state[application]['container']
+                        )
+                        self.state[application]['container'] = None
+                        debug("Deleting LXD container...done.")
+                    else:
+                        debug("No container found for {}".format(application))
+                except Exception as e:
+                    debug("Error while deleting container: {}".format(e))

+            # Logout of N2VC
+            try:
+                debug("stop(): Logging out of N2VC...")
+                await self.n2vc.logout()
+                self.n2vc = None
+                debug("stop(): Logging out of N2VC...Done.")
+            except Exception as ex:
+                debug(ex)
+
+            # Let the test know we're finished.
+            debug("Marking test as finished.")
+            # self._running = False
+        else:
+            debug("Skipping stop()")

     @classmethod
-    def get_container_ip(self):
+    def get_container_ip(self, container):
         """Return the IPv4 address of container's eth0 interface."""
         ipaddr = None
-        if self.container:
-            addresses = self.container.state().network['eth0']['addresses']
+        if container:
+            addresses = container.state().network['eth0']['addresses']
             # The interface may have more than one address, but we only need
             # the first one for testing purposes.
             ipaddr = addresses[0]['address']
@@ -621,104 +943,53 @@ class TestN2VC(object):
         return ipaddr

     @classmethod
-    def n2vc_callback(self, *args, **kwargs):
-        """Monitor and react to changes in the charm state.
-
-        This is where we will monitor the state of the charm:
-        - is it active?
-        - is it in error?
-        - is it waiting on input to continue?
-
-        When the state changes, we respond appropriately:
-        - configuring ssh credentials for a proxy charm
-        - running a service primitive
-
-        Lastly, when the test has finished we begin the teardown, removing the
-        charm and associated LXD container, and notify pytest that this test
-        is over.
-
-        Args are expected to contain four values, received from N2VC:
-        - str, the name of the model
-        - str, the name of the application
-        - str, the workload status as reported by Juju
-        - str, the workload message as reported by Juju
-        """
-        (model, application, status, message) = args
-        # logging.debug("Callback for {}/{} - {} ({})".format(
-        #     model,
-        #     application,
-        #     status,
-        #     message
-        # ))
-
-        # Make sure we're only receiving valid status. This will catch charms
-        # that aren't setting their workload state and appear as "unknown"
-        # assert status not in ["active", "blocked", "waiting", "maintenance"]
-
-        task = None
-        if kwargs and 'task' in kwargs:
-            task = kwargs['task']
-            # logging.debug("Got task: {}".format(task))
-
-        # Closures and inner functions, oh my.
-        def is_active():
-            """Is the charm in an active state?"""
-            if status in ["active"]:
-                return True
-            return False
-
-        def is_blocked():
-            """Is the charm waiting for us?"""
-            if status in ["blocked"]:
-                return True
-            return False
-
-        def configure_ssh_proxy(task):
-            """Configure the proxy charm to use the lxd container."""
-            logging.debug("configure_ssh_proxy({})".format(task))
+    async def configure_ssh_proxy(self, application, task=None):
+        """Configure the proxy charm to use the lxd container.

-            mgmtaddr = self.get_container_ip()
+        Configure the charm to use a LXD container as its VNF.
+        """
+        debug("Configuring ssh proxy for {}".format(application))

-            logging.debug(
-                "Setting config ssh-hostname={}".format(mgmtaddr)
-            )
+        mgmtaddr = self.get_container_ip(
+            self.state[application]['container'],
+        )

-            task = asyncio.ensure_future(
-                self.n2vc.ExecutePrimitive(
-                    model,
-                    application,
-                    "config",
-                    None,
-                    params={
-                        'ssh-hostname': mgmtaddr,
-                        'ssh-username': 'ubuntu',
-                    }
-                )
+        debug(
+            "Setting ssh-hostname for {} to {}".format(
+                application,
+                mgmtaddr,
             )
+        )

-            # Execute the VNFD's 'initial-config-primitive'
-            task.add_done_callback(functools.partial(
-                execute_initial_config_primitives,
-            ))
+        await self.n2vc.ExecutePrimitive(
+            self.model,
+            application,
+            "config",
+            None,
+            params={
+                'ssh-hostname': mgmtaddr,
+                'ssh-username': 'ubuntu',
+            }
+        )

-        def execute_initial_config_primitives(task=None):
-            logging.debug("execute_initial_config_primitives({})".format(task))
+        return True

+    @classmethod
+    async def execute_initial_config_primitives(self, application, task=None):
+        debug("Executing initial_config_primitives for {}".format(application))
+        try:
             init_config = self.charms[application]

             """
             The initial-config-primitive is run during deploy but may fail
             on some steps because proxy charm access isn't configured.
-            At this stage, we'll re-run those actions.
+            Re-run those actions so we can inspect the status.
             """
-            task = asyncio.ensure_future(
-                self.n2vc.ExecuteInitialPrimitives(
-                    model,
-                    application,
-                    init_config,
-                )
+            uuids = await self.n2vc.ExecuteInitialPrimitives(
+                self.model,
+                application,
+                init_config,
             )

             """
             ExecutePrimitives will return a list of uuids. We need to check the
             status of each. The test continues if all Actions succeed, and
             fails if any of them fail.
             """
-            task.add_done_callback(functools.partial(wait_for_uuids))
+            await self.wait_for_uuids(application, uuids)
+            debug("Primitives for {} finished.".format(application))

-        def check_metrics():
-            task = asyncio.ensure_future(
-                self.n2vc.GetMetrics(
-                    model,
-                    application,
-                )
-            )
+            return True
+        except Exception as ex:
+            debug("execute_initial_config_primitives exception: {}".format(ex))

-            task.add_done_callback(
-                functools.partial(
-                    verify_metrics,
-                )
+        return False
+
+    @classmethod
+    async def check_metrics(self, application, task=None):
+        """Check and run metrics, if present.
+
+        Checks to see if metrics are specified by the charm. If so, collects
+        the metrics.
+
+        If no metrics, then mark the test as finished.
+ """ + if has_metrics(self.charms[application]['name']): + debug("Collecting metrics for {}".format(application)) + + metrics = await self.n2vc.GetMetrics( + self.model, + application, ) - def verify_metrics(task): - logging.debug("Verifying metrics!") - # Check if task returned metrics - results = task.result() + return await self.verify_metrics(application, metrics) - metrics = parse_metrics(application, results) - logging.debug(metrics) + @classmethod + async def verify_metrics(self, application, metrics): + """Verify the charm's metrics. - if len(metrics): - task = asyncio.ensure_future( - self.n2vc.RemoveCharms(model, application) - ) + Verify that the charm has sent metrics successfully. - task.add_done_callback(functools.partial(stop_test)) + Stops the test when finished. + """ + debug("Verifying metrics for {}: {}".format(application, metrics)) - else: - # TODO: Ran into a case where it took 9 attempts before metrics - # were available; the controller is slow sometimes. - time.sleep(60) - check_metrics() - - def wait_for_uuids(task): - logging.debug("wait_for_uuids({})".format(task)) - uuids = task.result() - - waitfor = len(uuids) - finished = 0 - - def get_primitive_result(uuid, task): - logging.debug("Got result from action") - # completed, failed, or running - result = task.result() - - if status in result and result['status'] \ - in ["completed", "failed"]: - - # It's over - logging.debug("Action {} is {}".format( - uuid, - task.result['status']) - ) - pass - else: - logging.debug("action is still running") + if len(metrics): + return True - def get_primitive_status(uuid, task): - result = task.result() + else: + # TODO: Ran into a case where it took 9 attempts before metrics + # were available; the controller is slow sometimes. + await asyncio.sleep(30) + return await self.check_metrics(application) - if result == "completed": - # Make sure all primitives are finished - global finished - finished += 1 + @classmethod + async def wait_for_uuids(self, application, uuids): + """Wait for primitives to execute. - if waitfor == finished: - # logging.debug("Action complete; removing charm") - task = asyncio.ensure_future( - self.n2vc.RemoveCharms(model, application) - ) + The task will provide a list of uuids representing primitives that are + queued to run. + """ + debug("Waiting for uuids for {}: {}".format(application, uuids)) + waitfor = len(uuids) + finished = 0 - task.add_done_callback(functools.partial(stop_test)) - elif result == "failed": - # logging.debug("Action failed; removing charm") - assert False - self._running = False - return - else: - # logging.debug("action is still running: {}".format(result)) - # logging.debug(result) - # pass - # The primitive is running; try again. - task = asyncio.ensure_future( - self.n2vc.GetPrimitiveStatus(model, uuid) - ) - task.add_done_callback(functools.partial( - get_primitive_result, - uuid, - )) - - for actionid in uuids: - task = asyncio.ensure_future( - self.n2vc.GetPrimitiveStatus(model, actionid) - ) - task.add_done_callback(functools.partial( - get_primitive_result, - actionid, - )) + while waitfor > finished: + for uid in uuids: + await asyncio.sleep(10) - def stop_test(task): - """Stop the test. + if uuid not in self.state[application]['actions']: + self.state[application]['actions'][uid] = "pending" - When the test has either succeeded or reached a failing state, - begin the process of removing the test fixtures. 
- """ - asyncio.ensure_future( - self.n2vc.RemoveCharms(model, application) - ) + status = self.state[application]['actions'][uid] - self._running = False + # Have we already marked this as done? + if status in ["pending", "running"]: - if is_blocked(): - # logging.debug("Charm is in a blocked state!") + debug("Getting status of {} ({})...".format(uid, status)) + status = await self.n2vc.GetPrimitiveStatus( + self.model, + uid, + ) + debug("...state of {} is {}".format(uid, status)) + self.state[application]['actions'][uid] = status - # Container only applies to proxy charms. - if self.isproxy(application): + if status in ['completed', 'failed']: + finished += 1 - if self.container is None: - # logging.debug( - # "Ensuring CreateContainer: status is {}".format(status) - # ) + debug("{}/{} actions complete".format(finished, waitfor)) - # Create the new LXD container - task = asyncio.ensure_future(self.CreateContainer(*args)) + # Wait for the primitive to finish and try again + if waitfor > finished: + debug("Waiting 10s for action to finish...") + await asyncio.sleep(10) - # Configure the proxy charm to use the container when ready - task.add_done_callback(functools.partial( - configure_ssh_proxy, - )) + @classmethod + def n2vc_callback(self, *args, **kwargs): + (model, application, status, message) = args + # debug("callback: {}".format(args)) + + if application not in self.state: + # Initialize the state of the application + self.state[application] = { + 'status': None, # Juju status + 'container': None, # lxd container, for proxy charms + 'actions': {}, # Actions we've executed + 'done': False, # Are we done testing this charm? + 'phase': "deploy", # What phase is this application in? + } - # task.add_done_callback(functools.partial( - # stop_test, - # )) - # create_lxd_container() - # self.container = True - else: - # A charm may validly be in a blocked state if it's waiting for - # relations or some other kind of manual intervention - # logging.debug("This is not a proxy charm.") - # TODO: needs testing - task = asyncio.ensure_future( - execute_initial_config_primitives() - ) + self.state[application]['status'] = status - task.add_done_callback(functools.partial(stop_test)) + if status in ['waiting', 'maintenance', 'unknown']: + # Nothing to do for these + return - elif is_active(): - # Does the charm have metrics defined? - if has_metrics(self.charms[application]['name']): - # logging.debug("metrics.yaml defined in the layer!") + debug("callback: {}".format(args)) - # Force a run of the metric collector, so we don't have - # to wait for it's normal 5 minute interval run. - # NOTE: this shouldn't be done outside of CI - collect_metrics(application) + if self.state[application]['done']: + debug("{} is done".format(application)) + return - # get the current metrics - check_metrics() - else: - # When the charm reaches an active state and hasn't been - # handled (metrics collection, etc)., the test has succeded. - # logging.debug("Charm is active! Removing charm...") - task = asyncio.ensure_future( - self.n2vc.RemoveCharms(model, application) - ) + if status in ["blocked"] and self.isproxy(application): + if self.state[application]['phase'] == "deploy": + debug("Configuring proxy charm for {}".format(application)) + asyncio.ensure_future(self.configure_proxy_charm(*args)) - task.add_done_callback(functools.partial(stop_test)) + elif status in ["active"]: + """When a charm is active, we can assume that it has been properly + configured (not blocked), regardless of if it's a proxy or not. 
diff --git a/tests/charms/layers/proxy-ci/reactive/proxy_ci.py b/tests/charms/layers/proxy-ci/reactive/proxy_ci.py
index 30e4eea..98b7f96 100644
--- a/tests/charms/layers/proxy-ci/reactive/proxy_ci.py
+++ b/tests/charms/layers/proxy-ci/reactive/proxy_ci.py
@@ -13,7 +13,7 @@ import charms.sshproxy


 @when_not('proxy-ci.installed')
-def install_metrics_ci():
+def install_proxy_ci():
     status_set('blocked', "Waiting for SSH credentials.")
     set_flag('proxy-ci.installed')

diff --git a/tests/integration/test_charm_native.py b/tests/integration/test_charm_native.py
index d1b60ff..85a282e 100644
--- a/tests/integration/test_charm_native.py
+++ b/tests/integration/test_charm_native.py
@@ -3,7 +3,6 @@ Deploy a native charm (to LXD) and execute a primitive
 """

 import asyncio
-import logging
 import pytest
 from .. import base

@@ -133,9 +132,10 @@ class TestCharm(base.TestN2VC):
                 loop=event_loop,
             )

-        while self.running():
-            logging.debug("Waiting for test to finish...")
+        while await self.running():
+            print("Waiting for test to finish...")
             await asyncio.sleep(15)
-        logging.debug("test_charm_native stopped")
+
+        print("test_charm_native stopped")

         return 'ok'
diff --git a/tests/integration/test_charm_proxy.py b/tests/integration/test_charm_proxy.py
index c1661ac..a05df5f 100644
--- a/tests/integration/test_charm_proxy.py
+++ b/tests/integration/test_charm_proxy.py
@@ -134,9 +134,9 @@ class TestCharm(base.TestN2VC):
                 event_loop,
             )

-        while self.running():
-            logging.debug("Waiting for test to finish...")
+        while await self.running():
+            print("Waiting for test to finish...")
             await asyncio.sleep(15)
-        logging.debug("test_charm_native stopped")
+        logging.debug("test_charm_proxy stopped")

         return 'ok'
diff --git a/tests/integration/test_metrics_native.py b/tests/integration/test_metrics_native.py
index 74faebf..4288915 100644
--- a/tests/integration/test_metrics_native.py
+++ b/tests/integration/test_metrics_native.py
@@ -136,9 +136,10 @@ class TestCharm(base.TestN2VC):
                 event_loop,
             )

-        while self.running():
-            logging.debug("Waiting for test to finish...")
+        while await self.running():
+            print("Waiting for test to finish...")
             await asyncio.sleep(15)
+
         logging.debug("test_metrics_native stopped")

         return 'ok'
diff --git a/tests/integration/test_metrics_proxy.py b/tests/integration/test_metrics_proxy.py
index 98285fd..e7fa920 100644
--- a/tests/integration/test_metrics_proxy.py
+++ b/tests/integration/test_metrics_proxy.py
@@ -130,8 +130,8 @@ class TestCharm(base.TestN2VC):
                 event_loop,
             )

-        while self.running():
-            logging.debug("Waiting for test to finish...")
+        while await self.running():
+            print("Waiting for test to finish...")
             await asyncio.sleep(15)

         logging.debug("test_metrics_proxy stopped")
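Each integration test above follows the same skeleton: deploy the charm(s)
described by the test's descriptors, then poll running() until the
callback-driven state machine marks every application done. Reduced to its
core (descriptors and imports omitted, names illustrative):

    class TestCharm(base.TestN2VC):
        NSD_YAML = "..."   # network service descriptor
        VNFD_YAML = "..."  # VNF descriptor

        @pytest.mark.asyncio
        async def test_charm(self, event_loop):
            for config in self.get_config():
                await self.deploy(0, config['juju']['charm'], config, event_loop)
            while await self.running():
                await asyncio.sleep(15)
            return 'ok'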
diff --git a/tests/integration/test_multivdu_multicharm.py b/tests/integration/test_multivdu_multicharm.py
new file mode 100644
index 0000000..e0fb9c7
--- /dev/null
+++ b/tests/integration/test_multivdu_multicharm.py
@@ -0,0 +1,179 @@
+"""
+Deploy a multi-vdu, multi-charm VNF
+"""
+
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+    NSD_YAML = """
+    nsd:nsd-catalog:
+        nsd:
+        - id: multivdumulticharm-ns
+          name: multivdumulticharm-ns
+          short-name: multivdumulticharm-ns
+          description: NS with 1 VNF connected by datanet and mgmtnet VLs
+          version: '1.0'
+          logo: osm.png
+          constituent-vnfd:
+          - vnfd-id-ref: multivdumulticharm-vnf
+            member-vnf-index: '1'
+          vld:
+          - id: mgmtnet
+            name: mgmtnet
+            short-name: mgmtnet
+            type: ELAN
+            mgmt-network: 'true'
+            vim-network-name: mgmt
+            vnfd-connection-point-ref:
+            - vnfd-id-ref: multivdumulticharm-vnf
+              member-vnf-index-ref: '1'
+              vnfd-connection-point-ref: vnf-mgmt
+            - vnfd-id-ref: multivdumulticharm-vnf
+              member-vnf-index-ref: '2'
+              vnfd-connection-point-ref: vnf-mgmt
+          - id: datanet
+            name: datanet
+            short-name: datanet
+            type: ELAN
+            vnfd-connection-point-ref:
+            - vnfd-id-ref: multivdumulticharm-vnf
+              member-vnf-index-ref: '1'
+              vnfd-connection-point-ref: vnf-data
+            - vnfd-id-ref: multivdumulticharm-vnf
+              member-vnf-index-ref: '2'
+              vnfd-connection-point-ref: vnf-data
+    """
+
+    VNFD_YAML = """
+    vnfd:vnfd-catalog:
+        vnfd:
+        - id: multivdumulticharm-vnf
+          name: multivdumulticharm-vnf
+          short-name: multivdumulticharm-vnf
+          version: '1.0'
+          description: A VNF consisting of 2 VDUs w/proxy charms
+          logo: osm.png
+          connection-point:
+          - id: vnf-mgmt
+            name: vnf-mgmt
+            short-name: vnf-mgmt
+            type: VPORT
+          - id: vnf-data
+            name: vnf-data
+            short-name: vnf-data
+            type: VPORT
+          mgmt-interface:
+            cp: vnf-mgmt
+          internal-vld:
+          - id: internal
+            name: internal
+            short-name: internal
+            type: ELAN
+            internal-connection-point:
+            - id-ref: mgmtVM-internal
+            - id-ref: dataVM-internal
+          vdu:
+          - id: mgmtVM
+            name: mgmtVM
+            image: xenial
+            count: '1'
+            vm-flavor:
+              vcpu-count: '1'
+              memory-mb: '1024'
+              storage-gb: '10'
+            interface:
+            - name: mgmtVM-eth0
+              position: '1'
+              type: EXTERNAL
+              virtual-interface:
+                type: VIRTIO
+              external-connection-point-ref: vnf-mgmt
+            - name: mgmtVM-eth1
+              position: '2'
+              type: INTERNAL
+              virtual-interface:
+                type: VIRTIO
+              internal-connection-point-ref: mgmtVM-internal
+            internal-connection-point:
+            - id: mgmtVM-internal
+              name: mgmtVM-internal
+              short-name: mgmtVM-internal
+              type: VPORT
+            cloud-init-file: cloud-config.txt
+            vdu-configuration:
+              juju:
+                charm: proxy-ci
+                proxy: true
+              initial-config-primitive:
+              - seq: '1'
+                name: test
+          - id: dataVM
+            name: dataVM
+            image: xenial
+            count: '1'
+            vm-flavor:
+              vcpu-count: '1'
+              memory-mb: '1024'
+              storage-gb: '10'
+            interface:
+            - name: dataVM-eth0
+              position: '1'
+              type: EXTERNAL
+              virtual-interface:
+                type: VIRTIO
+              external-connection-point-ref: vnf-mgmt
+            - name: dataVM-eth1
+              position: '2'
+              type: INTERNAL
+              virtual-interface:
+                type: VIRTIO
+              internal-connection-point-ref: dataVM-internal
+            internal-connection-point:
+            - id: dataVM-internal
+              name: dataVM-internal
+              short-name: dataVM-internal
+              type: VPORT
+            cloud-init-file: cloud-config.txt
+            vdu-configuration:
+              juju:
+                charm: proxy-ci
+                proxy: true
+              initial-config-primitive:
+              - seq: '1'
+                name: test
+
+    """
+
+    # @pytest.mark.serial
+    @pytest.mark.asyncio
+    async def test_multivdu_multicharm(self, event_loop):
+        """Deploy and execute the initial-config-primitive of a VNF."""
+
+        if self.nsd and self.vnfd:
+            vnf_index = 0
+
+            for config in self.get_config():
+                juju = config['juju']
+                charm = juju['charm']
+
+                await self.deploy(
+                    vnf_index,
+                    charm,
+                    config,
+                    event_loop,
+                )
+                vnf_index += 1
+
+            while await self.running():
+                logging.debug("Waiting for test to finish...")
+                await asyncio.sleep(15)
+            # assert False
+        logging.debug("test_multivdu_multicharm stopped")
+
+        return 'ok'
diff --git a/tests/integration/test_no_initial_config_primitive.py b/tests/integration/test_no_initial_config_primitive.py
index e66a695..0d90205 100644
--- a/tests/integration/test_no_initial_config_primitive.py
+++ b/tests/integration/test_no_initial_config_primitive.py
@@ -133,9 +133,10 @@ class TestCharm(base.TestN2VC):
                 event_loop,
             )

-        while self.running():
-            logging.debug("Waiting for test to finish...")
+        while await self.running():
+            print("Waiting for test to finish...")
             await asyncio.sleep(15)
-        logging.debug("test_charm_native stopped")
+
+        logging.debug("test_charm_no_initial_config_primitive stopped")

         return 'ok'
diff --git a/tests/integration/test_no_parameter.py b/tests/integration/test_no_parameter.py
index 39c2443..55c2c3a 100644
--- a/tests/integration/test_no_parameter.py
+++ b/tests/integration/test_no_parameter.py
@@ -133,10 +133,8 @@ class TestCharm(base.TestN2VC):
                 event_loop,
             )

-        while self.running():
-            logging.debug("Waiting for test to finish...")
+        while await self.running():
+            print("Waiting for test to finish...")
             await asyncio.sleep(15)

-        logging.debug("test_charm_native stopped")
-        await self.n2vc.logout()
         return 'ok'
diff --git a/tests/integration/test_non_string_parameter.py b/tests/integration/test_non_string_parameter.py
index ed3dfc7..b93dfed 100644
--- a/tests/integration/test_non_string_parameter.py
+++ b/tests/integration/test_non_string_parameter.py
@@ -139,9 +139,9 @@ class TestCharm(base.TestN2VC):
                 event_loop,
             )

-        while self.running():
-            logging.debug("Waiting for test to finish...")
+        while await self.running():
+            print("Waiting for test to finish...")
             await asyncio.sleep(15)
-        logging.debug("test_charm_native stopped")
+        logging.debug("test_charm_non_string_parameter stopped")

         return 'ok'
diff --git a/tests/test_ssh_keygen.py b/tests/test_ssh_keygen.py
new file mode 100644
index 0000000..3a129a3
--- /dev/null
+++ b/tests/test_ssh_keygen.py
@@ -0,0 +1,18 @@
+"""
+Test N2VC's ssh key generation
+"""
+import os
+import pytest
+from . import base
+import tempfile
+
+
+@pytest.mark.asyncio
+async def test_ssh_keygen(monkeypatch):
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        monkeypatch.setitem(os.environ, "HOME", tmpdirname)
+
+        client = base.get_n2vc()
+
+        public_key = await client.GetPublicKey()
+        assert len(public_key)
diff --git a/tox.ini b/tox.ini
index 9ef529f..a568d3f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -42,7 +42,7 @@ commands =
 [testenv:lint]
 envdir = {toxworkdir}/py3
 commands =
-    flake8 --ignore E501,E402 {posargs} n2vc tests
+    flake8 --ignore E501,E402 --exclude tests/charms/builds,tests/charms/deps {posargs} n2vc tests
 deps = flake8