Improved Primitive support and better testing
This changeset addresses several issues.
- Improve primitive support so the status and output of an executed
primitive can be retrieved
- Merge latest upstream libjuju (required for new primitive features)
- New testing framework
This is the start of a new testing framework with the ability to
create and configure LXD containers with SSH, to use while testing proxy
charms.
- Add support for using ssh keys with proxy charms
  See Feature 1429. This uses the per-proxy charm/unit ssh keypair.
Signed-off-by: Adam Israel <adam.israel@canonical.com>
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/test_single_vdu_proxy_charm.py b/tests/test_single_vdu_proxy_charm.py
new file mode 100644
index 0000000..a971872
--- /dev/null
+++ b/tests/test_single_vdu_proxy_charm.py
@@ -0,0 +1,351 @@
+"""Test the deployment and configuration of a proxy charm.
+ 1. Deploy proxy charm to a unit
+ 2. Execute 'get-ssh-public-key' primitive and get returned value
+ 3. Create LXD container with unit's public ssh key
+ 4. Verify SSH works between unit and container
+ 5. Destroy Juju unit
+ 6. Stop and Destroy LXD container
+"""
+import asyncio
+import functools
+import os
+import sys
+import logging
+import unittest
+from . import utils
+import yaml
+from n2vc.vnf import N2VC
+
+NSD_YAML = """
+nsd:nsd-catalog:
+ nsd:
+ - id: singlecharmvdu-ns
+ name: singlecharmvdu-ns
+ short-name: singlecharmvdu-ns
+ description: NS with 1 VNFs singlecharmvdu-vnf connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: singlecharmvdu-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: singlecharmvdu-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: singlecharmvdu-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: singlecharmvdu-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: singlecharmvdu-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+"""
+
+VNFD_YAML = """
+vnfd:vnfd-catalog:
+ vnfd:
+ - id: singlecharmvdu-vnf
+ name: singlecharmvdu-vnf
+ short-name: singlecharmvdu-vnf
+ version: '1.0'
+ description: A VNF consisting of 2 VDUs w/charms connected to an internal VL, and one VDU with cloud-init
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vdu-configuration:
+ juju:
+ charm: simple
+ initial-config-primitive:
+ - seq: '1'
+ name: config
+ parameter:
+ - name: ssh-hostname
+ value: <rw_mgmt_ip>
+ - name: ssh-username
+ value: ubuntu
+ - name: ssh-password
+ value: ubuntu
+ - seq: '2'
+ name: touch
+ parameter:
+ - name: filename
+ value: '/home/ubuntu/first-touch-mgmtVM'
+ config-primitive:
+ - name: touch
+ parameter:
+ - name: filename
+ data-type: STRING
+ default-value: '/home/ubuntu/touched'
+
+"""
+
+
+class PythonTest(unittest.TestCase):
+    """End-to-end test of a single-VDU proxy charm deployment.
+
+    The test is event-driven: DeployCharms reports workload status
+    through n2vc_callback, which reacts to each (real or synthetic)
+    state by executing primitives, creating an LXD container, and
+    finally stopping the event loop.
+    """
+    # Class-level state shared across callback invocations.
+    n2vc = None
+    container = None
+
+    def setUp(self):
+        """Create the logger, event loop, and N2VC client."""
+        self.log = logging.getLogger()
+        self.log.level = logging.DEBUG
+
+        self.loop = asyncio.get_event_loop()
+
+        # self.container = utils.create_lxd_container()
+        self.n2vc = utils.get_n2vc()
+
+    def tearDown(self):
+        """Stop/delete any leftover container, then log out of the VCA."""
+        # NOTE(review): the "active" branch of n2vc_callback may already
+        # have destroyed the container and reset self.container to None.
+        if self.container:
+            self.container.stop()
+            self.container.delete()
+
+        self.loop.run_until_complete(self.n2vc.logout())
+
+    def n2vc_callback(self, model_name, application_name, workload_status, workload_message, task=None):
+        """React to workload-status changes reported by N2VC.
+
+        Besides real Juju workload states ("blocked", "active", ...),
+        this callback is re-invoked by its own add_done_callback hooks
+        with the synthetic states "exec_primitive" and
+        "primitive_status"; in those cases the finished asyncio task
+        carrying the result is also passed in.
+        """
+        self.log.debug("[Callback] Workload status '{}' for application {}".format(workload_status, application_name))
+        self.log.debug("[Callback] Task: \"{}\"".format(task))
+
+        if workload_status == "exec_primitive" and task:
+            self.log.debug("Getting Primitive Status")
+            # get the uuid from the task
+            uuid = task.result()
+
+            # get the status of the action
+            task = asyncio.ensure_future(
+                self.n2vc.GetPrimitiveStatus(
+                    model_name,
+                    uuid,
+                )
+            )
+            # The partial binds the new task as workload_message; the done
+            # callback then appends the finished task as the 'task' argument.
+            task.add_done_callback(functools.partial(self.n2vc_callback, model_name, application_name, "primitive_status", task))
+
+        if workload_status == "primitive_status" and task and not self.container:
+            self.log.debug("Creating LXD container")
+            # Get the ssh key
+            result = task.result()
+            pubkey = result['pubkey']
+
+            self.container = utils.create_lxd_container(pubkey)
+            mgmtaddr = self.container.state().network['eth0']['addresses']
+
+            # Re-run "config" with the container's real address (the initial
+            # config used an unreachable placeholder; see "blocked" branch).
+            self.log.debug("Setting config ssh-hostname={}".format(mgmtaddr[0]['address']))
+            task = asyncio.ensure_future(
+                self.n2vc.ExecutePrimitive(
+                    model_name,
+                    application_name,
+                    "config",
+                    None,
+                    params={
+                        'ssh-hostname': mgmtaddr[0]['address'],
+                    }
+                )
+            )
+            task.add_done_callback(functools.partial(self.n2vc_callback, model_name, application_name, None, None))
+
+        if workload_status and not task:
+            self.log.debug("Callback: workload status \"{}\"".format(workload_status))
+
+            if workload_status in ["blocked"] and not self.container:
+                self.log.debug("Getting public SSH key")
+
+                # Execute 'get-ssh-public-key' primitive and get returned value
+                # NOTE(review): ssh-hostname appears to be a deliberately
+                # unreachable placeholder; the "primitive_status" branch
+                # above later reconfigures with the real container address.
+                task = asyncio.ensure_future(
+                    self.n2vc.ExecutePrimitive(
+                        model_name,
+                        application_name,
+                        "get-ssh-public-key",
+                        None,
+                        params={
+                            'ssh-hostname': '10.195.8.78',
+                            'ssh-username': 'ubuntu',
+                            'ssh-password': 'ubuntu'
+                        }
+                    )
+                )
+                task.add_done_callback(functools.partial(self.n2vc_callback, model_name, application_name, "exec_primitive", task))
+
+
+                # task = asyncio.ensure_future(
+                #     self.n2vc.ExecutePrimitive(
+                #         model_name,
+                #         application_name,
+                #         "config",
+                #         None,
+                #         params={
+                #             'ssh-hostname': '10.195.8.78',
+                #             'ssh-username': 'ubuntu',
+                #             'ssh-password': 'ubuntu'
+                #         }
+                #     )
+                # )
+                # task.add_done_callback(functools.partial(self.n2vc_callback, None, None, None))
+                pass
+            elif workload_status in ["active"]:
+                self.log.debug("Removing charm")
+                task = asyncio.ensure_future(
+                    self.n2vc.RemoveCharms(model_name, application_name, self.n2vc_callback)
+                )
+                # NOTE(review): only three placeholders are bound here (four
+                # elsewhere), so the re-invocation sees workload_status=None
+                # and falls through every branch — confirm intentional.
+                task.add_done_callback(functools.partial(self.n2vc_callback, None, None, None))
+
+                if self.container:
+                    utils.destroy_lxd_container(self.container)
+                    self.container = None
+
+                # Stop the test
+                self.loop.call_soon_threadsafe(self.loop.stop)
+
+    def test_deploy_application(self):
+        """Deploy proxy charm to a unit."""
+        stream_handler = logging.StreamHandler(sys.stdout)
+        self.log.addHandler(stream_handler)
+        try:
+            self.log.info("Log handler installed")
+            nsd = utils.get_descriptor(NSD_YAML)
+            vnfd = utils.get_descriptor(VNFD_YAML)
+
+            if nsd and vnfd:
+
+                vca_charms = os.getenv('VCA_CHARMS', None)
+
+                params = {}
+                vnf_index = 0
+
+                def deploy():
+                    """An inner function to do the deployment of a charm from
+                    either a vdu or vnf.
+
+                    Reads 'charm' and 'params' from the enclosing scope;
+                    both are (re)assigned below before each call.
+                    """
+                    charm_dir = "{}/{}".format(vca_charms, charm)
+
+                    # Setting this to an IP that will fail the initial config.
+                    # This will be detected in the callback, which will execute
+                    # the "config" primitive with the right IP address.
+                    # mgmtaddr = self.container.state().network['eth0']['addresses']
+                    # params['rw_mgmt_ip'] = mgmtaddr[0]['address']
+
+                    # Legacy method is to set the ssh-private-key config
+                    # with open(utils.get_juju_private_key(), "r") as f:
+                    #     pkey = f.readline()
+                    #     params['ssh-private-key'] = pkey
+
+                    ns_name = "default"
+
+                    vnf_name = self.n2vc.FormatApplicationName(
+                        ns_name,
+                        vnfd['name'],
+                        str(vnf_index),
+                    )
+
+                    self.loop.run_until_complete(
+                        self.n2vc.DeployCharms(
+                            ns_name,
+                            vnf_name,
+                            vnfd,
+                            charm_dir,
+                            params,
+                            {},
+                            self.n2vc_callback
+                        )
+                    )
+
+                # Check if the VDUs in this VNF have a charm
+                for vdu in vnfd['vdu']:
+                    vdu_config = vdu.get('vdu-configuration')
+                    if vdu_config:
+                        juju = vdu_config['juju']
+                        self.assertIsNotNone(juju)
+
+                        charm = juju['charm']
+                        self.assertIsNotNone(charm)
+
+                        params['initial-config-primitive'] = vdu_config['initial-config-primitive']
+
+                        deploy()
+                        vnf_index += 1
+
+                # Check if this VNF has a charm
+                vnf_config = vnfd.get("vnf-configuration")
+                if vnf_config:
+                    juju = vnf_config['juju']
+                    self.assertIsNotNone(juju)
+
+                    charm = juju['charm']
+                    self.assertIsNotNone(charm)
+
+                    params['initial-config-primitive'] = vnf_config['initial-config-primitive']
+
+                    deploy()
+                    vnf_index += 1
+
+                # Runs until n2vc_callback stops the loop (charm removed
+                # after reaching "active").
+                self.loop.run_forever()
+                # while self.loop.is_running():
+                #     # await asyncio.sleep(1)
+                #     time.sleep(1)
+
+                # Test actions
+                # ExecutePrimitive(self, nsd, vnfd, vnf_member_index, primitive, callback, *callback_args, **params):
+
+                # self.loop.run_until_complete(n.DestroyNetworkService(nsd))
+
+                # self.loop.run_until_complete(self.n2vc.logout())
+        finally:
+            self.log.removeHandler(stream_handler)
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 0000000..9f9000e
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python3
+
+import logging
+import n2vc.vnf
+import pylxd
+import os
+import time
+import uuid
+import yaml
+
+# Disable InsecureRequestWarning w/LXD
+import urllib3
+urllib3.disable_warnings()
+
+
+def get_descriptor(descriptor):
+ desc = None
+ try:
+ tmp = yaml.load(descriptor)
+
+ # Remove the envelope
+ root = list(tmp.keys())[0]
+ if root == "nsd:nsd-catalog":
+ desc = tmp['nsd:nsd-catalog']['nsd'][0]
+ elif root == "vnfd:vnfd-catalog":
+ desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
+ except ValueError:
+ assert False
+ return desc
+
+def get_n2vc():
+ """Return an instance of N2VC.VNF."""
+ log = logging.getLogger()
+ log.level = logging.DEBUG
+
+ # Extract parameters from the environment in order to run our test
+ vca_host = os.getenv('VCA_HOST', '127.0.0.1')
+ vca_port = os.getenv('VCA_PORT', 17070)
+ vca_user = os.getenv('VCA_USER', 'admin')
+ vca_charms = os.getenv('VCA_CHARMS', None)
+ vca_secret = os.getenv('VCA_SECRET', None)
+ client = n2vc.vnf.N2VC(
+ log=log,
+ server=vca_host,
+ port=vca_port,
+ user=vca_user,
+ secret=vca_secret,
+ artifacts=vca_charms,
+ )
+ return client
+
+def create_lxd_container(public_key=None):
+ """
+ Returns a container object
+
+ If public_key isn't set, we'll use the Juju ssh key
+ """
+
+ client = get_lxd_client()
+ test_machine = "test-{}-add-manual-machine-ssh".format(
+ uuid.uuid4().hex[-4:]
+ )
+
+ private_key_path, public_key_path = find_juju_ssh_keys()
+ # private_key_path = os.path.expanduser(
+ # "~/.local/share/juju/ssh/juju_id_rsa"
+ # )
+ # public_key_path = os.path.expanduser(
+ # "~/.local/share/juju/ssh/juju_id_rsa.pub"
+ # )
+
+ # Use the self-signed cert generated by lxc on first run
+ crt = os.path.expanduser('~/snap/lxd/current/.config/lxc/client.crt')
+ assert os.path.exists(crt)
+
+ key = os.path.expanduser('~/snap/lxd/current/.config/lxc/client.key')
+ assert os.path.exists(key)
+
+ # create profile w/cloud-init and juju ssh key
+ if not public_key:
+ public_key = ""
+ with open(public_key_path, "r") as f:
+ public_key = f.readline()
+
+ profile = client.profiles.create(
+ test_machine,
+ config={'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
+ devices={
+ 'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
+ 'eth0': {
+ 'nictype': 'bridged',
+ 'parent': 'lxdbr0',
+ 'type': 'nic'
+ }
+ }
+ )
+
+ # create lxc machine
+ config = {
+ 'name': test_machine,
+ 'source': {
+ 'type': 'image',
+ 'alias': 'xenial',
+ 'mode': 'pull',
+ 'protocol': 'simplestreams',
+ 'server': 'https://cloud-images.ubuntu.com/releases',
+ },
+ 'profiles': [test_machine],
+ }
+ container = client.containers.create(config, wait=True)
+ container.start(wait=True)
+
+ def wait_for_network(container, timeout=30):
+ """Wait for eth0 to have an ipv4 address."""
+ starttime = time.time()
+ while(time.time() < starttime + timeout):
+ time.sleep(1)
+ if 'eth0' in container.state().network:
+ addresses = container.state().network['eth0']['addresses']
+ if len(addresses) > 0:
+ if addresses[0]['family'] == 'inet':
+ return addresses[0]
+ return None
+
+ host = wait_for_network(container)
+
+ # HACK: We need to give sshd a chance to bind to the interface,
+ # and pylxd's container.execute seems to be broken and fails and/or
+ # hangs trying to properly check if the service is up.
+ time.sleep(5)
+
+ return container
+
+
+def destroy_lxd_container(container):
+    """Stop and delete a LXD container.
+
+    Blocks until the stop completes before issuing the delete.
+    """
+    container.stop(wait=True)
+    container.delete()
+
+
+def find_lxd_config():
+ """Find the LXD configuration directory."""
+ paths = []
+ paths.append(os.path.expanduser("~/.config/lxc"))
+ paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))
+
+ for path in paths:
+ if os.path.exists(path):
+ crt = os.path.expanduser("{}/client.crt".format(path))
+ key = os.path.expanduser("{}/client.key".format(path))
+ if os.path.exists(crt) and os.path.exists(key):
+ return (crt, key)
+ return (None, None)
+
+
+def find_juju_ssh_keys():
+ """Find the Juju ssh keys."""
+
+ paths = []
+ paths.append(os.path.expanduser("~/.local/share/juju/ssh/"))
+
+ for path in paths:
+ if os.path.exists(path):
+ private = os.path.expanduser("{}/juju_id_rsa".format(path))
+ public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
+ if os.path.exists(private) and os.path.exists(public):
+ return (private, public)
+ return (None, None)
+
+
+def get_juju_private_key():
+ keys = find_juju_ssh_keys()
+ return keys[0]
+
+
+def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
+ """ Get the LXD client."""
+ client = None
+ (crt, key) = find_lxd_config()
+
+ if crt and key:
+ client = pylxd.Client(
+ endpoint="https://{}:{}".format(host, port),
+ cert=(crt, key),
+ verify=verify,
+ )
+
+ return client