#!/usr/bin/env python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import asyncio
import datetime
import logging
here = os.path.dirname(os.path.realpath(__file__))
-def is_bootstrapped():
- result = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
- return (
- result.returncode == 0 and
- len(result.stdout.decode().strip()) > 0)
-
-
-bootstrapped = pytest.mark.skipif(
- not is_bootstrapped(),
- reason='bootstrapped Juju environment required')
-
-
class CleanController():
"""
Context manager that automatically connects and disconnects from
logging.debug(
"[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
)
- # print(
- # "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
- # )
+ print(
+ "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
+ )
def get_charm_path():
def get_descriptor(descriptor):
desc = None
try:
- tmp = yaml.load(descriptor)
+ tmp = yaml.safe_load(descriptor)
# Remove the envelope
root = list(tmp.keys())[0]
vca_user = os.getenv('VCA_USER', 'admin')
vca_charms = os.getenv('VCA_CHARMS', None)
vca_secret = os.getenv('VCA_SECRET', None)
+ vca_cacert = os.getenv('VCA_CACERT', None)
+
+ # Get the Juju Public key
+ juju_public_key = get_juju_public_key()
+ if juju_public_key:
+ debug("Reading Juju public key @ {}".format(juju_public_key))
+ with open(juju_public_key, 'r') as f:
+ juju_public_key = f.read()
+ debug("Found public key: {}".format(juju_public_key))
+ else:
+ raise Exception("No Juju Public Key found")
+
+ # Get the ca-cert
+ # os.path.expanduser("~/.config/lxc")
+ # with open("{}/agent.conf".format(AGENT_PATH), "r") as f:
+ # try:
+ # y = yaml.safe_load(f)
+ # self.cacert = y['cacert']
+ # except yaml.YAMLError as exc:
+ # log("Unable to find Juju ca-cert.")
+ # raise exc
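+    # NOTE: rather than parsing agent.conf as sketched above, the ca-cert
+    # is currently taken from the VCA_CACERT environment variable (read
+    # above) and passed to N2VC as ca_cert below.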
client = n2vc.vnf.N2VC(
log=log,
user=vca_user,
secret=vca_secret,
artifacts=vca_charms,
- loop=loop
+ loop=loop,
+ juju_public_key=juju_public_key,
+ ca_cert=vca_cacert,
)
return client
name = name.replace("_", "-").replace(".", "")
client = get_lxd_client()
+ if not client:
+ raise Exception("Unable to connect to LXD")
+
test_machine = "test-{}-{}".format(
uuid.uuid4().hex[-4:],
name,
)
- private_key_path, public_key_path = find_juju_ssh_keys()
+ private_key_path, public_key_path = find_n2vc_ssh_keys()
try:
# create profile w/cloud-init and juju ssh key
)
)
+    try:
+        # Poll for up to five seconds for sshd to come up
+        waitcount = 0
+        while waitcount < 5:
+            if is_sshd_running(container):
+                break
+            waitcount += 1
+            time.sleep(1)
+        else:
+            # The loop finished without detecting sshd
+            debug("couldn't detect sshd running")
+            raise Exception("Unable to verify container sshd")
+
+ except Exception as ex:
+ debug(
+ "Error checking sshd status on {}: {}".format(
+ test_machine,
+ ex,
+ )
+ )
+
# HACK: We need to give sshd a chance to bind to the interface,
# and pylxd's container.execute seems to be broken and fails and/or
# hangs trying to properly check if the service is up.
return container
+def is_sshd_running(container):
+ """Check if sshd is running in the container.
+
+    Check whether the ssh service is running, via ``service ssh status``.
+
+ :param container: The container to check
+ :return boolean: True if sshd is running.
+ """
+ debug("Container: {}".format(container))
+ try:
+ (rc, stdout, stderr) = container.execute(
+ ["service", "ssh", "status"]
+ )
+ # If the status is a) found and b) running, the exit code will be 0
+ if rc == 0:
+ return True
+ except Exception as ex:
+ debug("Failed to check sshd service status: {}".format(ex))
+
+ return False
+
+
def destroy_lxd_container(container):
"""Stop and delete a LXD container.
return (None, None)
+def find_n2vc_ssh_keys():
+ """Find the N2VC ssh keys."""
+
+ paths = []
+    paths.append(os.path.expanduser("~/.ssh"))
+
+ for path in paths:
+ if os.path.exists(path):
+ private = os.path.expanduser("{}/id_n2vc_rsa".format(path))
+ public = os.path.expanduser("{}/id_n2vc_rsa.pub".format(path))
+ if os.path.exists(private) and os.path.exists(public):
+ return (private, public)
+ return (None, None)
+
+
def find_juju_ssh_keys():
"""Find the Juju ssh keys."""
paths = []
- paths.append(os.path.expanduser("~/.local/share/juju/ssh/"))
+ paths.append(os.path.expanduser("~/.local/share/juju/ssh"))
for path in paths:
if os.path.exists(path):
return keys[0]
-def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
+def get_juju_public_key():
+ """Find the Juju public key."""
+ paths = []
+
+ if 'VCA_PATH' in os.environ:
+ paths.append("{}/ssh".format(os.environ["VCA_PATH"]))
+
+ paths.append(os.path.expanduser("~/.local/share/juju/ssh"))
+ paths.append("/root/.local/share/juju/ssh")
+
+ for path in paths:
+ if os.path.exists(path):
+ public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
+ if os.path.exists(public):
+ return public
+ return None
+
+
+def get_lxd_client(host=None, port="8443", verify=False):
""" Get the LXD client."""
+
+ if host is None:
+ if 'LXD_HOST' in os.environ:
+ host = os.environ['LXD_HOST']
+ else:
+ host = '127.0.0.1'
+
+ passwd = None
+ if 'LXD_SECRET' in os.environ:
+ passwd = os.environ['LXD_SECRET']
+
+ # debug("Connecting to LXD remote {} w/authentication ({})".format(
+ # host,
+ # passwd
+ # ))
client = None
(crt, key) = find_lxd_config()
verify=verify,
)
+    # If the LXD server has a password set, authenticate with it.
+    if not client.trusted and passwd:
+        try:
+            client.authenticate(passwd)
+            if not client.trusted:
+                raise Exception("Unable to authenticate with LXD remote")
+        except pylxd.exceptions.LXDAPIException as ex:
+            # Our certificate already being in the trust store is not an
+            # error; anything else should propagate.
+            if 'Certificate already in trust store' not in str(ex):
+                raise
+
return client
"""TODO:
1. Validator Validation
- Automatically validate the descriptors we're using here, unless the test author explicitly wants to skip them. Useful to make sure tests aren't being run against invalid descriptors, validating functionality that may fail against a properly written descriptor.
+ Automatically validate the descriptors we're using here, unless the test
+ author explicitly wants to skip them. Useful to make sure tests aren't
+ being run against invalid descriptors, validating functionality that may
+ fail against a properly written descriptor.
- We need to have a flag (instance variable) that controls this behavior. It may be necessary to skip validation and run against a descriptor implementing features that have not yet been released in the Information Model.
+ We need to have a flag (instance variable) that controls this behavior. It
+ may be necessary to skip validation and run against a descriptor
+ implementing features that have not yet been released in the Information
+ Model.
"""
"""
self.ns_name = self.nsd['name']
self.vnf_name = self.vnfd['name']
- # Hard-coded to default for now, but this may change in the future.
- self.model = "default"
-
self.charms = {}
self.parse_vnf_descriptor()
-        assert self.charms is not {}
+        assert self.charms != {}
# Build the charm(s) needed for this test
for charm in self.get_charm_names():
+ # debug("Building charm {}".format(charm))
self.get_charm(charm)
# A bit of a hack, in order to allow the N2VC callback to run parallel
"""
debug("Running teardown_class...")
try:
+
debug("Destroying LXD containers...")
for application in self.state:
if self.state[application]['container']:
# Logout of N2VC
if self.n2vc:
- debug("Logging out of N2VC...")
+ debug("teardown_class(): Logging out of N2VC...")
yield from self.n2vc.logout()
- debug("Logging out of N2VC...done.")
+ debug("teardown_class(): Logging out of N2VC...done.")
+
debug("Running teardown_class...done.")
except Exception as ex:
debug("Exception in teardown_class: {}".format(ex))
-        Returns: The path to the built charm or None if `charm build` failed.
+        Returns: The path to the built charm; raises Exception if the build
+        failed.
"""
-
# Make sure the charm snap is installed
+ charm_cmd = None
try:
subprocess.check_call(['which', 'charm'])
- except subprocess.CalledProcessError as e:
+ charm_cmd = "charm build"
+ except subprocess.CalledProcessError:
+ # charm_cmd = "charm-build"
+ # debug("Using legacy charm-build")
raise Exception("charm snap not installed.")
if charm not in self.artifacts:
# Currently, the snap-installed command only has write access
# to the $HOME (changing in an upcoming release) so writing to
# /tmp isn't possible at the moment.
- builds = get_charm_path()
+ builds = get_charm_path()
if not os.path.exists("{}/builds/{}".format(builds, charm)):
- cmd = "charm build {}/{} -o {}/".format(
+ cmd = "{} --no-local-layers {}/{} -o {}/".format(
+ charm_cmd,
get_layer_path(),
charm,
builds,
)
- subprocess.check_call(shlex.split(cmd))
+ # debug(cmd)
+
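+            # charm-tools also honors CHARM_BUILD_DIR for its output
+            # location, so point it at the same builds directory used
+            # with -o above.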
+ env = os.environ.copy()
+ env["CHARM_BUILD_DIR"] = builds
+
+ subprocess.check_call(shlex.split(cmd), env=env)
- self.artifacts[charm] = {
- 'tmpdir': builds,
- 'charm': "{}/builds/{}".format(builds, charm),
- }
except subprocess.CalledProcessError as e:
- raise Exception("charm build failed: {}.".format(e))
+ # charm build will return error code 100 if the charm fails
+ # the auto-run of charm proof, which we can safely ignore for
+ # our CI charms.
+ if e.returncode != 100:
+ raise Exception("charm build failed: {}.".format(e))
+
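+        # Record the build artifact even when `charm build` exited 100;
+        # the charm output is still written, 100 only reflects the
+        # `charm proof` warnings noted above.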
+ self.artifacts[charm] = {
+ 'tmpdir': builds,
+ 'charm': "{}/builds/{}".format(builds, charm),
+ }
return self.artifacts[charm]['charm']
if not self.n2vc:
self.n2vc = get_n2vc(loop=loop)
- vnf_name = self.n2vc.FormatApplicationName(
+ debug("Creating model for Network Service {}".format(self.ns_name))
+ await self.n2vc.CreateNetworkService(self.ns_name)
+
+ application = self.n2vc.FormatApplicationName(
self.ns_name,
self.vnf_name,
str(vnf_index),
)
+
+ # Initialize the state of the application
+ self.state[application] = {
+ 'status': None, # Juju status
+ 'container': None, # lxd container, for proxy charms
+ 'actions': {}, # Actions we've executed
+ 'done': False, # Are we done testing this charm?
+ 'phase': "deploy", # What phase is this application in?
+ }
+
debug("Deploying charm at {}".format(self.artifacts[charm]))
+        # If this is a native charm, we need to provision the underlying
+        # machine, in this case an LXD container.
+ machine_spec = {}
+
+ if not self.isproxy(application):
+ debug("Creating container for native charm")
+ # args = ("default", application, None, None)
+ self.state[application]['container'] = create_lxd_container(
+ name=os.path.basename(__file__)
+ )
+
+ hostname = self.get_container_ip(
+ self.state[application]['container'],
+ )
+
+ machine_spec = {
+ 'hostname': hostname,
+ 'username': 'ubuntu',
+ }
+
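+        # With hostname/username set in machine_spec, DeployCharms is
+        # expected to install the charm onto the existing container via
+        # ssh instead of allocating a new machine.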
await self.n2vc.DeployCharms(
self.ns_name,
- vnf_name,
+ application,
self.vnfd,
self.get_charm(charm),
params,
- {},
+ machine_spec,
self.n2vc_callback,
)
@classmethod
async def configure_proxy_charm(self, *args):
+ """Configure a container for use via ssh."""
(model, application, _, _) = args
try:
self._running = False
self._stopping = True
+ # Destroy the network service
+ try:
+ await self.n2vc.DestroyNetworkService(self.ns_name)
+ except Exception as e:
+ debug(
+ "Error Destroying Network Service \"{}\": {}".format(
+ self.ns_name,
+ e,
+ )
+ )
+
+ # Wait for the applications to be removed and delete the containers
for application in self.charms:
try:
- await self.n2vc.RemoveCharms(self.model, application)
+
+ while True:
+ # Wait for the application to be removed
+ await asyncio.sleep(10)
+ if not await self.n2vc.HasApplication(
+ self.ns_name,
+ application,
+ ):
+ break
+
+ # Need to wait for the charm to finish, because native charms
if self.state[application]['container']:
debug("Deleting LXD container...")
destroy_lxd_container(
# Logout of N2VC
try:
- debug("Logging out of N2VC...")
+ debug("stop(): Logging out of N2VC...")
await self.n2vc.logout()
self.n2vc = None
- debug("Logging out of N2VC...Done.")
+ debug("stop(): Logging out of N2VC...Done.")
except Exception as ex:
debug(ex)
)
await self.n2vc.ExecutePrimitive(
- self.model,
+ self.ns_name,
application,
"config",
None,
Re-run those actions so we can inspect the status.
"""
uuids = await self.n2vc.ExecuteInitialPrimitives(
- self.model,
+ self.ns_name,
application,
init_config,
)
return True
except Exception as ex:
debug("execute_initial_config_primitives exception: {}".format(ex))
-
+            raise
+
-        return False
@classmethod
debug("Collecting metrics for {}".format(application))
metrics = await self.n2vc.GetMetrics(
- self.model,
+ self.ns_name,
application,
)
debug("Getting status of {} ({})...".format(uid, status))
status = await self.n2vc.GetPrimitiveStatus(
- self.model,
+ self.ns_name,
uid,
)
debug("...state of {} is {}".format(uid, status))
debug("{} is done".format(application))
return
+        if status == "error":
+            # If a charm enters an error state, end the test immediately;
+            # this also lets us catch intentionally broken charms quickly.
+            debug("{} is in an error state, stop the test.".format(application))
+            # asyncio.ensure_future(self.stop())
+            self.state[application]['done'] = True
+            assert False, "{} entered an error state".format(application)
+
if status in ["blocked"] and self.isproxy(application):
if self.state[application]['phase'] == "deploy":
debug("Configuring proxy charm for {}".format(application))