15 from juju
.controller
import Controller
# Disable InsecureRequestWarning w/LXD
urllib3.disable_warnings()
# Quiet urllib3's own logger so connection chatter doesn't drown test output.
logging.getLogger("urllib3").setLevel(logging.WARNING)
# Absolute path of the directory containing this module; used to locate
# the test charms and layers that live alongside it.
here = os.path.dirname(os.path.realpath(__file__))
def is_bootstrapped():
    """Return True if a Juju controller is currently active.

    Shells out to `juju switch`; a zero exit status together with
    non-empty output means a controller is selected.
    """
    result = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
    return (
        result.returncode == 0 and
        len(result.stdout.decode().strip()) > 0)
# Test decorator: skip any test that needs a live controller when the
# local environment has not been bootstrapped.
bootstrapped = pytest.mark.skipif(
    not is_bootstrapped(),
    reason='bootstrapped Juju environment required')
class CleanController():
    """
    Context manager that automatically connects and disconnects from
    the currently active controller.

    Note: Unlike CleanModel, this will not create a new controller for you,
    and an active controller must already be available.
    """
    def __init__(self):
        # The connected Controller instance; populated on __aenter__.
        self._controller = None

    async def __aenter__(self):
        self._controller = Controller()
        await self._controller.connect()
        return self._controller

    async def __aexit__(self, exc_type, exc, tb):
        await self._controller.disconnect()
def debug(msg):
    """Format debug messages in a consistent way."""
    now = datetime.datetime.now()

    # TODO: Decide on the best way to log. Output from `logging.debug` shows up
    # when a test fails, but print() will always show up when running tox with
    # `-s`, which is really useful for debugging single tests without having to
    # insert a False assert to see the log.
    logging.debug(
        "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
    )
    print(
        "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
    )
def get_charm_path():
    """Return the directory containing the test charms."""
    return "{}/charms".format(here)
def get_layer_path():
    """Return the directory containing the charm layers used for builds."""
    return "{}/charms/layers".format(here)
def collect_metrics(application):
    """Invoke Juju's metrics collector.

    Caveat: this shells out to the `juju collect-metrics` command, rather than
    making an API call. At the time of writing, that API is not exposed through
    libjuju.

    :param application: Name of the deployed application to collect from.
    :raises Exception: if the collect-metrics command fails.
    """
    try:
        subprocess.check_call(['juju', 'collect-metrics', application])
    except subprocess.CalledProcessError as e:
        raise Exception("Unable to collect metrics: {}".format(e))
def has_metrics(charm):
    """Check if a charm has metrics defined."""
    # A charm layer declares metrics via a metrics.yaml in its layer dir.
    metricsyaml = "{}/{}/metrics.yaml".format(
        get_layer_path(),
        charm,
    )
    if os.path.exists(metricsyaml):
        return True
    return False
def get_descriptor(descriptor):
    """Parse a YAML descriptor and strip its catalog envelope.

    Returns the first nsd or vnfd record, or None when the envelope
    is not a recognized catalog type.
    """
    desc = None
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; descriptors here come from the test suite itself.
    tmp = yaml.load(descriptor)

    # Remove the envelope
    root = list(tmp.keys())[0]
    if root == "nsd:nsd-catalog":
        desc = tmp['nsd:nsd-catalog']['nsd'][0]
    elif root == "vnfd:vnfd-catalog":
        desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
    return desc
def get_n2vc(loop=None):
    """Return an instance of N2VC.VNF."""
    log = logging.getLogger()
    log.level = logging.DEBUG

    # Extract parameters from the environment in order to run our test
    vca_host = os.getenv('VCA_HOST', '127.0.0.1')
    vca_port = os.getenv('VCA_PORT', 17070)
    vca_user = os.getenv('VCA_USER', 'admin')
    vca_charms = os.getenv('VCA_CHARMS', None)
    vca_secret = os.getenv('VCA_SECRET', None)

    # NOTE(review): keyword arguments other than `artifacts` were
    # reconstructed from context — confirm against the N2VC constructor.
    client = n2vc.vnf.N2VC(
        log=log,
        server=vca_host,
        port=vca_port,
        user=vca_user,
        secret=vca_secret,
        artifacts=vca_charms,
        loop=loop,
    )
    return client
145 def create_lxd_container(public_key
=None, name
="test_name"):
147 Returns a container object
149 If public_key isn't set, we'll use the Juju ssh key
151 :param public_key: The public key to inject into the container
152 :param name: The name of the test being run
156 # Format name so it's valid
157 name
= name
.replace("_", "-").replace(".", "")
159 client
= get_lxd_client()
160 test_machine
= "test-{}-{}".format(
161 uuid
.uuid4().hex[-4:],
165 private_key_path
, public_key_path
= find_n2vc_ssh_keys()
168 # create profile w/cloud-init and juju ssh key
171 with
open(public_key_path
, "r") as f
:
172 public_key
= f
.readline()
174 client
.profiles
.create(
177 'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key
)},
179 'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
181 'nictype': 'bridged',
187 except Exception as ex
:
188 debug("Error creating lxd profile {}: {}".format(test_machine
, ex
))
194 'name': test_machine
,
199 'protocol': 'simplestreams',
200 'server': 'https://cloud-images.ubuntu.com/releases',
202 'profiles': [test_machine
],
204 container
= client
.containers
.create(config
, wait
=True)
205 container
.start(wait
=True)
206 except Exception as ex
:
207 debug("Error creating lxd container {}: {}".format(test_machine
, ex
))
208 # This is a test-ending failure.
211 def wait_for_network(container
, timeout
=30):
212 """Wait for eth0 to have an ipv4 address."""
213 starttime
= time
.time()
214 while(time
.time() < starttime
+ timeout
):
216 if 'eth0' in container
.state().network
:
217 addresses
= container
.state().network
['eth0']['addresses']
218 if len(addresses
) > 0:
219 if addresses
[0]['family'] == 'inet':
224 wait_for_network(container
)
225 except Exception as ex
:
227 "Error waiting for container {} network: {}".format(
233 # HACK: We need to give sshd a chance to bind to the interface,
234 # and pylxd's container.execute seems to be broken and fails and/or
235 # hangs trying to properly check if the service is up.
236 (exit_code
, stdout
, stderr
) = container
.execute([
238 '-c', '5', # Wait for 5 ECHO_REPLY
239 '8.8.8.8', # Ping Google's public DNS
240 '-W', '15', # Set a 15 second deadline
244 raise Exception("Unable to verify container network")
def destroy_lxd_container(container):
    """Stop and delete a LXD container.

    Sometimes we see errors talking to LXD -- ephemerial issues like
    load or a bug that's killed the API. We'll do our best to clean
    up here, and we should run a cleanup after all tests are finished
    to remove any extra containers and profiles belonging to us.
    """

    # A failed create may have passed a bool through instead of a container.
    if type(container) is bool:
        return

    name = container.name
    debug("Destroying container {}".format(name))

    client = get_lxd_client()

    def wait_for_stop(timeout=30):
        """Poll until the container reports the Stopped state (or timeout)."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if container.state == "Stopped":
                return

    def wait_for_delete(timeout=30):
        """Poll until the container no longer exists (or timeout)."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if client.containers.exists(name) is False:
                return

    # Each step below is best-effort: log and continue so one flaky LXD
    # call doesn't abort the rest of the cleanup.
    try:
        container.stop(wait=False)
        wait_for_stop()
    except Exception as ex:
        debug(
            "Error stopping container {}: {}".format(
                name,
                ex,
            )
        )

    try:
        container.delete(wait=False)
        wait_for_delete()
    except Exception as ex:
        debug(
            "Error deleting container {}: {}".format(
                name,
                ex,
            )
        )

    try:
        # Delete the profile created for this container
        profile = client.profiles.get(name)
        if profile:
            profile.delete()
    except Exception as ex:
        debug(
            "Error deleting profile {}: {}".format(
                name,
                ex,
            )
        )
def find_lxd_config():
    """Find the LXD configuration directory.

    Checks both the classic (~/.config/lxc) and snap-installed locations
    for client certificates.

    :return: (crt_path, key_path) tuple, or (None, None) if not found.
    """
    paths = []
    paths.append(os.path.expanduser("~/.config/lxc"))
    paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))

    for path in paths:
        if os.path.exists(path):
            crt = os.path.expanduser("{}/client.crt".format(path))
            key = os.path.expanduser("{}/client.key".format(path))
            if os.path.exists(crt) and os.path.exists(key):
                return (crt, key)
    return (None, None)
def find_n2vc_ssh_keys():
    """Find the N2VC ssh keys.

    :return: (private_path, public_path) tuple, or (None, None) if the
        id_n2vc_rsa keypair is not present under ~/.ssh/.
    """
    paths = []
    paths.append(os.path.expanduser("~/.ssh/"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/id_n2vc_rsa".format(path))
            public = os.path.expanduser("{}/id_n2vc_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)
    return (None, None)
def find_juju_ssh_keys():
    """Find the Juju ssh keys.

    :return: (private_path, public_path) tuple, or (None, None) if the
        juju_id_rsa keypair is not present under ~/.local/share/juju/ssh/.
    """
    paths = []
    paths.append(os.path.expanduser("~/.local/share/juju/ssh/"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/juju_id_rsa".format(path))
            public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)
    return (None, None)
def get_juju_private_key():
    """Return the path to Juju's private ssh key, or None if not found."""
    keys = find_juju_ssh_keys()
    return keys[0]
def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
    """ Get the LXD client.

    :param host: LXD API host to connect to.
    :param port: LXD API port.
    :param verify: Whether to verify the server TLS certificate.
    :return: a pylxd.Client, or None when no client certs were found.
    """
    client = None
    (crt, key) = find_lxd_config()

    # Only build a client when client certificates are available.
    # NOTE(review): cert/verify kwargs reconstructed from mangled source —
    # confirm against the pylxd.Client API.
    if crt and key:
        client = pylxd.Client(
            endpoint="https://{}:{}".format(host, port),
            cert=(crt, key),
            verify=verify,
        )

    return client
382 # TODO: This is marked serial but can be run in parallel with work, including:
383 # - Fixing an event loop issue; seems that all tests stop when one test stops?
387 class TestN2VC(object):
389 1. Validator Validation
391 Automatically validate the descriptors we're using here, unless the test author explicitly wants to skip them. Useful to make sure tests aren't being run against invalid descriptors, validating functionality that may fail against a properly written descriptor.
393 We need to have a flag (instance variable) that controls this behavior. It may be necessary to skip validation and run against a descriptor implementing features that have not yet been released in the Information Model.
397 The six phases of integration testing, for the test itself and each charm?:
399 setup/teardown_class:
400 1. Prepare - Verify the environment and create a new model
401 2. Deploy - Mark the test as ready to execute
402 3. Configure - Configuration to reach Active state
403 4. Test - Execute primitive(s) to verify success
404 5. Collect - Collect any useful artifacts for debugging (charm, logs)
405 6. Destroy - Destroy the model
408 1. Prepare - Building of charm
409 2. Deploy - Deploying charm
410 3. Configure - Configuration to reach Active state
411 4. Test - Execute primitive(s) to verify success
412 5. Collect - Collect any useful artifacts for debugging (charm, logs)
413 6. Destroy - Destroy the charm
417 def setup_class(self
):
418 """ setup any state specific to the execution of the given class (which
419 usually contains tests).
421 # Initialize instance variable(s)
424 # Track internal state for each test run
427 # Parse the test's descriptors
428 self
.nsd
= get_descriptor(self
.NSD_YAML
)
429 self
.vnfd
= get_descriptor(self
.VNFD_YAML
)
431 self
.ns_name
= self
.nsd
['name']
432 self
.vnf_name
= self
.vnfd
['name']
435 self
.parse_vnf_descriptor()
436 assert self
.charms
is not {}
438 # Track artifacts, like compiled charms, that will need to be removed
441 # Build the charm(s) needed for this test
442 for charm
in self
.get_charm_names():
443 self
.get_charm(charm
)
445 # A bit of a hack, in order to allow the N2VC callback to run parallel
446 # to pytest. Test(s) should wait for this flag to change to False
449 self
._stopping
= False
452 def teardown_class(self
):
453 """ teardown any state that was previously setup with a call to
456 debug("Running teardown_class...")
459 debug("Destroying LXD containers...")
460 for application
in self
.state
:
461 if self
.state
[application
]['container']:
462 destroy_lxd_container(self
.state
[application
]['container'])
463 debug("Destroying LXD containers...done.")
467 debug("teardown_class(): Logging out of N2VC...")
468 yield from self
.n2vc
.logout()
469 debug("teardown_class(): Logging out of N2VC...done.")
471 debug("Running teardown_class...done.")
472 except Exception as ex
:
473 debug("Exception in teardown_class: {}".format(ex
))
476 def all_charms_active(self
):
477 """Determine if the all deployed charms are active."""
480 for application
in self
.state
:
481 if 'status' in self
.state
[application
]:
482 debug("status of {} is '{}'".format(
484 self
.state
[application
]['status'],
486 if self
.state
[application
]['status'] == 'active':
489 debug("Active charms: {}/{}".format(
494 if active
== len(self
.charms
):
500 def are_tests_finished(self
):
501 appcount
= len(self
.state
)
503 # If we don't have state yet, keep running.
505 debug("No applications")
509 debug("_stopping is True")
513 for application
in self
.state
:
514 if self
.state
[application
]['done']:
517 debug("{}/{} charms tested".format(appdone
, appcount
))
519 if appcount
== appdone
:
525 async def running(self
, timeout
=600):
526 """Returns if the test is still running.
528 @param timeout The time, in seconds, to wait for the test to complete.
530 if self
.are_tests_finished():
534 await asyncio
.sleep(30)
539 def get_charm(self
, charm
):
540 """Build and return the path to the test charm.
542 Builds one of the charms in tests/charms/layers and returns the path
543 to the compiled charm. The charm will automatically be removed when
544 when the test is complete.
546 Returns: The path to the built charm or None if `charm build` failed.
549 # Make sure the charm snap is installed
551 subprocess
.check_call(['which', 'charm'])
552 except subprocess
.CalledProcessError
:
553 raise Exception("charm snap not installed.")
555 if charm
not in self
.artifacts
:
557 # Note: This builds the charm under N2VC/tests/charms/builds/
558 # Currently, the snap-installed command only has write access
559 # to the $HOME (changing in an upcoming release) so writing to
560 # /tmp isn't possible at the moment.
561 builds
= get_charm_path()
563 if not os
.path
.exists("{}/builds/{}".format(builds
, charm
)):
564 cmd
= "charm build --no-local-layers {}/{} -o {}/".format(
569 subprocess
.check_call(shlex
.split(cmd
))
571 except subprocess
.CalledProcessError
as e
:
572 # charm build will return error code 100 if the charm fails
573 # the auto-run of charm proof, which we can safely ignore for
575 if e
.returncode
!= 100:
576 raise Exception("charm build failed: {}.".format(e
))
578 self
.artifacts
[charm
] = {
580 'charm': "{}/builds/{}".format(builds
, charm
),
583 return self
.artifacts
[charm
]['charm']
586 async def deploy(self
, vnf_index
, charm
, params
, loop
):
587 """An inner function to do the deployment of a charm from
592 self
.n2vc
= get_n2vc(loop
=loop
)
594 debug("Creating model for Network Service {}".format(self
.ns_name
))
595 await self
.n2vc
.CreateNetworkService(self
.ns_name
)
597 application
= self
.n2vc
.FormatApplicationName(
603 # Initialize the state of the application
604 self
.state
[application
] = {
605 'status': None, # Juju status
606 'container': None, # lxd container, for proxy charms
607 'actions': {}, # Actions we've executed
608 'done': False, # Are we done testing this charm?
609 'phase': "deploy", # What phase is this application in?
612 debug("Deploying charm at {}".format(self
.artifacts
[charm
]))
614 # If this is a native charm, we need to provision the underlying
615 # machine ala an LXC container.
618 if not self
.isproxy(application
):
619 debug("Creating container for native charm")
620 # args = ("default", application, None, None)
621 self
.state
[application
]['container'] = create_lxd_container(
622 name
=os
.path
.basename(__file__
)
625 hostname
= self
.get_container_ip(
626 self
.state
[application
]['container'],
634 await self
.n2vc
.DeployCharms(
638 self
.get_charm(charm
),
645 def parse_vnf_descriptor(self
):
646 """Parse the VNF descriptor to make running tests easier.
648 Parse the charm information in the descriptor to make it easy to write
649 tests to run again it.
651 Each charm becomes a dictionary in a list:
654 'vnf-member-index': 1,
657 'initial-config-primitive': {},
658 'config-primitive': {}
661 - is this a proxy charm?
662 - what are the initial-config-primitives (day 1)?
663 - what are the config primitives (day 2)?
668 # You'd think this would be explicit, but it's just an incremental
669 # value that should be consistent.
672 """Get all vdu and/or vdu config in a descriptor."""
673 config
= self
.get_config()
677 # Get the name to be used for the deployed application
678 application_name
= n2vc
.vnf
.N2VC().FormatApplicationName(
681 str(vnf_member_index
),
685 'application-name': application_name
,
687 'vnf-member-index': vnf_member_index
,
688 'vnf-name': self
.vnf_name
,
690 'initial-config-primitive': {},
691 'config-primitive': {},
695 charm
['name'] = juju
['charm']
698 charm
['proxy'] = juju
['proxy']
700 if 'initial-config-primitive' in cfg
:
701 charm
['initial-config-primitive'] = \
702 cfg
['initial-config-primitive']
704 if 'config-primitive' in cfg
:
705 charm
['config-primitive'] = cfg
['config-primitive']
707 charms
[application_name
] = charm
709 # Increment the vnf-member-index
710 vnf_member_index
+= 1
715 def isproxy(self
, application_name
):
717 assert application_name
in self
.charms
718 assert 'proxy' in self
.charms
[application_name
]
719 assert type(self
.charms
[application_name
]['proxy']) is bool
721 # debug(self.charms[application_name])
722 return self
.charms
[application_name
]['proxy']
725 def get_config(self
):
726 """Return an iterable list of config items (vdu and vnf).
728 As far as N2VC is concerned, the config section for vdu and vnf are
729 identical. This joins them together so tests only need to iterate
734 """Get all vdu and/or vdu config in a descriptor."""
735 vnf_config
= self
.vnfd
.get("vnf-configuration")
737 juju
= vnf_config
['juju']
739 configs
.append(vnf_config
)
741 for vdu
in self
.vnfd
['vdu']:
742 vdu_config
= vdu
.get('vdu-configuration')
744 juju
= vdu_config
['juju']
746 configs
.append(vdu_config
)
751 def get_charm_names(self
):
752 """Return a list of charms used by the test descriptor."""
756 # Check if the VDUs in this VNF have a charm
757 for config
in self
.get_config():
758 juju
= config
['juju']
761 if name
not in charms
:
767 def get_phase(self
, application
):
768 return self
.state
[application
]['phase']
771 def set_phase(self
, application
, phase
):
772 self
.state
[application
]['phase'] = phase
775 async def configure_proxy_charm(self
, *args
):
776 """Configure a container for use via ssh."""
777 (model
, application
, _
, _
) = args
780 if self
.get_phase(application
) == "deploy":
781 self
.set_phase(application
, "configure")
783 debug("Start CreateContainer for {}".format(application
))
784 self
.state
[application
]['container'] = \
785 await self
.CreateContainer(*args
)
786 debug("Done CreateContainer for {}".format(application
))
788 if self
.state
[application
]['container']:
789 debug("Configure {} for container".format(application
))
790 if await self
.configure_ssh_proxy(application
):
791 await asyncio
.sleep(0.1)
794 debug("Failed to configure container for {}".format(application
))
796 debug("skipping CreateContainer for {}: {}".format(
798 self
.get_phase(application
),
801 except Exception as ex
:
802 debug("configure_proxy_charm exception: {}".format(ex
))
804 await asyncio
.sleep(0.1)
809 async def execute_charm_tests(self
, *args
):
810 (model
, application
, _
, _
) = args
812 debug("Executing charm test(s) for {}".format(application
))
814 if self
.state
[application
]['done']:
815 debug("Trying to execute tests against finished charm...aborting")
819 phase
= self
.get_phase(application
)
820 # We enter the test phase when after deploy (for native charms) or
821 # configure, for proxy charms.
822 if phase
in ["deploy", "configure"]:
823 self
.set_phase(application
, "test")
824 if self
.are_tests_finished():
825 raise Exception("Trying to execute init-config on finished test")
827 if await self
.execute_initial_config_primitives(application
):
829 await self
.check_metrics(application
)
831 debug("Done testing {}".format(application
))
832 self
.state
[application
]['done'] = True
834 except Exception as ex
:
835 debug("Exception in execute_charm_tests: {}".format(ex
))
837 await asyncio
.sleep(0.1)
842 async def CreateContainer(self
, *args
):
843 """Create a LXD container for use with a proxy charm.abs
845 1. Get the public key from the charm via `get-ssh-public-key` action
846 2. Create container with said key injected for the ubuntu user
848 Returns a Container object
850 # Create and configure a LXD container for use with a proxy charm.
851 (model
, application
, _
, _
) = args
853 debug("[CreateContainer] {}".format(args
))
857 # Execute 'get-ssh-public-key' primitive and get returned value
858 uuid
= await self
.n2vc
.ExecutePrimitive(
861 "get-ssh-public-key",
865 result
= await self
.n2vc
.GetPrimitiveOutput(model
, uuid
)
866 pubkey
= result
['pubkey']
868 container
= create_lxd_container(
870 name
=os
.path
.basename(__file__
)
874 except Exception as ex
:
875 debug("Error creating container: {}".format(ex
))
881 async def stop(self
):
885 - Stop and delete containers
888 TODO: Clean up duplicate code between teardown_class() and stop()
890 debug("stop() called")
892 if self
.n2vc
and self
._running
and not self
._stopping
:
893 self
._running
= False
894 self
._stopping
= True
896 for application
in self
.charms
:
898 await self
.n2vc
.RemoveCharms(self
.ns_name
, application
)
900 await self
.n2vc
.DestroyNetworkService(self
.ns_name
)
903 # Wait for the application to be removed
904 await asyncio
.sleep(10)
905 if not await self
.n2vc
.HasApplication(
910 await self
.n2vc
.DestroyNetworkService(self
.ns_name
)
912 # Need to wait for the charm to finish, because native charms
913 if self
.state
[application
]['container']:
914 debug("Deleting LXD container...")
915 destroy_lxd_container(
916 self
.state
[application
]['container']
918 self
.state
[application
]['container'] = None
919 debug("Deleting LXD container...done.")
921 debug("No container found for {}".format(application
))
922 except Exception as e
:
923 debug("Error while deleting container: {}".format(e
))
927 debug("stop(): Logging out of N2VC...")
928 await self
.n2vc
.logout()
930 debug("stop(): Logging out of N2VC...Done.")
931 except Exception as ex
:
934 # Let the test know we're finished.
935 debug("Marking test as finished.")
936 # self._running = False
938 debug("Skipping stop()")
941 def get_container_ip(self
, container
):
942 """Return the IPv4 address of container's eth0 interface."""
945 addresses
= container
.state().network
['eth0']['addresses']
946 # The interface may have more than one address, but we only need
947 # the first one for testing purposes.
948 ipaddr
= addresses
[0]['address']
953 async def configure_ssh_proxy(self
, application
, task
=None):
954 """Configure the proxy charm to use the lxd container.
956 Configure the charm to use a LXD container as it's VNF.
958 debug("Configuring ssh proxy for {}".format(application
))
960 mgmtaddr
= self
.get_container_ip(
961 self
.state
[application
]['container'],
965 "Setting ssh-hostname for {} to {}".format(
971 await self
.n2vc
.ExecutePrimitive(
977 'ssh-hostname': mgmtaddr
,
978 'ssh-username': 'ubuntu',
985 async def execute_initial_config_primitives(self
, application
, task
=None):
986 debug("Executing initial_config_primitives for {}".format(application
))
988 init_config
= self
.charms
[application
]
991 The initial-config-primitive is run during deploy but may fail
992 on some steps because proxy charm access isn't configured.
994 Re-run those actions so we can inspect the status.
996 uuids
= await self
.n2vc
.ExecuteInitialPrimitives(
1003 ExecutePrimitives will return a list of uuids. We need to check the
1004 status of each. The test continues if all Actions succeed, and
1005 fails if any of them fail.
1007 await self
.wait_for_uuids(application
, uuids
)
1008 debug("Primitives for {} finished.".format(application
))
1011 except Exception as ex
:
1012 debug("execute_initial_config_primitives exception: {}".format(ex
))
1017 async def check_metrics(self
, application
, task
=None):
1018 """Check and run metrics, if present.
1020 Checks to see if metrics are specified by the charm. If so, collects
1023 If no metrics, then mark the test as finished.
1025 if has_metrics(self
.charms
[application
]['name']):
1026 debug("Collecting metrics for {}".format(application
))
1028 metrics
= await self
.n2vc
.GetMetrics(
1033 return await self
.verify_metrics(application
, metrics
)
1036 async def verify_metrics(self
, application
, metrics
):
1037 """Verify the charm's metrics.
1039 Verify that the charm has sent metrics successfully.
1041 Stops the test when finished.
1043 debug("Verifying metrics for {}: {}".format(application
, metrics
))
1049 # TODO: Ran into a case where it took 9 attempts before metrics
1050 # were available; the controller is slow sometimes.
1051 await asyncio
.sleep(30)
1052 return await self
.check_metrics(application
)
1055 async def wait_for_uuids(self
, application
, uuids
):
1056 """Wait for primitives to execute.
1058 The task will provide a list of uuids representing primitives that are
1061 debug("Waiting for uuids for {}: {}".format(application
, uuids
))
1062 waitfor
= len(uuids
)
1065 while waitfor
> finished
:
1067 await asyncio
.sleep(10)
1069 if uuid
not in self
.state
[application
]['actions']:
1070 self
.state
[application
]['actions'][uid
] = "pending"
1072 status
= self
.state
[application
]['actions'][uid
]
1074 # Have we already marked this as done?
1075 if status
in ["pending", "running"]:
1077 debug("Getting status of {} ({})...".format(uid
, status
))
1078 status
= await self
.n2vc
.GetPrimitiveStatus(
1082 debug("...state of {} is {}".format(uid
, status
))
1083 self
.state
[application
]['actions'][uid
] = status
1085 if status
in ['completed', 'failed']:
1088 debug("{}/{} actions complete".format(finished
, waitfor
))
1090 # Wait for the primitive to finish and try again
1091 if waitfor
> finished
:
1092 debug("Waiting 10s for action to finish...")
1093 await asyncio
.sleep(10)
1096 def n2vc_callback(self
, *args
, **kwargs
):
1097 (model
, application
, status
, message
) = args
1098 # debug("callback: {}".format(args))
1100 if application
not in self
.state
:
1101 # Initialize the state of the application
1102 self
.state
[application
] = {
1103 'status': None, # Juju status
1104 'container': None, # lxd container, for proxy charms
1105 'actions': {}, # Actions we've executed
1106 'done': False, # Are we done testing this charm?
1107 'phase': "deploy", # What phase is this application in?
1110 self
.state
[application
]['status'] = status
1112 if status
in ['waiting', 'maintenance', 'unknown']:
1113 # Nothing to do for these
1116 debug("callback: {}".format(args
))
1118 if self
.state
[application
]['done']:
1119 debug("{} is done".format(application
))
1122 if status
in ["blocked"] and self
.isproxy(application
):
1123 if self
.state
[application
]['phase'] == "deploy":
1124 debug("Configuring proxy charm for {}".format(application
))
1125 asyncio
.ensure_future(self
.configure_proxy_charm(*args
))
1127 elif status
in ["active"]:
1128 """When a charm is active, we can assume that it has been properly
1129 configured (not blocked), regardless of if it's a proxy or not.
1131 All primitives should be complete by init_config_primitive
1133 asyncio
.ensure_future(self
.execute_charm_tests(*args
))