15 from juju
.controller
import Controller
# Disable InsecureRequestWarning w/LXD
# (the pylxd client talks to LXD over https with a self-signed cert)
urllib3.disable_warnings()
# Quiet urllib3's own chatter so test logs stay readable
logging.getLogger("urllib3").setLevel(logging.WARNING)

# Absolute directory containing this file; used to locate test charms/layers
here = os.path.dirname(os.path.realpath(__file__))
def is_bootstrapped():
    """Return True when `juju switch` reports an active controller."""
    result = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
    # Success requires both a zero exit code and non-empty output
    # (an empty controller name means nothing is bootstrapped).
    return (
        result.returncode == 0 and len(result.stdout.decode().strip()) > 0)
# pytest marker: skip a test unless a bootstrapped Juju controller is present
bootstrapped = pytest.mark.skipif(
    not is_bootstrapped(),
    reason='bootstrapped Juju environment required')
class CleanController():
    """
    Context manager that automatically connects and disconnects from
    the currently active controller.

    Note: Unlike CleanModel, this will not create a new controller for you,
    and an active controller must already be available.
    """
    def __init__(self):
        # Set lazily by __aenter__; None while disconnected.
        self._controller = None

    async def __aenter__(self):
        controller = Controller()
        await controller.connect()
        self._controller = controller
        return controller

    async def __aexit__(self, exc_type, exc, tb):
        await self._controller.disconnect()
def debug(msg):
    """Format debug messages in a consistent way."""
    now = datetime.datetime.now()

    # TODO: Decide on the best way to log. Output from `logging.debug` shows up
    # when a test fails, but print() will always show up when running tox with
    # `-s`, which is really useful for debugging single tests without having to
    # insert a False assert to see the log.
    stamped = "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
    logging.debug(stamped)
    print(stamped)
def get_charm_path():
    """Return the directory holding the compiled test charms."""
    return "{}/charms".format(here)
def get_layer_path():
    """Return the directory holding the test charm layers (sources)."""
    return "{}/charms/layers".format(here)
def collect_metrics(application):
    """Invoke Juju's metrics collector.

    Caveat: this shells out to the `juju collect-metrics` command, rather than
    making an API call. At the time of writing, that API is not exposed through
    the client library.

    :param application: name of the deployed application to collect for
    :raises Exception: when the collect-metrics command fails
    """
    try:
        subprocess.check_call(['juju', 'collect-metrics', application])
    except subprocess.CalledProcessError as e:
        # Chain the original error so the non-zero exit status and command
        # are preserved in the traceback (was previously swallowed).
        raise Exception("Unable to collect metrics: {}".format(e)) from e
def has_metrics(charm):
    """Check if a charm has metrics defined."""
    metricsyaml = "{}/{}/metrics.yaml".format(
        get_layer_path(),
        charm,
    )
    # A metrics.yaml in the charm layer marks the charm as metered.
    return os.path.exists(metricsyaml)
def get_descriptor(descriptor):
    """Parse a descriptor YAML string and strip the catalog envelope.

    Returns the first nsd/vnfd entry from the catalog, or None when the
    envelope root is not recognized.
    """
    desc = None
    # safe_load avoids arbitrary Python object construction from YAML
    # (plain yaml.load with no Loader is deprecated and unsafe).
    tmp = yaml.safe_load(descriptor)

    # Remove the envelope
    root = list(tmp.keys())[0]
    if root == "nsd:nsd-catalog":
        desc = tmp['nsd:nsd-catalog']['nsd'][0]
    elif root == "vnfd:vnfd-catalog":
        desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
    return desc
def get_n2vc(loop=None):
    """Return an instance of N2VC.VNF."""
    log = logging.getLogger()
    log.level = logging.DEBUG

    # Extract parameters from the environment in order to run our test
    vca_host = os.getenv('VCA_HOST', '127.0.0.1')
    vca_port = os.getenv('VCA_PORT', 17070)
    vca_user = os.getenv('VCA_USER', 'admin')
    vca_charms = os.getenv('VCA_CHARMS', None)
    vca_secret = os.getenv('VCA_SECRET', None)

    # NOTE(review): kwarg names besides `artifacts` reconstructed from the
    # N2VC constructor convention — confirm against n2vc.vnf.N2VC.
    client = n2vc.vnf.N2VC(
        log=log,
        server=vca_host,
        port=vca_port,
        user=vca_user,
        secret=vca_secret,
        artifacts=vca_charms,
        loop=loop,
    )
    return client
def create_lxd_container(public_key=None, name="test_name"):
    """
    Returns a container object

    If public_key isn't set, we'll use the Juju ssh key

    :param public_key: The public key to inject into the container
    :param name: The name of the test being run
    """
    container = None

    # Format name so it's valid
    name = name.replace("_", "-").replace(".", "")

    client = get_lxd_client()
    test_machine = "test-{}-{}".format(
        uuid.uuid4().hex[-4:],
        name,
    )

    private_key_path, public_key_path = find_n2vc_ssh_keys()

    try:
        # create profile w/cloud-init and juju ssh key
        if not public_key:
            public_key = ""
            with open(public_key_path, "r") as f:
                public_key = f.readline()

        client.profiles.create(
            test_machine,
            config={
                'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
            devices={
                'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
                'eth0': {
                    'nictype': 'bridged',
                    'parent': 'lxdbr0',
                    'type': 'nic',
                },
            },
        )
    except Exception as ex:
        debug("Error creating lxd profile {}: {}".format(test_machine, ex))
        raise ex

    try:
        # Create and start a new lxd machine using the profile above.
        config = {
            'name': test_machine,
            'source': {
                'type': 'image',
                'alias': 'xenial',
                'mode': 'pull',
                'protocol': 'simplestreams',
                'server': 'https://cloud-images.ubuntu.com/releases',
            },
            'profiles': [test_machine],
        }

        container = client.containers.create(config, wait=True)
        container.start(wait=True)
    except Exception as ex:
        debug("Error creating lxd container {}: {}".format(test_machine, ex))
        # This is a test-ending failure.
        raise ex

    def wait_for_network(container, timeout=30):
        """Wait for eth0 to have an ipv4 address."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if 'eth0' in container.state().network:
                addresses = container.state().network['eth0']['addresses']
                if len(addresses) > 0:
                    if addresses[0]['family'] == 'inet':
                        return addresses[0]
        return None

    try:
        wait_for_network(container)
    except Exception as ex:
        debug(
            "Error waiting for container {} network: {}".format(
                test_machine,
                ex,
            )
        )

    # HACK: We need to give sshd a chance to bind to the interface,
    # and pylxd's container.execute seems to be broken and fails and/or
    # hangs trying to properly check if the service is up.
    (exit_code, stdout, stderr) = container.execute([
        'ping',
        '-c', '5',   # Wait for 5 ECHO_REPLY
        '8.8.8.8',   # Ping Google's public DNS
        '-W', '15',  # Set a 15 second deadline
    ])
    if exit_code != 0:
        # The network never came up inside the container.
        raise Exception("Unable to verify container network")

    return container
def destroy_lxd_container(container):
    """Stop and delete a LXD container.

    Sometimes we see errors talking to LXD -- ephemerial issues like
    load or a bug that's killed the API. We'll do our best to clean
    up here, and we should run a cleanup after all tests are finished
    to remove any extra containers and profiles belonging to us.
    """
    # Callers sometimes stash True/False in the container slot; nothing to do.
    if type(container) is bool:
        return

    name = container.name
    debug("Destroying container {}".format(name))

    client = get_lxd_client()

    def wait_for_stop(timeout=30):
        """Wait (up to timeout seconds) for the container to stop."""
        # Fixed: previous docstring was copy-pasted from the network helper.
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if container.state == "Stopped":
                return

    def wait_for_delete(timeout=30):
        """Wait (up to timeout seconds) for the container to be deleted."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if client.containers.exists(name) is False:
                return

    # Best-effort teardown: each step logs and continues on failure.
    try:
        container.stop(wait=False)
        wait_for_stop()
    except Exception as ex:
        debug(
            "Error stopping container {}: {}".format(
                name,
                ex,
            )
        )

    try:
        container.delete(wait=False)
        wait_for_delete()
    except Exception as ex:
        debug(
            "Error deleting container {}: {}".format(
                name,
                ex,
            )
        )

    try:
        # Delete the profile created for this container
        profile = client.profiles.get(name)
        if profile:
            profile.delete()
    except Exception as ex:
        debug(
            "Error deleting profile {}: {}".format(
                name,
                ex,
            )
        )
def find_lxd_config():
    """Find the LXD configuration directory.

    Returns a (client.crt, client.key) path pair, or (None, None) when no
    usable LXD client certificate/key can be located.
    """
    paths = []
    paths.append(os.path.expanduser("~/.config/lxc"))
    # Alternate location when LXD is installed from a snap.
    paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))

    for path in paths:
        if os.path.exists(path):
            crt = os.path.expanduser("{}/client.crt".format(path))
            key = os.path.expanduser("{}/client.key".format(path))
            if os.path.exists(crt) and os.path.exists(key):
                return (crt, key)

    return (None, None)
def find_n2vc_ssh_keys():
    """Find the N2VC ssh keys.

    Returns (private, public) key paths, or (None, None) if absent.
    """
    paths = []
    paths.append(os.path.expanduser("~/.ssh/"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/id_n2vc_rsa".format(path))
            public = os.path.expanduser("{}/id_n2vc_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)

    return (None, None)
def find_juju_ssh_keys():
    """Find the Juju ssh keys.

    Returns (private, public) key paths, or (None, None) if absent.
    """
    paths = []
    paths.append(os.path.expanduser("~/.local/share/juju/ssh/"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/juju_id_rsa".format(path))
            public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)

    return (None, None)
def get_juju_private_key():
    """Return the path to Juju's private ssh key (or None)."""
    private, _ = find_juju_ssh_keys()
    return private
def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
    """ Get the LXD client."""
    client = None
    (crt, key) = find_lxd_config()

    # Only build a client when a cert/key pair was found; callers get None
    # otherwise.
    if crt and key:
        client = pylxd.Client(
            endpoint="https://{}:{}".format(host, port),
            cert=(crt, key),
            verify=verify,
        )

    return client
382 # TODO: This is marked serial but can be run in parallel with work, including:
383 # - Fixing an event loop issue; seems that all tests stop when one test stops?
class TestN2VC(object):
    """TODO:
    1. Validator Validation

    Automatically validate the descriptors we're using here, unless the test
    author explicitly wants to skip them. Useful to make sure tests aren't
    being run against invalid descriptors, validating functionality that may
    fail against a properly written descriptor.

    We need to have a flag (instance variable) that controls this behavior.
    It may be necessary to skip validation and run against a descriptor
    implementing features that have not yet been released in the
    Information Model.

    The six phases of integration testing, for the test itself and each
    charm?:

    setup/teardown_class:
    1. Prepare - Verify the environment and create a new model
    2. Deploy - Mark the test as ready to execute
    3. Configure - Configuration to reach Active state
    4. Test - Execute primitive(s) to verify success
    5. Collect - Collect any useful artifacts for debugging (charm, logs)
    6. Destroy - Destroy the model

    Per-charm:
    1. Prepare - Building of charm
    2. Deploy - Deploying charm
    3. Configure - Configuration to reach Active state
    4. Test - Execute primitive(s) to verify success
    5. Collect - Collect any useful artifacts for debugging (charm, logs)
    6. Destroy - Destroy the charm
    """
417 def setup_class(self
):
418 """ setup any state specific to the execution of the given class (which
419 usually contains tests).
421 # Initialize instance variable(s)
424 # Track internal state for each test run
427 # Parse the test's descriptors
428 self
.nsd
= get_descriptor(self
.NSD_YAML
)
429 self
.vnfd
= get_descriptor(self
.VNFD_YAML
)
431 self
.ns_name
= self
.nsd
['name']
432 self
.vnf_name
= self
.vnfd
['name']
435 self
.parse_vnf_descriptor()
436 assert self
.charms
is not {}
438 # Track artifacts, like compiled charms, that will need to be removed
441 # Build the charm(s) needed for this test
442 for charm
in self
.get_charm_names():
443 self
.get_charm(charm
)
445 # A bit of a hack, in order to allow the N2VC callback to run parallel
446 # to pytest. Test(s) should wait for this flag to change to False
449 self
._stopping
= False
452 def teardown_class(self
):
453 """ teardown any state that was previously setup with a call to
456 debug("Running teardown_class...")
459 debug("Destroying LXD containers...")
460 for application
in self
.state
:
461 if self
.state
[application
]['container']:
462 destroy_lxd_container(self
.state
[application
]['container'])
463 debug("Destroying LXD containers...done.")
467 debug("teardown_class(): Logging out of N2VC...")
468 yield from self
.n2vc
.logout()
469 debug("teardown_class(): Logging out of N2VC...done.")
471 debug("Running teardown_class...done.")
472 except Exception as ex
:
473 debug("Exception in teardown_class: {}".format(ex
))
476 def all_charms_active(self
):
477 """Determine if the all deployed charms are active."""
480 for application
in self
.state
:
481 if 'status' in self
.state
[application
]:
482 debug("status of {} is '{}'".format(
484 self
.state
[application
]['status'],
486 if self
.state
[application
]['status'] == 'active':
489 debug("Active charms: {}/{}".format(
494 if active
== len(self
.charms
):
500 def are_tests_finished(self
):
501 appcount
= len(self
.state
)
503 # If we don't have state yet, keep running.
505 debug("No applications")
509 debug("_stopping is True")
513 for application
in self
.state
:
514 if self
.state
[application
]['done']:
517 debug("{}/{} charms tested".format(appdone
, appcount
))
519 if appcount
== appdone
:
525 async def running(self
, timeout
=600):
526 """Returns if the test is still running.
528 @param timeout The time, in seconds, to wait for the test to complete.
530 if self
.are_tests_finished():
534 await asyncio
.sleep(30)
539 def get_charm(self
, charm
):
540 """Build and return the path to the test charm.
542 Builds one of the charms in tests/charms/layers and returns the path
543 to the compiled charm. The charm will automatically be removed when
544 when the test is complete.
546 Returns: The path to the built charm or None if `charm build` failed.
549 # Make sure the charm snap is installed
551 subprocess
.check_call(['which', 'charm'])
552 except subprocess
.CalledProcessError
:
553 raise Exception("charm snap not installed.")
555 if charm
not in self
.artifacts
:
557 # Note: This builds the charm under N2VC/tests/charms/builds/
558 # Currently, the snap-installed command only has write access
559 # to the $HOME (changing in an upcoming release) so writing to
560 # /tmp isn't possible at the moment.
561 builds
= get_charm_path()
563 if not os
.path
.exists("{}/builds/{}".format(builds
, charm
)):
564 cmd
= "charm build --no-local-layers {}/{} -o {}/".format(
569 subprocess
.check_call(shlex
.split(cmd
))
571 self
.artifacts
[charm
] = {
573 'charm': "{}/builds/{}".format(builds
, charm
),
575 except subprocess
.CalledProcessError
as e
:
576 raise Exception("charm build failed: {}.".format(e
))
578 return self
.artifacts
[charm
]['charm']
581 async def deploy(self
, vnf_index
, charm
, params
, loop
):
582 """An inner function to do the deployment of a charm from
587 self
.n2vc
= get_n2vc(loop
=loop
)
589 debug("Creating model for Network Service {}".format(self
.ns_name
))
590 await self
.n2vc
.CreateNetworkService(self
.ns_name
)
592 application
= self
.n2vc
.FormatApplicationName(
598 # Initialize the state of the application
599 self
.state
[application
] = {
600 'status': None, # Juju status
601 'container': None, # lxd container, for proxy charms
602 'actions': {}, # Actions we've executed
603 'done': False, # Are we done testing this charm?
604 'phase': "deploy", # What phase is this application in?
607 debug("Deploying charm at {}".format(self
.artifacts
[charm
]))
609 # If this is a native charm, we need to provision the underlying
610 # machine ala an LXC container.
613 if not self
.isproxy(application
):
614 debug("Creating container for native charm")
615 # args = ("default", application, None, None)
616 self
.state
[application
]['container'] = create_lxd_container(
617 name
=os
.path
.basename(__file__
)
620 hostname
= self
.get_container_ip(
621 self
.state
[application
]['container'],
629 await self
.n2vc
.DeployCharms(
633 self
.get_charm(charm
),
640 def parse_vnf_descriptor(self
):
641 """Parse the VNF descriptor to make running tests easier.
643 Parse the charm information in the descriptor to make it easy to write
644 tests to run again it.
646 Each charm becomes a dictionary in a list:
649 'vnf-member-index': 1,
652 'initial-config-primitive': {},
653 'config-primitive': {}
656 - is this a proxy charm?
657 - what are the initial-config-primitives (day 1)?
658 - what are the config primitives (day 2)?
663 # You'd think this would be explicit, but it's just an incremental
664 # value that should be consistent.
667 """Get all vdu and/or vdu config in a descriptor."""
668 config
= self
.get_config()
672 # Get the name to be used for the deployed application
673 application_name
= n2vc
.vnf
.N2VC().FormatApplicationName(
676 str(vnf_member_index
),
680 'application-name': application_name
,
682 'vnf-member-index': vnf_member_index
,
683 'vnf-name': self
.vnf_name
,
685 'initial-config-primitive': {},
686 'config-primitive': {},
690 charm
['name'] = juju
['charm']
693 charm
['proxy'] = juju
['proxy']
695 if 'initial-config-primitive' in cfg
:
696 charm
['initial-config-primitive'] = \
697 cfg
['initial-config-primitive']
699 if 'config-primitive' in cfg
:
700 charm
['config-primitive'] = cfg
['config-primitive']
702 charms
[application_name
] = charm
704 # Increment the vnf-member-index
705 vnf_member_index
+= 1
    def isproxy(self, application_name):
        """Return True if the named application's charm is a proxy charm.

        Raises AssertionError when the application is unknown or its parsed
        descriptor entry lacks a boolean 'proxy' flag.
        """
        assert application_name in self.charms
        assert 'proxy' in self.charms[application_name]
        assert type(self.charms[application_name]['proxy']) is bool

        # debug(self.charms[application_name])
        return self.charms[application_name]['proxy']
720 def get_config(self
):
721 """Return an iterable list of config items (vdu and vnf).
723 As far as N2VC is concerned, the config section for vdu and vnf are
724 identical. This joins them together so tests only need to iterate
729 """Get all vdu and/or vdu config in a descriptor."""
730 vnf_config
= self
.vnfd
.get("vnf-configuration")
732 juju
= vnf_config
['juju']
734 configs
.append(vnf_config
)
736 for vdu
in self
.vnfd
['vdu']:
737 vdu_config
= vdu
.get('vdu-configuration')
739 juju
= vdu_config
['juju']
741 configs
.append(vdu_config
)
746 def get_charm_names(self
):
747 """Return a list of charms used by the test descriptor."""
751 # Check if the VDUs in this VNF have a charm
752 for config
in self
.get_config():
753 juju
= config
['juju']
756 if name
not in charms
:
    def get_phase(self, application):
        """Return the current test phase for the given application."""
        return self.state[application]['phase']
    def set_phase(self, application, phase):
        """Record the current test phase for the given application."""
        self.state[application]['phase'] = phase
770 async def configure_proxy_charm(self
, *args
):
771 """Configure a container for use via ssh."""
772 (model
, application
, _
, _
) = args
775 if self
.get_phase(application
) == "deploy":
776 self
.set_phase(application
, "configure")
778 debug("Start CreateContainer for {}".format(application
))
779 self
.state
[application
]['container'] = \
780 await self
.CreateContainer(*args
)
781 debug("Done CreateContainer for {}".format(application
))
783 if self
.state
[application
]['container']:
784 debug("Configure {} for container".format(application
))
785 if await self
.configure_ssh_proxy(application
):
786 await asyncio
.sleep(0.1)
789 debug("Failed to configure container for {}".format(application
))
791 debug("skipping CreateContainer for {}: {}".format(
793 self
.get_phase(application
),
796 except Exception as ex
:
797 debug("configure_proxy_charm exception: {}".format(ex
))
799 await asyncio
.sleep(0.1)
804 async def execute_charm_tests(self
, *args
):
805 (model
, application
, _
, _
) = args
807 debug("Executing charm test(s) for {}".format(application
))
809 if self
.state
[application
]['done']:
810 debug("Trying to execute tests against finished charm...aborting")
814 phase
= self
.get_phase(application
)
815 # We enter the test phase when after deploy (for native charms) or
816 # configure, for proxy charms.
817 if phase
in ["deploy", "configure"]:
818 self
.set_phase(application
, "test")
819 if self
.are_tests_finished():
820 raise Exception("Trying to execute init-config on finished test")
822 if await self
.execute_initial_config_primitives(application
):
824 await self
.check_metrics(application
)
826 debug("Done testing {}".format(application
))
827 self
.state
[application
]['done'] = True
829 except Exception as ex
:
830 debug("Exception in execute_charm_tests: {}".format(ex
))
832 await asyncio
.sleep(0.1)
837 async def CreateContainer(self
, *args
):
838 """Create a LXD container for use with a proxy charm.abs
840 1. Get the public key from the charm via `get-ssh-public-key` action
841 2. Create container with said key injected for the ubuntu user
843 Returns a Container object
845 # Create and configure a LXD container for use with a proxy charm.
846 (model
, application
, _
, _
) = args
848 debug("[CreateContainer] {}".format(args
))
852 # Execute 'get-ssh-public-key' primitive and get returned value
853 uuid
= await self
.n2vc
.ExecutePrimitive(
856 "get-ssh-public-key",
860 result
= await self
.n2vc
.GetPrimitiveOutput(model
, uuid
)
861 pubkey
= result
['pubkey']
863 container
= create_lxd_container(
865 name
=os
.path
.basename(__file__
)
869 except Exception as ex
:
870 debug("Error creating container: {}".format(ex
))
876 async def stop(self
):
880 - Stop and delete containers
883 TODO: Clean up duplicate code between teardown_class() and stop()
885 debug("stop() called")
887 if self
.n2vc
and self
._running
and not self
._stopping
:
888 self
._running
= False
889 self
._stopping
= True
891 for application
in self
.charms
:
893 await self
.n2vc
.RemoveCharms(self
.ns_name
, application
)
895 await self
.n2vc
.DestroyNetworkService(self
.ns_name
)
898 # Wait for the application to be removed
899 await asyncio
.sleep(10)
900 if not await self
.n2vc
.HasApplication(
906 # Need to wait for the charm to finish, because native charms
907 if self
.state
[application
]['container']:
908 debug("Deleting LXD container...")
909 destroy_lxd_container(
910 self
.state
[application
]['container']
912 self
.state
[application
]['container'] = None
913 debug("Deleting LXD container...done.")
915 debug("No container found for {}".format(application
))
916 except Exception as e
:
917 debug("Error while deleting container: {}".format(e
))
921 debug("stop(): Logging out of N2VC...")
922 await self
.n2vc
.logout()
924 debug("stop(): Logging out of N2VC...Done.")
925 except Exception as ex
:
928 # Let the test know we're finished.
929 debug("Marking test as finished.")
930 # self._running = False
932 debug("Skipping stop()")
935 def get_container_ip(self
, container
):
936 """Return the IPv4 address of container's eth0 interface."""
939 addresses
= container
.state().network
['eth0']['addresses']
940 # The interface may have more than one address, but we only need
941 # the first one for testing purposes.
942 ipaddr
= addresses
[0]['address']
947 async def configure_ssh_proxy(self
, application
, task
=None):
948 """Configure the proxy charm to use the lxd container.
950 Configure the charm to use a LXD container as it's VNF.
952 debug("Configuring ssh proxy for {}".format(application
))
954 mgmtaddr
= self
.get_container_ip(
955 self
.state
[application
]['container'],
959 "Setting ssh-hostname for {} to {}".format(
965 await self
.n2vc
.ExecutePrimitive(
971 'ssh-hostname': mgmtaddr
,
972 'ssh-username': 'ubuntu',
979 async def execute_initial_config_primitives(self
, application
, task
=None):
980 debug("Executing initial_config_primitives for {}".format(application
))
982 init_config
= self
.charms
[application
]
985 The initial-config-primitive is run during deploy but may fail
986 on some steps because proxy charm access isn't configured.
988 Re-run those actions so we can inspect the status.
990 uuids
= await self
.n2vc
.ExecuteInitialPrimitives(
997 ExecutePrimitives will return a list of uuids. We need to check the
998 status of each. The test continues if all Actions succeed, and
999 fails if any of them fail.
1001 await self
.wait_for_uuids(application
, uuids
)
1002 debug("Primitives for {} finished.".format(application
))
1005 except Exception as ex
:
1006 debug("execute_initial_config_primitives exception: {}".format(ex
))
1011 async def check_metrics(self
, application
, task
=None):
1012 """Check and run metrics, if present.
1014 Checks to see if metrics are specified by the charm. If so, collects
1017 If no metrics, then mark the test as finished.
1019 if has_metrics(self
.charms
[application
]['name']):
1020 debug("Collecting metrics for {}".format(application
))
1022 metrics
= await self
.n2vc
.GetMetrics(
1027 return await self
.verify_metrics(application
, metrics
)
1030 async def verify_metrics(self
, application
, metrics
):
1031 """Verify the charm's metrics.
1033 Verify that the charm has sent metrics successfully.
1035 Stops the test when finished.
1037 debug("Verifying metrics for {}: {}".format(application
, metrics
))
1043 # TODO: Ran into a case where it took 9 attempts before metrics
1044 # were available; the controller is slow sometimes.
1045 await asyncio
.sleep(30)
1046 return await self
.check_metrics(application
)
1049 async def wait_for_uuids(self
, application
, uuids
):
1050 """Wait for primitives to execute.
1052 The task will provide a list of uuids representing primitives that are
1055 debug("Waiting for uuids for {}: {}".format(application
, uuids
))
1056 waitfor
= len(uuids
)
1059 while waitfor
> finished
:
1061 await asyncio
.sleep(10)
1063 if uuid
not in self
.state
[application
]['actions']:
1064 self
.state
[application
]['actions'][uid
] = "pending"
1066 status
= self
.state
[application
]['actions'][uid
]
1068 # Have we already marked this as done?
1069 if status
in ["pending", "running"]:
1071 debug("Getting status of {} ({})...".format(uid
, status
))
1072 status
= await self
.n2vc
.GetPrimitiveStatus(
1076 debug("...state of {} is {}".format(uid
, status
))
1077 self
.state
[application
]['actions'][uid
] = status
1079 if status
in ['completed', 'failed']:
1082 debug("{}/{} actions complete".format(finished
, waitfor
))
1084 # Wait for the primitive to finish and try again
1085 if waitfor
> finished
:
1086 debug("Waiting 10s for action to finish...")
1087 await asyncio
.sleep(10)
1090 def n2vc_callback(self
, *args
, **kwargs
):
1091 (model
, application
, status
, message
) = args
1092 # debug("callback: {}".format(args))
1094 if application
not in self
.state
:
1095 # Initialize the state of the application
1096 self
.state
[application
] = {
1097 'status': None, # Juju status
1098 'container': None, # lxd container, for proxy charms
1099 'actions': {}, # Actions we've executed
1100 'done': False, # Are we done testing this charm?
1101 'phase': "deploy", # What phase is this application in?
1104 self
.state
[application
]['status'] = status
1106 if status
in ['waiting', 'maintenance', 'unknown']:
1107 # Nothing to do for these
1110 debug("callback: {}".format(args
))
1112 if self
.state
[application
]['done']:
1113 debug("{} is done".format(application
))
1116 if status
in ["blocked"] and self
.isproxy(application
):
1117 if self
.state
[application
]['phase'] == "deploy":
1118 debug("Configuring proxy charm for {}".format(application
))
1119 asyncio
.ensure_future(self
.configure_proxy_charm(*args
))
1121 elif status
in ["active"]:
1122 """When a charm is active, we can assume that it has been properly
1123 configured (not blocked), regardless of if it's a proxy or not.
1125 All primitives should be complete by init_config_primitive
1127 asyncio
.ensure_future(self
.execute_charm_tests(*args
))