From 59178d705894364069e2a2cb1ab6af68af1c1249 Mon Sep 17 00:00:00 2001
From: Jeremy Mordkoff
Date: Thu, 31 Mar 2016 19:37:12 -0400
Subject: [PATCH] Initial submission of RIFT.ware 4.1.1.0 to ETSI/OSM

Signed-off-by: Jeremy Mordkoff
---
 .gitignore | 28 + .gitmodules | 45 + .gitmodules.deps | 24 + CMakeLists.txt | 496 +++ LICENSE | 13 + Makefile | 591 +++ Makefile.env | 50 + Makefile.top | 175 + RELEASE | 1 + RIFTWARE_COMPILATION_LICENSE | 244 ++ bin/build_ladder.sh | 228 ++ bin/catchsegv.sh | 151 + bin/dependency_parser.py | 219 ++ bin/dependency_sort.py | 105 + bin/dev.sh | 165 + bin/extract_rpm.sh | 121 + bin/generate_descriptor_pkg.sh | 38 + bin/generate_protopy.sh | 38 + bin/generate_supermodule_hash.sh | 43 + bin/keepers | 18 + bin/make_etsi_packages | 7 + bin/pip-install | 9 + bin/pip3-install | 24 + bin/pip3-kilo-install | 19 + bin/rift-lint.py | 429 +++ bin/submodule_has_failed_tests.sh | 71 + bin/uninitialize_cached_submodules.sh | 131 + modules/core/mano/.cpack-workaround | 0 modules/core/mano/CMakeLists.txt | 72 + modules/core/mano/Makefile | 24 + modules/core/mano/README | 9 + modules/core/mano/common/CMakeLists.txt | 30 + .../core/mano/common/plugins/CMakeLists.txt | 19 + .../plugins/rwcntmgrtasklet/CMakeLists.txt | 26 + .../common/plugins/rwcntmgrtasklet/Makefile | 24 + .../rift/tasklets/rwcntmgrtasklet/__init__.py | 16 + .../rwcntmgrtasklet/rwcntmgrtasklet.py | 331 ++ .../rwcntmgrtasklet/rwcntmgrtasklet.py | 30 + .../mano/common/plugins/yang/CMakeLists.txt | 27 + .../common/plugins/yang/rw-cloud.tailf.yang | 29 + .../mano/common/plugins/yang/rw-cloud.yang | 81 + .../plugins/yang/rw-config-agent.taif.yang | 17 + .../common/plugins/yang/rw-config-agent.yang | 83 + .../common/plugins/yang/rw-sdn.tailf.yang | 17 + .../core/mano/common/plugins/yang/rw-sdn.yang | 47 + .../core/mano/common/python/CMakeLists.txt | 18 + .../common/python/rift/mano/cloud/__init__.py | 28 + .../common/python/rift/mano/cloud/accounts.py | 174 + .../common/python/rift/mano/cloud/config.py | 256 ++ .../common/python/rift/mano/cloud/operdata.py | 114 + .../python/rift/mano/config_agent/__init__.py | 24 + .../python/rift/mano/config_agent/config.py | 219 ++ .../python/rift/mano/config_agent/operdata.py | 461 +++ modules/core/mano/common/rw_gen_package.py | 98 + modules/core/mano/confd_client/CMakeLists.txt | 16 + modules/core/mano/confd_client/Makefile | 24 + modules/core/mano/confd_client/README | 8 + modules/core/mano/confd_client/confd_client.c | 436 +++ .../core/mano/confd_client/confd_client.py | 165 + .../core/mano/confd_client/confd_client.sh | 54 + modules/core/mano/confd_client/test.sh | 38 + modules/core/mano/examples/CMakeLists.txt | 23 + modules/core/mano/examples/Makefile | 24 + .../mano/examples/ping_pong_ns/CMakeLists.txt | 49 + .../core/mano/examples/ping_pong_ns/Makefile | 24 + .../mano/examples/ping_pong_ns/config_desc.py | 147 + .../ping_pong_ns/generate_packages.sh.in | 78 + .../ping_pong_ns/ping_pong_ns/__init__.py | 17 + .../ping_pong_ns/ping_pong_ns/ping.py | 314 ++ .../ping_pong_ns/ping_pong_ns/ping.service | 12 + .../ping_pong_ns/ping_pong_ns/pong.py | 334 ++ .../ping_pong_ns/ping_pong_ns/pong.service | 12 + .../ping_pong_ns/prepare_ping_pong_qcow.sh | 139 + .../ping_pong_ns/ping_pong_ns/start_ping | 5 + .../ping_pong_ns/ping_pong_ns/start_ping.py | 92 + .../ping_pong_ns/ping_pong_ns/start_pong | 5 + .../ping_pong_ns/ping_pong_ns/start_pong.py | 94 + .../ping_pong_ns/ping_pong_ns/test/test.sh | 151 + .../ping_pong_ns/ping_pong_ns/user-data | 8 + .../ping_pong_ns/util/__init__.py | 17 
+ .../ping_pong_ns/ping_pong_ns/util/util.py | 40 + .../examples/ping_pong_ns/ping_pong_nsd.py | 1 + .../ping_pong_ns/rift/mano/__init__.py | 15 + .../rift/mano/examples/__init__.py | 15 + .../rift/mano/examples/ping_pong_nsd.py | 588 +++ modules/core/mano/foss.txt | 1 + modules/core/mano/manifest/LICENSE | 0 modules/core/mano/models/CMakeLists.txt | 19 + .../core/mano/models/openmano/CMakeLists.txt | 15 + .../mano/models/openmano/bin/CMakeLists.txt | 15 + .../models/openmano/bin/add_corporation.py | 528 +++ .../core/mano/models/openmano/bin/openmano | 1069 ++++++ .../models/openmano/bin/openmano_cleanup.sh | 29 + .../models/openmano/python/CMakeLists.txt | 13 + .../openmano/python/rift/openmano/__init__.py | 15 + .../python/rift/openmano/openmano_client.py | 479 +++ .../python/rift/openmano/rift2openmano.py | 515 +++ .../mano/models/openmano/src/CMakeLists.txt | 71 + .../src/generate_tidgen_packages.sh.in | 40 + .../mano/models/openmano/src/openmano2rift.py | 486 +++ .../test/osm_descriptors/mwc16-gen_test.py | 157 + .../openmano_descriptors/6WindTR1.1.2.yaml | 99 + .../Scenarios PE- Gen.jpg | Bin 0 -> 156047 bytes .../openmano_descriptors/TID-MGMTGW.yaml | 61 + .../openmano_descriptors/mwc16-gen.yaml | 54 + .../openmano_descriptors/mwc16-pe.yaml | 189 + .../steps-openmano-openvim.txt | 38 + .../openmano_descriptors/steps-openvim.txt | 42 + .../openmano_descriptors/tidgen4pLarge.yaml | 89 + .../rift_descriptors/mwc16-gen.xml | 84 + .../rift_descriptors/tidgen4pLarge.xml | 139 + .../test/osm_mwc_desctriptors/checksums.txt | 18 + .../test/osm_mwc_desctriptors/gen_pkgs.sh | 67 + .../osm_mwc_desctriptors/mwc16-gen_test.py | 258 ++ .../openmano_scenarios/IMS-allin1-corpA.yaml | 33 + .../openmano_scenarios/IMS-allin1-corpB.yaml | 33 + .../openmano_scenarios/mwc16-gen.yaml | 61 + .../openmano_scenarios/mwc16-pe.yaml | 79 + .../openmano_vnfs/6WindTR1.1.2.yaml | 81 + .../openmano_vnfs/IMS-ALLin1.yaml | 39 + .../openmano_vnfs/mwc16-gen1.yaml | 89 + .../openmano_vnfs/mwc16-gen2.yaml | 89 + .../rift_scenarios/IMS-corpA.xml | 45 + .../rift_scenarios/IMS-corpB.xml | 45 + .../rift_scenarios/mwc16-gen.xml | 104 + .../rift_scenarios/mwc16-pe-onevnf.xml | 60 + .../rift_scenarios/mwc16-pe.xml | 156 + .../rift_vnfs/6WindTR1.1.2.xml | 151 + .../rift_vnfs/IMS-ALLIN1.xml | 68 + .../rift_vnfs/mwc16gen1.xml | 141 + .../rift_vnfs/mwc16gen2.xml | 141 + .../20160212_openmano_RO_descriptors.zip | Bin 0 -> 6904 bytes .../osm_mwc_generic_descriptors/gen_pkgs.sh | 67 + .../mwc16-gen_test.py | 314 ++ .../new_gwcorpa/gw_corpA_PE1.yaml | 55 + .../new_gwcorpa/gw_corpA_PE2.yaml | 55 + .../new_gwcorpa/gwcorpA.yaml | 72 + .../openmano_scenarios/IMS-allin1-corpA.yaml | 41 + .../openmano_scenarios/IMS-allin1-corpB.yaml | 33 + .../ORIG_IMS-allin1-corpA.yaml | 33 + .../openmano_scenarios/gwcorpA.yaml | 71 + .../openmano_scenarios/mwc16-gen.yaml | 56 + .../openmano_scenarios/mwc16-pe.yaml | 79 + .../openmano_vnfs/6WindTR1.1.2.yaml | 81 + .../openmano_vnfs/IMS-ALLin1.yaml | 46 + .../openmano_vnfs/gw_corpA_PE1.yaml | 55 + .../openmano_vnfs/gw_corpA_PE2.yaml | 55 + .../openmano_vnfs/mwc16-gen1.yaml | 89 + .../openmano_vnfs/mwc16-gen2.yaml | 89 + .../rift_scenarios/IMS-corpA.xml | 149 + .../rift_scenarios/IMS-corpB.xml | 45 + .../rift_scenarios/add_corpA_input.yaml | 53 + .../rift_scenarios/gwcorpA.xml | 123 + .../rift_scenarios/mwc16-gen.xml | 91 + .../rift_scenarios/mwc16-pe-onevnf.xml | 60 + .../rift_scenarios/mwc16-pe.xml | 566 +++ .../rift_scenarios/pe_config.py | 383 ++ .../rift_vnfs/6WindTR1.1.2.xml | 151 + 
.../rift_vnfs/IMS-ALLIN1.xml | 81 + .../rift_vnfs/gw-corpa-pe1.xml | 94 + .../rift_vnfs/gw-corpa-pe2.xml | 94 + .../rift_vnfs/mwc16gen1.xml | 141 + .../rift_vnfs/mwc16gen2.xml | 141 + .../scenarios/IMS-allin1-corpA.yaml.generic | 24 + .../scenarios/gwcorpA.yaml | 59 + .../scenarios/gwcorpA.yaml.generic | 92 + .../scenarios/mwc16-pe.yaml | 79 + .../scenarios/tidgen.yaml | 56 + .../vnfs/6WindTR1.1.2.yaml | 81 + .../vnfs/IMS-ALLin1_2p.yaml | 46 + .../vnfs/gw_corpA_PE1.yaml | 48 + .../vnfs/gw_corpA_PE2.yaml | 48 + .../vnfs/tidgen_mwc16.yaml | 89 + .../openmano/test/tidgen_ns_2sriov.yaml | 49 + .../test/tidgen_ns_2sriov_no_ctrlnet.yaml | 44 + .../openmano/test/tidgen_ns_4sriov.yaml | 57 + .../test/tidgen_ns_4sriov_no_ctrlnet.yaml | 48 + .../openmano/test/tidgen_vnf_2sriov.yaml | 73 + .../test/tidgen_vnf_2sriov_no_ctrlnet.yaml | 65 + .../openmano/test/tidgen_vnf_4sriov.yaml | 91 + .../test/tidgen_vnf_4sriov_no_ctrlnet.yaml | 83 + .../core/mano/models/plugins/CMakeLists.txt | 13 + .../mano/models/plugins/yang/CMakeLists.txt | 47 + .../core/mano/models/plugins/yang/Makefile | 24 + .../plugins/yang/ietf-l2-topology.tailf.yang | 40 + .../models/plugins/yang/ietf-l2-topology.yang | 578 +++ .../yang/ietf-network-topology.tailf.yang | 34 + .../plugins/yang/ietf-network-topology.yang | 257 ++ .../plugins/yang/ietf-network.tailf.yang | 31 + .../models/plugins/yang/ietf-network.yang | 157 + .../mano/models/plugins/yang/nsd.tailf.yang | 25 + .../core/mano/models/plugins/yang/nsd.yang | 871 +++++ .../core/mano/models/plugins/yang/nsr.cli.xml | 9 + .../mano/models/plugins/yang/nsr.tailf.yang | 35 + .../core/mano/models/plugins/yang/nsr.yang | 859 +++++ .../plugins/yang/odl-network-topology.yang | 347 ++ .../core/mano/models/plugins/yang/pnfd.yang | 92 + .../models/plugins/yang/rw-nsd.tailf.yang | 25 + .../core/mano/models/plugins/yang/rw-nsd.yang | 45 + .../models/plugins/yang/rw-nsr.tailf.yang | 38 + .../core/mano/models/plugins/yang/rw-nsr.yang | 287 ++ .../plugins/yang/rw-topology.tailf.yang | 34 + .../mano/models/plugins/yang/rw-topology.yang | 114 + .../models/plugins/yang/rw-vld.tailf.yang | 25 + .../core/mano/models/plugins/yang/rw-vld.yang | 27 + .../models/plugins/yang/rw-vlr.tailf.yang | 25 + .../core/mano/models/plugins/yang/rw-vlr.yang | 55 + .../models/plugins/yang/rw-vnfd.tailf.yang | 25 + .../mano/models/plugins/yang/rw-vnfd.yang | 105 + .../models/plugins/yang/rw-vnfr.tailf.yang | 37 + .../mano/models/plugins/yang/rw-vnfr.yang | 261 ++ .../mano/models/plugins/yang/vld.tailf.yang | 25 + .../core/mano/models/plugins/yang/vld.yang | 129 + .../core/mano/models/plugins/yang/vlr.cli.xml | 9 + .../mano/models/plugins/yang/vlr.tailf.yang | 28 + .../core/mano/models/plugins/yang/vlr.yang | 159 + .../mano/models/plugins/yang/vnfd.tailf.yang | 25 + .../core/mano/models/plugins/yang/vnfd.yang | 461 +++ .../core/mano/models/plugins/yang/vnffgd.yang | 71 + .../mano/models/plugins/yang/vnfr.cli.xml | 9 + .../mano/models/plugins/yang/vnfr.tailf.yang | 29 + .../core/mano/models/plugins/yang/vnfr.yang | 459 +++ modules/core/mano/rwcm/CMakeLists.txt | 23 + modules/core/mano/rwcm/plugins/CMakeLists.txt | 18 + .../core/mano/rwcm/plugins/cli/cli_rwcm.xml | 73 + .../mano/rwcm/plugins/rwconman/CMakeLists.txt | 41 + .../rift/tasklets/rwconmantasklet/__init__.py | 16 + .../rift/tasklets/rwconmantasklet/juju_if.py | 659 ++++ .../rwconmantasklet/rwconman_config.py | 953 +++++ .../rwconmantasklet/rwconman_events.py | 481 +++ .../rwconman_test_config_template.cfg | 27 + .../rwconmantasklet/rwconmantasklet.py | 180 + 
.../tasklets/rwconmantasklet/xlate_cfg.py | 202 + .../tasklets/rwconmantasklet/xlate_tags.yml | 53 + .../rwcm/plugins/rwconman/rwconmantasklet.py | 29 + .../mano/rwcm/plugins/yang/CMakeLists.txt | 30 + .../rwcm/plugins/yang/rw-conman.tailf.yang | 22 + .../mano/rwcm/plugins/yang/rw-conman.yang | 236 ++ modules/core/mano/rwcm/test/CMakeLists.txt | 27 + modules/core/mano/rwcm/test/README.start_cm | 4 + .../configuration_input_params.yml | 35 + .../cwaio_vnfd_1_juju_template.cfg | 23 + .../configuration_input_params.yml | 38 + .../ping_vnfd_1_scriptconf_template.cfg | 54 + .../pong_vnfd_11_scriptconf_template.cfg | 42 + modules/core/mano/rwcm/test/rwso_test.py | 353 ++ .../core/mano/rwcm/test/start_cm_system.py | 134 + .../configuration_input_params.yml | 38 + .../trafgen_vnfd_1_netconf_template.cfg | 79 + .../trafsink_vnfd_3_netconf_template.cfg | 42 + modules/core/mano/rwlaunchpad/CMakeLists.txt | 25 + .../mano/rwlaunchpad/plugins/CMakeLists.txt | 26 + .../rwlaunchpad/plugins/rwiwp/CMakeLists.txt | 26 + .../mano/rwlaunchpad/plugins/rwiwp/Makefile | 24 + .../rift/tasklets/rwiwptasklet/__init__.py | 16 + .../tasklets/rwiwptasklet/rwiwptasklet.py | 621 +++ .../rwlaunchpad/plugins/rwiwp/rwiwptasklet.py | 30 + .../plugins/rwlaunchpadtasklet/CMakeLists.txt | 32 + .../plugins/rwlaunchpadtasklet/Makefile | 24 + .../rift/tasklets/rwlaunchpad/__init__.py | 16 + .../rift/tasklets/rwlaunchpad/archive.py | 268 ++ .../rift/tasklets/rwlaunchpad/checksums.py | 65 + .../rift/tasklets/rwlaunchpad/convert.py | 97 + .../rift/tasklets/rwlaunchpad/datacenters.py | 134 + .../rift/tasklets/rwlaunchpad/message.py | 346 ++ .../rift/tasklets/rwlaunchpad/tasklet.py | 511 +++ .../rift/tasklets/rwlaunchpad/uploader.py | 1379 +++++++ .../plugins/rwlaunchpadtasklet/rwlaunchpad.py | 30 + .../plugins/rwmonitor/CMakeLists.txt | 27 + .../rwlaunchpad/plugins/rwmonitor/Makefile | 24 + .../rift/tasklets/rwmonitor/__init__.py | 16 + .../rwmonitor/rift/tasklets/rwmonitor/core.py | 542 +++ .../rift/tasklets/rwmonitor/tasklet.py | 443 +++ .../plugins/rwmonitor/rwmonitor.py | 30 + .../rwlaunchpad/plugins/rwnsm/CMakeLists.txt | 38 + .../mano/rwlaunchpad/plugins/rwnsm/Makefile | 24 + .../rift/tasklets/rwnsmtasklet/__init__.py | 16 + .../rwnsm/rift/tasklets/rwnsmtasklet/cloud.py | 211 ++ .../rwnsmtasklet/config_value_pool.py | 152 + .../rift/tasklets/rwnsmtasklet/juju_intf.py | 634 ++++ .../tasklets/rwnsmtasklet/jujuconf_nsm.py | 726 ++++ .../tasklets/rwnsmtasklet/openmano_nsm.py | 573 +++ .../rift/tasklets/rwnsmtasklet/publisher.py | 228 ++ .../tasklets/rwnsmtasklet/rwnsm_conagent.py | 244 ++ .../tasklets/rwnsmtasklet/rwnsm_conman.py | 314 ++ .../rwnsmtasklet/rwnsmconfigplugin.py | 183 + .../rift/tasklets/rwnsmtasklet/rwnsmplugin.py | 114 + .../tasklets/rwnsmtasklet/rwnsmtasklet.py | 3185 ++++++++++++++++ .../rift/tasklets/rwnsmtasklet/rwvnffgmgr.py | 361 ++ .../tasklets/rwnsmtasklet/so_endpoint_cfg.xml | 23 + .../rwnsm/rift/tasklets/rwnsmtasklet/xpath.py | 363 ++ .../rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py | 30 + .../plugins/rwresmgr/CMakeLists.txt | 29 + .../rwlaunchpad/plugins/rwresmgr/Makefile | 24 + .../rift/tasklets/rwresmgrtasklet/__init__.py | 16 + .../rwresmgrtasklet/rwresmgr_config.py | 123 + .../tasklets/rwresmgrtasklet/rwresmgr_core.py | 1185 ++++++ .../rwresmgrtasklet/rwresmgr_events.py | 270 ++ .../rwresmgrtasklet/rwresmgrtasklet.py | 234 ++ .../plugins/rwresmgr/rwresmgrtasklet.py | 29 + .../plugins/rwresmgr/test/rmmgr_test.py | 784 ++++ .../rwlaunchpad/plugins/rwvnfm/CMakeLists.txt | 27 + 
.../mano/rwlaunchpad/plugins/rwvnfm/Makefile | 24 + .../rift/tasklets/rwvnfmtasklet/__init__.py | 17 + .../rift/tasklets/rwvnfmtasklet/mon_params.py | 678 ++++ .../tasklets/rwvnfmtasklet/rwvnfmtasklet.py | 2396 ++++++++++++ .../plugins/rwvnfm/rwvnfmtasklet.py | 30 + .../plugins/rwvnfm/test/mon_params_test.py | 514 +++ .../rwlaunchpad/plugins/rwvns/CMakeLists.txt | 39 + .../mano/rwlaunchpad/plugins/rwvns/Makefile | 24 + .../rift/tasklets/rwvnstasklet/__init__.py | 16 + .../tasklets/rwvnstasklet/rwvnstasklet.py | 356 ++ .../plugins/rwvns/rift/topmgr/__init__.py | 38 + .../plugins/rwvns/rift/topmgr/core.py | 50 + .../plugins/rwvns/rift/topmgr/mock.py | 51 + .../rwvns/rift/topmgr/rwtopdatastore.py | 187 + .../plugins/rwvns/rift/topmgr/rwtopmgr.py | 253 ++ .../plugins/rwvns/rift/topmgr/sdnsim.py | 62 + .../plugins/rwvns/rift/vlmgr/__init__.py | 27 + .../plugins/rwvns/rift/vlmgr/rwvlmgr.py | 468 +++ .../rwlaunchpad/plugins/rwvns/rwvnstasklet.py | 30 + .../test/create_stackedProvNettopology.py | 333 ++ .../rwvns/test/create_stackedSfctopology.py | 278 ++ .../rwvns/test/create_stackedVMNettopology.py | 333 ++ .../rwvns/test/create_stackedl2topology.py | 262 ++ .../plugins/rwvns/test/test_sdn_mock.py | 99 + .../plugins/rwvns/test/test_sdn_sim.py | 97 + .../plugins/rwvns/test/test_top_datastore.py | 732 ++++ .../plugins/rwvns/test/topmgr_module_test.py | 193 + .../plugins/rwvns/vala/CMakeLists.txt | 59 + .../rwvns/vala/rwsdn-python/CMakeLists.txt | 8 + .../rwvns/vala/rwsdn-python/rwsdn-plugin.py | 96 + .../rwlaunchpad/plugins/rwvns/vala/rwsdn.vala | 79 + .../rwvns/vala/rwsdn_mock/CMakeLists.txt | 8 + .../rwvns/vala/rwsdn_mock/rwsdn_mock.py | 174 + .../rwvns/vala/rwsdn_odl/CMakeLists.txt | 8 + .../plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py | 943 +++++ .../rwvns/vala/rwsdn_sim/CMakeLists.txt | 8 + .../plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py | 95 + .../plugins/rwvns/yang/CMakeLists.txt | 25 + .../rwlaunchpad/plugins/rwvns/yang/Makefile | 24 + .../plugins/rwvns/yang/rwsdn.tailf.yang | 17 + .../rwlaunchpad/plugins/rwvns/yang/rwsdn.yang | 303 ++ .../rwlaunchpad/plugins/vala/CMakeLists.txt | 13 + .../mano/rwlaunchpad/plugins/vala/Makefile | 24 + .../plugins/vala/rwos_ma_nfvo/CMakeLists.txt | 52 + .../vala/rwos_ma_nfvo/rwos_ma_nfvo.vala | 16 + .../rwos_ma_nfvo_rest/CMakeLists.txt | 8 + .../rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile | 24 + .../rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py | 53 + .../plugins/vala/rwve_vnfm_em/CMakeLists.txt | 52 + .../vala/rwve_vnfm_em/rwve_vnfm_em.vala | 16 + .../rwve_vnfm_em_rest/CMakeLists.txt | 8 + .../rwve_vnfm_em/rwve_vnfm_em_rest/Makefile | 24 + .../rwve_vnfm_em_rest/rwve_vnfm_em_rest.py | 56 + .../plugins/vala/rwve_vnfm_vnf/CMakeLists.txt | 52 + .../vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala | 16 + .../rwve_vnfm_vnf_rest/CMakeLists.txt | 8 + .../rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile | 24 + .../rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py | 56 + .../rwlaunchpad/plugins/yang/CMakeLists.txt | 36 + .../mano/rwlaunchpad/plugins/yang/Makefile | 24 + .../plugins/yang/rw-iwp.tailf.yang | 30 + .../mano/rwlaunchpad/plugins/yang/rw-iwp.yang | 184 + .../plugins/yang/rw-launchpad-log.yang | 47 + .../plugins/yang/rw-launchpad.tailf.yang | 25 + .../plugins/yang/rw-launchpad.yang | 131 + .../plugins/yang/rw-monitor.tailf.yang | 21 + .../rwlaunchpad/plugins/yang/rw-monitor.yang | 62 + .../mano/rwlaunchpad/plugins/yang/rw-nsm.yang | 121 + .../plugins/yang/rw-resource-mgr.tailf.yang | 30 + .../plugins/yang/rw-resource-mgr.yang | 293 ++ .../rwlaunchpad/plugins/yang/rw-vnfm.yang | 66 + 
.../plugins/yang/rw-vns.tailf.yang | 51 + .../mano/rwlaunchpad/plugins/yang/rw-vns.yang | 88 + .../core/mano/rwlaunchpad/ra/CMakeLists.txt | 51 + .../ra/launchpad_longevity_systest | 44 + .../rwlaunchpad/ra/pingpong_longevity_systest | 31 + .../ra/pingpong_lp_standalone_systest | 32 + .../rwlaunchpad/ra/pingpong_records_systest | 29 + .../ra/pingpong_vnf_reload_systest | 33 + .../mano/rwlaunchpad/ra/pingpong_vnf_systest | 28 + .../mano/rwlaunchpad/ra/pytest/conftest.py | 151 + .../ra/pytest/test_launchpad_longevity.py | 33 + .../ra/pytest/test_pingpong_longevity.py | 62 + .../ra/pytest/test_pingpong_vnf.py | 450 +++ .../ra/pytest/test_pingpong_vnf_static.py | 327 ++ .../rwlaunchpad/ra/pytest/test_records.py | 357 ++ .../rwlaunchpad/ra/pytest/test_startstop.py | 89 + ...pong_lp_standalone_systest_openstack.racfg | 18 + .../pingpong_records_systest_openstack.racfg | 18 + ...ingpong_vnf_reload_systest_openstack.racfg | 18 + .../racfg/pingpong_vnf_systest_cloudsim.racfg | 19 + .../pingpong_vnf_systest_openstack.racfg | 18 + .../core/mano/rwlaunchpad/test/CMakeLists.txt | 59 + modules/core/mano/rwlaunchpad/test/juju_ut.py | 234 ++ .../core/mano/rwlaunchpad/test/launchpad.py | 406 ++ .../rwlaunchpad/test/launchpad_module_test | 43 + .../mano/rwlaunchpad/test/mano_error_ut.py | 904 +++++ modules/core/mano/rwlaunchpad/test/mano_ut.py | 814 ++++ .../mano/rwlaunchpad/test/openmano_nsm_ut.py | 189 + .../rwlaunchpad/test/pytest/lp_kt_utm_test.py | 308 ++ .../test/pytest/lp_kt_utm_wims_test.py | 335 ++ .../mano/rwlaunchpad/test/pytest/lp_test.py | 392 ++ .../test/pytest/lp_tg_2vrouter_ts_epa_test.py | 325 ++ .../test/pytest/lp_tg_2vrouter_ts_test.py | 325 ++ .../pytest/lp_tg_vrouter_ts_epa_sriov_test.py | 323 ++ .../test/racfg/launchpad_module_test.racfg | 19 + .../mano/rwlaunchpad/test/utest_rwmonitor.py | 454 +++ .../core/mano/rwlaunchpad/test/utest_rwnsm.py | 217 ++ .../mano/rwlaunchpad/test/utest_uploader.py | 157 + modules/core/mano/rwmc/CMakeLists.txt | 32 + modules/core/mano/rwmc/Makefile | 24 + .../core/mano/rwmc/bin/cloudsim_http_proxy.sh | 53 + .../mano/rwmc/include/riftware/rwmc_log.h | 40 + .../mano/rwmc/include/riftware/rwmctasklet.h | 84 + modules/core/mano/rwmc/plugins/CMakeLists.txt | 19 + modules/core/mano/rwmc/plugins/Makefile | 24 + .../core/mano/rwmc/plugins/cli/cli_rwmc.xml | 97 + .../plugins/cli/cli_rwmc_schema_listing.txt | 54 + .../rwmc/plugins/rwmctasklet/CMakeLists.txt | 29 + .../mano/rwmc/plugins/rwmctasklet/Makefile | 24 + .../rift/tasklets/rwmctasklet/__init__.py | 17 + .../rift/tasklets/rwmctasklet/launchpad.py | 495 +++ .../rift/tasklets/rwmctasklet/rwmctasklet.py | 2451 ++++++++++++ .../rift/tasklets/rwmctasklet/salt.py | 284 ++ .../rift/tasklets/rwmctasklet/util.py | 38 + .../rwmc/plugins/rwmctasklet/rwmctasklet.py | 30 + .../mano/rwmc/plugins/yang/CMakeLists.txt | 34 + modules/core/mano/rwmc/plugins/yang/Makefile | 24 + .../core/mano/rwmc/plugins/yang/rw-mc.cli.xml | 11 + .../mano/rwmc/plugins/yang/rw-mc.tailf.yang | 78 + .../core/mano/rwmc/plugins/yang/rw-mc.yang | 519 +++ modules/core/mano/rwmc/ra/CMakeLists.txt | 45 + .../rwmc/ra/mission_control_delete_systest | 43 + ...ion_control_negative_cloud_account_systest | 42 + ...ssion_control_negative_mgmt_domain_systest | 42 + .../rwmc/ra/mission_control_negative_systest | 44 + .../mission_control_negative_vmpool_systest | 42 + .../rwmc/ra/mission_control_reload_systest | 45 + .../core/mano/rwmc/ra/mission_control_systest | 43 + modules/core/mano/rwmc/ra/pytest/conftest.py | 202 + 
.../rwmc/ra/pytest/test_mission_control.py | 332 ++ .../ra/pytest/test_mission_control_delete.py | 239 ++ .../pytest/test_mission_control_negative.py | 27 + ..._mission_control_negative_cloud_account.py | 379 ++ ...st_mission_control_negative_mgmt_domain.py | 497 +++ .../test_mission_control_negative_vmpool.py | 528 +++ .../ra/pytest/test_mission_control_static.py | 396 ++ ...sion_control_delete_systest_cloudsim.racfg | 19 + ...ion_control_reload_systest_openstack.racfg | 18 + .../mission_control_systest_cloudsim.racfg | 19 + ...on_control_systest_cloudsim_negative.racfg | 19 + .../mission_control_systest_openstack.racfg | 18 + ...n_control_systest_openstack_negative.racfg | 18 + modules/core/mano/rwmc/test/CMakeLists.txt | 15 + modules/core/mano/rwmc/test/README | 10 + .../core/mano/rwmc/test/mission_control.py | 299 ++ .../core/mano/rwmc/test/perf/dts-perf-nc.py | 211 ++ .../mano/rwmc/test/perf/dts-perf-system.py | 126 + .../core/mano/rwmc/test/perf/dts-perf-test.py | 188 + .../mano/rwmc/test/perf/dts-perf-webserver.py | 108 + modules/core/mano/rwmc/test/perf/test.sh | 38 + .../plugins/cli/cli_so_schema_listing.txt | 31 + modules/ui/composer/CMakeLists.txt | 62 + modules/ui/composer/foss.txt | 0 modules/ui/composer/manifest/LICENSE | 0 modules/ui/composer/scripts/.install.sh.swp | Bin 0 -> 12288 bytes modules/ui/composer/scripts/install.sh | 31 + modules/ui/composer/webapp/.editorconfig | 13 + modules/ui/composer/webapp/.eslintignore | 1 + modules/ui/composer/webapp/.eslintrc | 34 + modules/ui/composer/webapp/.gitattributes | 1 + modules/ui/composer/webapp/.yo-rc.json | 8 + modules/ui/composer/webapp/Gruntfile.js | 172 + modules/ui/composer/webapp/README.md | 46 + .../ui/composer/webapp/codeStyleSettings.xml | 95 + modules/ui/composer/webapp/karma.conf.js | 62 + .../ui/composer/webapp/license-flat-icon.pdf | Bin 0 -> 38247 bytes modules/ui/composer/webapp/license-info.txt | 2 + modules/ui/composer/webapp/package.json | 84 + modules/ui/composer/webapp/scripts/build.sh | 31 + .../webapp/scripts/launch_composer.sh | 80 + .../webapp/scripts/server_composer_ui.py | 88 + .../webapp/scripts/update-node-modules.sh | 93 + modules/ui/composer/webapp/src/README.md | 69 + .../webapp/src/actions/CanvasEditorActions.js | 35 + .../src/actions/CanvasPanelTrayActions.js | 34 + .../src/actions/CatalogDataSourceActions.js | 35 + .../src/actions/CatalogFilterActions.js | 35 + .../webapp/src/actions/CatalogItemsActions.js | 39 + .../actions/CatalogPackageManagerActions.js | 32 + .../src/actions/CatalogPanelTrayActions.js | 35 + .../webapp/src/actions/ComposerAppActions.js | 35 + .../webapp/src/actions/ModalOverlayActions.js | 35 + .../webapp/src/actions/PanelResizeAction.js | 73 + .../webapp/src/actions/RiftHeaderActions.js | 35 + modules/ui/composer/webapp/src/alt.js | 30 + .../src/assets/Roboto-Black-webfont.woff | Bin 0 -> 24484 bytes .../assets/Roboto-BlackItalic-webfont.woff | Bin 0 -> 27892 bytes .../src/assets/Roboto-Bold-webfont.woff | Bin 0 -> 24808 bytes .../src/assets/Roboto-BoldItalic-webfont.woff | Bin 0 -> 28824 bytes .../src/assets/Roboto-Italic-webfont.woff | Bin 0 -> 29080 bytes .../src/assets/Roboto-Light-webfont.woff | Bin 0 -> 24576 bytes .../assets/Roboto-LightItalic-webfont.woff | Bin 0 -> 29480 bytes .../src/assets/Roboto-Medium-webfont.woff | Bin 0 -> 25048 bytes .../assets/Roboto-MediumItalic-webfont.woff | Bin 0 -> 28860 bytes .../src/assets/Roboto-Regular-webfont.woff | Bin 0 -> 25020 bytes .../src/assets/Roboto-Thin-webfont.woff | Bin 0 -> 24944 bytes 
.../src/assets/Roboto-ThinItalic-webfont.woff | Bin 0 -> 30468 bytes .../assets/RobotoCondensed-Bold-webfont.woff | Bin 0 -> 25144 bytes .../RobotoCondensed-BoldItalic-webfont.woff | Bin 0 -> 29136 bytes .../RobotoCondensed-Italic-webfont.woff | Bin 0 -> 29104 bytes .../assets/RobotoCondensed-Light-webfont.woff | Bin 0 -> 25204 bytes .../RobotoCondensed-LightItalic-webfont.woff | Bin 0 -> 29796 bytes .../RobotoCondensed-Regular-webfont.woff | Bin 0 -> 25268 bytes .../src/assets/big-honking-catalog.json | 1901 ++++++++++ .../webapp/src/assets/empty-nsd-catalog.json | 290 ++ .../favicons/android-chrome-144x144.png | Bin 0 -> 1601 bytes .../favicons/android-chrome-192x192.png | Bin 0 -> 2073 bytes .../assets/favicons/android-chrome-36x36.png | Bin 0 -> 519 bytes .../assets/favicons/android-chrome-48x48.png | Bin 0 -> 676 bytes .../assets/favicons/android-chrome-72x72.png | Bin 0 -> 937 bytes .../assets/favicons/android-chrome-96x96.png | Bin 0 -> 1135 bytes .../favicons/apple-touch-icon-114x114.png | Bin 0 -> 1407 bytes .../favicons/apple-touch-icon-120x120.png | Bin 0 -> 1448 bytes .../favicons/apple-touch-icon-144x144.png | Bin 0 -> 1630 bytes .../favicons/apple-touch-icon-152x152.png | Bin 0 -> 1721 bytes .../favicons/apple-touch-icon-180x180.png | Bin 0 -> 1940 bytes .../favicons/apple-touch-icon-57x57.png | Bin 0 -> 804 bytes .../favicons/apple-touch-icon-60x60.png | Bin 0 -> 830 bytes .../favicons/apple-touch-icon-72x72.png | Bin 0 -> 951 bytes .../favicons/apple-touch-icon-76x76.png | Bin 0 -> 1001 bytes .../favicons/apple-touch-icon-precomposed.png | Bin 0 -> 2351 bytes .../src/assets/favicons/apple-touch-icon.png | Bin 0 -> 1940 bytes .../src/assets/favicons/browserconfig.xml | 27 + .../src/assets/favicons/favicon-16x16.png | Bin 0 -> 344 bytes .../src/assets/favicons/favicon-194x194.png | Bin 0 -> 2349 bytes .../src/assets/favicons/favicon-32x32.png | Bin 0 -> 567 bytes .../src/assets/favicons/favicon-96x96.png | Bin 0 -> 1351 bytes .../webapp/src/assets/favicons/favicon.ico | Bin 0 -> 15086 bytes .../webapp/src/assets/favicons/manifest.json | 41 + .../src/assets/favicons/mstile-144x144.png | Bin 0 -> 1882 bytes .../src/assets/favicons/mstile-150x150.png | Bin 0 -> 1657 bytes .../src/assets/favicons/mstile-310x150.png | Bin 0 -> 1682 bytes .../src/assets/favicons/mstile-310x310.png | Bin 0 -> 3266 bytes .../src/assets/favicons/mstile-70x70.png | Bin 0 -> 1357 bytes .../src/assets/favicons/safari-pinned-tab.svg | 69 + .../webapp/src/assets/juju-catalog.json | 936 +++++ .../src/assets/onvelocity-color-theme.json | 33 + .../webapp/src/assets/ping-pong-catalog.json | 767 ++++ .../src/assets/ping-vrouter-pong-catalog.json | 504 +++ .../src/assets/rift.ware-color-theme.json | 37 + .../composer/webapp/src/components/Button.js | 72 + .../webapp/src/components/CanvasPanel.js | 198 + .../webapp/src/components/CanvasPanelTray.js | 62 + .../webapp/src/components/CanvasZoom.js | 77 + .../webapp/src/components/CatalogFilter.js | 67 + .../src/components/CatalogItemCanvasEditor.js | 86 + .../components/CatalogItemDetailsEditor.js | 66 + .../webapp/src/components/CatalogItems.js | 132 + .../src/components/CatalogPackageManager.js | 125 + .../webapp/src/components/CatalogPanel.js | 204 + .../src/components/CatalogPanelToolbar.js | 108 + .../webapp/src/components/CatalogPanelTray.js | 75 + .../webapp/src/components/ComposerApp.js | 240 ++ .../src/components/ComposerAppToolbar.js | 189 + .../src/components/ContentEditableDiv.js | 36 + .../webapp/src/components/DetailsPanel.js | 79 + 
.../webapp/src/components/DropTarget.js | 85 + .../webapp/src/components/DropZonePanel.js | 62 + .../EditDescriptorModelProperties.js | 586 +++ .../ConnectionPointSelector.js | 75 + .../EditForwardingGraphPaths.js | 222 ++ .../EditorForwardingGraph/EditableProperty.js | 32 + .../EditorForwardingGraph/mapClassifier.js | 219 ++ .../mapConnectionPoint.js | 38 + .../mapRecordServicePath.js | 155 + .../onClickSelectAndShowInDetailsPanel.js | 30 + .../onCutDelegateToRemove.js | 30 + ...ormInputChangedModifyContainerAndNotify.js | 34 + .../onHoverHighlightConnectionPoint.js | 36 + .../webapp/src/components/JSONViewer.js | 148 + .../webapp/src/components/LayoutRow.js | 51 + .../webapp/src/components/LoadingIndicator.js | 54 + .../webapp/src/components/ModalOverlay.js | 84 + .../webapp/src/components/PopupWindow.js | 50 + .../composer/webapp/src/components/Range.js | 80 + .../webapp/src/components/RiftHeader.js | 106 + .../webapp/src/components/messages.js | 57 + modules/ui/composer/webapp/src/favicon.ico | Bin 0 -> 4286 bytes .../src/images/default-catalog-icon.svg | 1 + .../webapp/src/images/default-icon-white.svg | 1 + .../webapp/src/images/default-icon.svg | 1 + .../webapp/src/images/header-logo.png | Bin 0 -> 1658 bytes .../webapp/src/images/logos/riftio.png | Bin 0 -> 1964 bytes .../webapp/src/images/osm_header_253x50.png | Bin 0 -> 11795 bytes .../webapp/src/images/osm_header_506x100.png | Bin 0 -> 25054 bytes .../src/images/riftio_website_logo_002_03.png | Bin 0 -> 9692 bytes .../webapp/src/images/sample-catalog.png | Bin 0 -> 121406 bytes .../webapp/src/images/vendor-riftio.png | Bin 0 -> 16547 bytes modules/ui/composer/webapp/src/index.html | 58 + .../CatalogPackageManagerUploadDropZone.js | 125 + .../webapp/src/libraries/ColorGroups.js | 69 + .../webapp/src/libraries/DeletionManager.js | 147 + .../webapp/src/libraries/InstanceCounter.js | 62 + .../webapp/src/libraries/ResizableManager.js | 431 +++ .../webapp/src/libraries/SelectionManager.js | 396 ++ .../src/libraries/ToggleElementHandler.js | 48 + .../webapp/src/libraries/TooltipManager.js | 82 + .../composer/webapp/src/libraries/UniqueId.js | 68 + .../webapp/src/libraries/getEventPath.js | 39 + .../src/libraries/graph/DescriptorGraph.js | 170 + .../libraries/graph/DescriptorGraphGrid.js | 70 + .../graph/DescriptorGraphPathBuilder.js | 290 ++ .../graph/DescriptorGraphSelection.js | 86 + .../graph/GraphConnectionPointNumber.js | 55 + .../libraries/graph/GraphConstituentVnfd.js | 36 + .../libraries/graph/GraphDescriptorModel.js | 259 ++ .../libraries/graph/GraphForwardingGraph.js | 31 + .../graph/GraphInternalVirtualLink.js | 39 + .../graph/GraphInternalVirtualLinkPaths.js | 117 + .../libraries/graph/GraphNetworkService.js | 31 + .../libraries/graph/GraphRecordServicePath.js | 188 + .../graph/GraphVirtualDeploymentUnit.js | 30 + .../src/libraries/graph/GraphVirtualLink.js | 39 + .../libraries/graph/GraphVirtualLinkPaths.js | 66 + .../graph/GraphVirtualNetworkFunction.js | 30 + .../graph/HighlightRecordServicePaths.js | 54 + .../webapp/src/libraries/graph/PathBuilder.js | 193 + .../webapp/src/libraries/graph/Position.js | 180 + .../layouts/RelationsAndNetworksLayout.js | 583 +++ .../webapp/src/libraries/graph/math.js | 150 + .../ui/composer/webapp/src/libraries/guid.js | 30 + .../webapp/src/libraries/isFullScreen.js | 30 + .../src/libraries/model/DescriptorModel.js | 312 ++ .../libraries/model/DescriptorModelFactory.js | 421 +++ .../libraries/model/DescriptorModelFields.js | 36 + .../libraries/model/DescriptorModelMeta.json | 3358 
+++++++++++++++++ .../model/DescriptorModelMetaFactory.js | 89 + .../model/DescriptorModelMetaProperty.js | 168 + .../model/DescriptorModelSerializer.js | 183 + .../model/DescriptorTemplateFactory.js | 62 + .../libraries/model/DescriptorTemplates.js | 73 + .../webapp/src/libraries/model/IconFactory.js | 76 + .../libraries/model/descriptors/Classifier.js | 98 + .../ClassifierConnectionPointRef.js | 85 + .../descriptors/ClassifierMatchAttributes.js | 70 + .../model/descriptors/ConnectionPoint.js | 122 + .../model/descriptors/ConstituentVnfd.js | 119 + .../ConstituentVnfdConnectionPoint.js | 36 + .../model/descriptors/ForwardingGraph.js | 120 + .../descriptors/InternalConnectionPoint.js | 74 + .../descriptors/InternalConnectionPointRef.js | 64 + .../model/descriptors/InternalVirtualLink.js | 78 + .../model/descriptors/NetworkService.js | 197 + .../NetworkServiceConnectionPoint.js | 122 + .../descriptors/PhysicalNetworkFunction.js | 52 + .../model/descriptors/RecordServicePath.js | 101 + .../descriptors/RspConnectionPointRef.js | 50 + .../descriptors/VirtualDeploymentUnit.js | 117 + ...alDeploymentUnitInternalConnectionPoint.js | 63 + .../model/descriptors/VirtualLink.js | 105 + .../descriptors/VirtualNetworkFunction.js | 123 + .../VirtualNetworkFunctionConnectionPoint.js | 59 + .../VirtualNetworkFunctionReadOnlyWrapper.js | 54 + .../descriptors/VnfdConnectionPointRef.js | 101 + .../ui/composer/webapp/src/libraries/utils.js | 122 + .../webapp/src/libraries/zoomFactor.js | 34 + .../webapp/src/sources/CatalogDataSource.js | 160 + .../sources/CatalogPackageManagerSource.js | 113 + .../webapp/src/sources/RiftHeaderSource.js | 75 + .../webapp/src/stores/CatalogDataStore.js | 554 +++ .../src/stores/CatalogPackageManagerStore.js | 239 ++ .../webapp/src/stores/CatalogPanelStore.js | 51 + .../webapp/src/stores/ComposerAppStore.js | 405 ++ .../webapp/src/stores/ModalOverlayStore.js | 56 + .../webapp/src/stores/RiftHeaderStore.js | 73 + .../webapp/src/styles/Animations.scss | 63 + .../composer/webapp/src/styles/AppRoot.scss | 133 + .../ui/composer/webapp/src/styles/Button.scss | 73 + .../webapp/src/styles/CanvasPanel.scss | 52 + .../webapp/src/styles/CanvasPanelTray.scss | 66 + .../webapp/src/styles/CanvasZoom.scss | 43 + .../webapp/src/styles/CatalogFilter.scss | 34 + .../src/styles/CatalogItemCanvasEditor.scss | 44 + .../webapp/src/styles/CatalogItems.scss | 168 + .../src/styles/CatalogPackageManager.scss | 167 + .../webapp/src/styles/CatalogPanel.scss | 49 + .../src/styles/CatalogPanelToolbar.scss | 53 + .../webapp/src/styles/CatalogPanelTray.scss | 68 + .../webapp/src/styles/ComposerAppToolbar.scss | 39 + .../src/styles/ConnectionPointSelector.scss | 30 + .../webapp/src/styles/DataOpenCloseIcon.scss | 42 + .../webapp/src/styles/DescriptorGraph.scss | 173 + .../webapp/src/styles/DetailsPanel.scss | 50 + .../webapp/src/styles/DropZonePanel.scss | 59 + .../styles/EditDescriptorModelProperties.scss | 312 ++ .../src/styles/EditForwardingGraphPaths.scss | 445 +++ .../webapp/src/styles/EditableProperty.scss | 61 + .../webapp/src/styles/FileUploadsList.scss | 137 + .../webapp/src/styles/FileUploadsToolbar.scss | 46 + .../webapp/src/styles/FullScreen.scss | 38 + .../src/styles/GraphDescriptorModel.scss | 21 + .../src/styles/GraphRecordServicePaths.scss | 31 + .../webapp/src/styles/GraphVirtualLink.scss | 48 + .../webapp/src/styles/JSONViewer.scss | 105 + .../composer/webapp/src/styles/LayoutRow.scss | 64 + .../webapp/src/styles/LoadingIndicator.scss | 56 + .../webapp/src/styles/ModalOverlay.scss | 90 + 
.../webapp/src/styles/ResizableManager.scss | 53 + .../webapp/src/styles/RiftHeader.scss | 57 + .../webapp/src/styles/ToggleElement.scss | 38 + .../webapp/src/styles/TooltipManager.scss | 84 + .../webapp/src/styles/_ColorGroups.scss | 47 + .../ui/composer/webapp/src/styles/_main.scss | 458 +++ .../webapp/src/styles/_variables.scss | 53 + .../test/helpers/pack/phantomjs-shims.js | 55 + .../test-clean-input-output-model.json | 583 +++ .../webapp/test/spec/components/ButtonSpec.js | 35 + .../libraries/DescriptorModelFactorySpec.js | 85 + .../spec/libraries/DescriptorModelSpec.js | 57 + .../spec/libraries/SelectionManagerSpec.js | 34 + .../webapp/test/uploadServer/package.json | 9 + .../webapp/test/uploadServer/server.js | 228 ++ modules/ui/composer/webapp/webpack.config.js | 102 + .../ui/composer/webapp/webpack.dist.config.js | 88 + modules/ui/rw.ui/CMakeLists.txt | 77 + modules/ui/rw.ui/api/about/about.js | 117 + .../rw.ui/api/cloud_account/cloudAccount.js | 334 ++ modules/ui/rw.ui/api/common/constants.js | 55 + modules/ui/rw.ui/api/debug/debug.js | 63 + .../ui/rw.ui/api/launchpad/epa_aggregator.js | 158 + modules/ui/rw.ui/api/launchpad/launchpad.js | 1398 +++++++ modules/ui/rw.ui/api/launchpad/transforms.js | 123 + modules/ui/rw.ui/api/logging/logging.js | 70 + .../api/missioncontrol/missionControl.js | 1226 ++++++ modules/ui/rw.ui/api/package.json | 34 + modules/ui/rw.ui/api/routes.js | 696 ++++ modules/ui/rw.ui/api/routes/launchpad.js | 62 + .../ui/rw.ui/api/routes/mission-control.js | 264 ++ .../ui/rw.ui/api/sdn_account/sdnAccount.js | 237 ++ modules/ui/rw.ui/api/server.js | 125 + modules/ui/rw.ui/api/sockets.js | 300 ++ modules/ui/rw.ui/api/utils/utils.js | 164 + modules/ui/rw.ui/foss.txt | 0 modules/ui/rw.ui/manifest/LICENSE | 0 modules/ui/rw.ui/scripts/install_api.sh | 29 + modules/ui/rw.ui/scripts/install_ui.sh | 31 + modules/ui/rw.ui/webapp/README.md | 27 + .../favicons/android-chrome-144x144.png | Bin 0 -> 1601 bytes .../favicons/android-chrome-192x192.png | Bin 0 -> 2073 bytes .../assets/favicons/android-chrome-36x36.png | Bin 0 -> 519 bytes .../assets/favicons/android-chrome-48x48.png | Bin 0 -> 676 bytes .../assets/favicons/android-chrome-72x72.png | Bin 0 -> 937 bytes .../assets/favicons/android-chrome-96x96.png | Bin 0 -> 1135 bytes .../favicons/apple-touch-icon-114x114.png | Bin 0 -> 1407 bytes .../favicons/apple-touch-icon-120x120.png | Bin 0 -> 1448 bytes .../favicons/apple-touch-icon-144x144.png | Bin 0 -> 1630 bytes .../favicons/apple-touch-icon-152x152.png | Bin 0 -> 1721 bytes .../favicons/apple-touch-icon-180x180.png | Bin 0 -> 1940 bytes .../favicons/apple-touch-icon-57x57.png | Bin 0 -> 804 bytes .../favicons/apple-touch-icon-60x60.png | Bin 0 -> 830 bytes .../favicons/apple-touch-icon-72x72.png | Bin 0 -> 951 bytes .../favicons/apple-touch-icon-76x76.png | Bin 0 -> 1001 bytes .../favicons/apple-touch-icon-precomposed.png | Bin 0 -> 2351 bytes .../app/assets/favicons/apple-touch-icon.png | Bin 0 -> 1940 bytes .../app/assets/favicons/browserconfig.xml | 27 + .../app/assets/favicons/favicon-16x16.png | Bin 0 -> 344 bytes .../app/assets/favicons/favicon-194x194.png | Bin 0 -> 2349 bytes .../app/assets/favicons/favicon-32x32.png | Bin 0 -> 567 bytes .../app/assets/favicons/favicon-96x96.png | Bin 0 -> 1351 bytes .../webapp/app/assets/favicons/favicon.ico | Bin 0 -> 15086 bytes .../webapp/app/assets/favicons/manifest.json | 41 + .../app/assets/favicons/mstile-144x144.png | Bin 0 -> 1882 bytes .../app/assets/favicons/mstile-150x150.png | Bin 0 -> 1657 bytes 
.../app/assets/favicons/mstile-310x150.png | Bin 0 -> 1682 bytes .../app/assets/favicons/mstile-310x310.png | Bin 0 -> 3266 bytes .../app/assets/favicons/mstile-70x70.png | Bin 0 -> 1357 bytes .../app/assets/favicons/safari-pinned-tab.svg | 69 + .../ui/rw.ui/webapp/app/assets/img/avatar.png | Bin 0 -> 1555 bytes .../rw.ui/webapp/app/assets/img/bargraph.png | Bin 0 -> 11369 bytes .../app/assets/img/bearer-plane-diagram.png | Bin 0 -> 4921 bytes .../app/assets/img/bg-tile-cross-small.png | Bin 0 -> 104 bytes .../webapp/app/assets/img/catalog-default.svg | 1 + .../webapp/app/assets/img/create-account.png | Bin 0 -> 957900 bytes .../assets/img/create-fleet-params-temp.png | Bin 0 -> 9809 bytes .../app/assets/img/create-fleet-pool-temp.png | Bin 0 -> 4137 bytes .../assets/img/create-fleet-services-temp.png | Bin 0 -> 34844 bytes .../assets/img/diameter-openflow-lte-icon.png | Bin 0 -> 1597 bytes .../webapp/app/assets/img/firewall-icon.png | Bin 0 -> 1939 bytes .../rw.ui/webapp/app/assets/img/gbps-10.png | Bin 0 -> 10910 bytes .../rw.ui/webapp/app/assets/img/gbps-50.png | Bin 0 -> 11244 bytes .../webapp/app/assets/img/green-page-icon.png | Bin 0 -> 286 bytes .../webapp/app/assets/img/header-logo.png | Bin 0 -> 1658 bytes .../rw.ui/webapp/app/assets/img/host-icon.png | Bin 0 -> 1518 bytes .../webapp/app/assets/img/icon-host-sm.png | Bin 0 -> 832 bytes .../rw.ui/webapp/app/assets/img/icon-host.png | Bin 0 -> 1518 bytes .../app/assets/img/icon-open-viewport.png | Bin 0 -> 2842 bytes .../webapp/app/assets/img/icon-switch.png | Bin 0 -> 2206 bytes .../app/assets/img/iot-industry-icon.png | Bin 0 -> 1739 bytes .../app/assets/img/iot-medical-icon.png | Bin 0 -> 2222 bytes .../img/iot-transportation-icon-active.png | Bin 0 -> 5410 bytes .../webapp/app/assets/img/ip-lte-icon.png | Bin 0 -> 1619 bytes .../webapp/app/assets/img/ip-softgre-icon.png | Bin 0 -> 2503 bytes .../rw.ui/webapp/app/assets/img/latency.png | Bin 0 -> 31825 bytes .../webapp/app/assets/img/latency_graph.png | Bin 0 -> 26551 bytes .../assets/img/launchpad-add-fleet-icon.png | Bin 0 -> 146 bytes .../app/assets/img/launchpad-graph-temp.png | Bin 0 -> 8716 bytes .../img/launchpad-graphs-temp/10.09.png | Bin 0 -> 12577 bytes .../img/launchpad-graphs-temp/17.23.png | Bin 0 -> 12337 bytes .../img/launchpad-graphs-temp/20.05.png | Bin 0 -> 12528 bytes .../img/launchpad-graphs-temp/23.08.png | Bin 0 -> 12934 bytes .../img/launchpad-graphs-temp/25.03.png | Bin 0 -> 13145 bytes .../img/launchpad-graphs-temp/30.56.png | Bin 0 -> 12960 bytes .../assets/img/launchpad-graphs-temp/7.08.png | Bin 0 -> 12780 bytes .../assets/img/launchpad-graphs-temp/9.24.png | Bin 0 -> 12587 bytes .../ui/rw.ui/webapp/app/assets/img/link.png | Bin 0 -> 12683 bytes .../app/assets/img/loadbalance-icon.png | Bin 0 -> 2163 bytes .../webapp/app/assets/img/lte-mme-icon.png | Bin 0 -> 1610 bytes .../webapp/app/assets/img/osm_header.png | Bin 0 -> 8767 bytes .../app/assets/img/osm_header_253x50.png | Bin 0 -> 11795 bytes .../app/assets/img/osm_header_506x100.png | Bin 0 -> 25054 bytes .../webapp/app/assets/img/page_loader.gif | Bin 0 -> 6551 bytes .../app/assets/img/platform-nav-temp.png | Bin 0 -> 239 bytes .../1a-fleet-platform-resources.png | Bin 0 -> 173437 bytes .../1a-fleet-platform-traffic.png | Bin 0 -> 185114 bytes .../1b-fleet-platform-resources.png | Bin 0 -> 173437 bytes .../1b-fleet-platform-traffic.png | Bin 0 -> 185114 bytes .../2a-fleet-platform-resources.png | Bin 0 -> 173437 bytes .../2a-fleet-platform-traffic.png | Bin 0 -> 185114 bytes 
.../2b-fleet-platform-resources.png | Bin 0 -> 173437 bytes .../2b-fleet-platform-traffic.png | Bin 0 -> 185114 bytes .../3a-fleet-platform-resources.png | Bin 0 -> 173437 bytes .../3a-fleet-platform-traffic.png | Bin 0 -> 185114 bytes .../3b-fleet-platform-resources.png | Bin 0 -> 173437 bytes .../3b-fleet-platform-traffic.png | Bin 0 -> 185114 bytes .../webapp/app/assets/img/router-icon.png | Bin 0 -> 1983 bytes .../assets/img/so-pages-temp/ipTrafTemp.png | Bin 0 -> 14635 bytes .../assets/img/so-pages-temp/securityTemp.png | Bin 0 -> 20942 bytes .../assets/img/svg/launch-fleet-icn-close.svg | 10 + .../assets/img/svg/launch-fleet-icn-edit.svg | 13 + .../svg/launch-fleet-network-01-active.svg | 20 + .../svg/launch-fleet-network-01-inactive.svg | 20 + .../svg/launch-fleet-network-02-active.svg | 23 + .../svg/launch-fleet-network-02-inactive.svg | 23 + .../svg/launch-fleet-network-03-active.svg | 19 + .../svg/launch-fleet-network-03-inactive.svg | 19 + .../svg/launch-fleet-network-04-active.svg | 20 + .../svg/launch-fleet-network-04-inactive.svg | 20 + .../svg/launch-fleet-network-05-active.svg | 20 + .../svg/launch-fleet-network-05-inactive.svg | 20 + .../svg/launch-fleet-network-06-active.svg | 25 + .../svg/launch-fleet-network-06-inactive.svg | 25 + .../svg/launch-fleet-network-07-active.svg | 20 + .../svg/launch-fleet-network-07-inactive.svg | 20 + .../svg/launch-fleet-network-08-active.svg | 24 + .../svg/launch-fleet-network-08-inactive.svg | 24 + .../svg/launch-fleet-network-09-active.svg | 21 + .../svg/launch-fleet-network-09-inactive.svg | 21 + .../svg/launch-fleet-network-10-active.svg | 27 + .../svg/launch-fleet-network-10-inactive.svg | 27 + .../svg/launch-fleet-network-11-active.svg | 22 + .../svg/launch-fleet-network-11-inactive.svg | 22 + .../img/svg/launch-fleet-pool-01-active.svg | 27 + .../img/svg/launch-fleet-pool-01-inactive.svg | 27 + .../img/svg/launch-fleet-pool-02-active.svg | 27 + .../img/svg/launch-fleet-pool-02-inactive.svg | 27 + ...launchpad-icn-create-environment-large.svg | 10 + .../assets/img/svg/launchpad-icn-newTab.svg | 14 + .../app/assets/img/svg/launchpad-icn-play.svg | 9 + .../assets/img/svg/launchpad-icn-sliders.svg | 14 + .../webapp/app/assets/img/switch-icon.png | Bin 0 -> 2206 bytes .../webapp/app/assets/img/table-cell-bg.png | Bin 0 -> 173 bytes .../app/assets/img/traffic-sim-diagram.png | Bin 0 -> 4610 bytes .../rw.ui/webapp/app/assets/img/tunnels.png | Bin 0 -> 1970 bytes .../app/assets/img/viewport-dash-temp.png | Bin 0 -> 72267 bytes .../app/assets/img/viewport-dash-v2-temp.png | Bin 0 -> 85437 bytes .../app/assets/img/viewport-dash-v3-temp.png | Bin 0 -> 136180 bytes .../app/assets/img/viewport-nav-bottom.png | Bin 0 -> 4013 bytes .../app/assets/img/viewport-nav-center.png | Bin 0 -> 4012 bytes .../app/assets/img/viewport-nav-left.png | Bin 0 -> 3101 bytes .../app/assets/img/viewport-nav-right.png | Bin 0 -> 4013 bytes .../app/assets/img/viewport-nav-top.png | Bin 0 -> 1821 bytes .../app/assets/img/viewport-platform-temp.png | Bin 0 -> 136180 bytes .../app/assets/img/viewport-sla-graph.svg | 45 + .../app/assets/img/viewport-vim-temp.png | Bin 0 -> 111677 bytes .../webapp/app/assets/img/viewport-vnf-10.svg | 3 + .../webapp/app/assets/img/viewport-vnf-50.svg | 3 + .../app/assets/img/viewport-vnf-temp.png | Bin 0 -> 106477 bytes .../app/assets/img/vim-icon-corners.png | Bin 0 -> 305 bytes .../app/assets/img/vim-icon-diamond.png | Bin 0 -> 380 bytes .../assets/img/vim-icon-halfcircle-bottom.png | Bin 0 -> 367 bytes 
.../assets/img/vim-icon-halfcircle-top.png | Bin 0 -> 356 bytes .../webapp/app/assets/img/vim-icon-plus.png | Bin 0 -> 197 bytes .../app/assets/img/vim-icon-soliddiamond.png | Bin 0 -> 302 bytes .../app/assets/img/vim-icon-solidsquare.png | Bin 0 -> 180 bytes .../app/assets/img/vim-icon-solidstar.png | Bin 0 -> 371 bytes .../app/assets/img/vim-icon-triangle.png | Bin 0 -> 282 bytes modules/ui/rw.ui/webapp/app/assets/js/guid.js | 33 + .../webapp/app/assets/js/n3-line-chart.js | 1808 +++++++++ modules/ui/rw.ui/webapp/app/main.js | 42 + .../rw.ui/webapp/app/modules/about/about.jsx | 179 + .../rw.ui/webapp/app/modules/about/about.scss | 67 + .../webapp/app/modules/about/aboutActions.js | 32 + .../webapp/app/modules/about/aboutSource.js | 83 + .../webapp/app/modules/about/aboutStore.js | 61 + .../app/modules/components/bullet/bullet.js | 233 ++ .../app/modules/components/button/button.scss | 55 + .../modules/components/button/rw.button.js | 264 ++ .../carousel/ButtonEventListener.js | 210 ++ .../components/carousel/carousel-react.js | 16 + .../components/carousel/carousel-react.jsx | 127 + .../modules/components/carousel/carousel.css | 93 + .../modules/components/carousel/carousel.html | 18 + .../modules/components/carousel/carousel.js | 44 + .../modules/components/carousel/components.js | 39 + .../components/carousel/multicomponent.js | 64 + .../app/modules/components/carousel/test.html | 68 + .../app/modules/components/components.js | 383 ++ .../dashboard_card/dashboardCardHeader.jsx | 13 + .../dashboard_card/dashboard_card.jsx | 73 + .../dashboard_card/dashboard_card.scss | 60 + .../app/modules/components/filter/filter.jsx | 71 + .../app/modules/components/gauge/gauge.js | 278 ++ .../app/modules/components/header/header.jsx | 127 + .../app/modules/components/header/header.scss | 91 + .../components/header/headerActions.js | 23 + .../modules/components/header/headerStore.js | 58 + .../input-range-slider/input-range-slider.jsx | 62 + .../input-range-slider.scss | 107 + .../app/modules/components/listy/listy.js | 159 + .../loading-indicator-animations.scss | 56 + .../loading-indicator/loadingIndicator.jsx | 47 + .../components/mixins/ButtonEventListener.js | 210 ++ .../monitoringParamComponents.js | 148 + .../monitoringParamsCarousel.jsx | 35 + .../monitoring_params/monitoring_params.scss | 93 + .../multicomponent/multicomponent.js | 60 + .../nfvi-metric-bars/nfviMetricBars.jsx | 67 + .../nfvi-metric-bars/nfviMetricBars.scss | 75 + .../launchpadOperationalStatus.jsx | 182 + .../radio-button/rw.radio-button.js | 270 ++ .../components/screen-loader/screenLoader.jsx | 33 + .../components/text-area/rw.text-area.js | 235 ++ .../text-input/check-box/rw.check-box.js | 236 ++ .../text-input/check-box/rw.check-box2.js | 231 ++ .../components/text-input/rw.text-input.js | 283 ++ .../components/topology/topologyL2Graph.jsx | 297 ++ .../components/topology/topologyTree.jsx | 179 + .../components/topology/topologyTree.scss | 19 + .../transmit-receive/transmit-receive.js | 107 + .../transmit-receive/transmit-receive.jsx | 72 + .../transmit-receive/transmit-receive.scss | 52 + .../app/modules/components/uptime/uptime.jsx | 134 + .../ui/rw.ui/webapp/app/modules/core/alt.js | 25 + .../ui/rw.ui/webapp/app/modules/core/app.js | 25 + .../rw.ui/webapp/app/modules/debug/crash.jsx | 149 + .../rw.ui/webapp/app/modules/debug/crash.scss | 40 + .../webapp/app/modules/debug/crashActions.js | 29 + .../webapp/app/modules/debug/crashSource.js | 59 + .../webapp/app/modules/debug/crashStore.js | 54 + 
.../account_sidebar/accountSidebar.jsx | 132 + .../account_sidebar/accountSidebar.scss | 106 + .../app/modules/launchpad/cloud-account.js | 328 ++ .../app/modules/launchpad/createActions.js | 34 + .../app/modules/launchpad/createSource.js | 102 + .../app/modules/launchpad/createStore.js | 61 + .../app/modules/launchpad/launchpad-create.js | 135 + .../launchpad/launchpad-dashboard.html.orig | 27 + .../webapp/app/modules/launchpad/launchpad.js | 488 +++ .../app/modules/launchpad/launchpad.jsx | 120 + .../app/modules/launchpad/launchpad.scss | 48 + .../launchpad/launchpadBreadcrumbs.jsx | 41 + .../launchpad/launchpadFleetActions.js | 43 + .../modules/launchpad/launchpadFleetSource.js | 167 + .../modules/launchpad/launchpadFleetStore.js | 251 ++ .../launchpad_card/launchpad-card.js | 373 ++ .../launchpad_card/launchpadCard.jsx | 297 ++ .../launchpad_card/launchpadCardActions.js | 23 + .../launchpadCardCloudAccount.jsx | 40 + .../launchpadCardMgmtInterfaces.jsx | 55 + .../launchpad_card/launchpadControls.jsx | 175 + .../launchpad_card/launchpadHeader.jsx | 156 + .../launchpad_card/launchpadNSInfo.jsx | 96 + .../launchpad_card/launchpad_card.scss | 330 ++ .../launchpad_card/nsConfigPrimitives.scss | 223 ++ .../launchpad_card/nsrConfigPrimitives.jsx | 352 ++ .../cloudAccountActions.js | 40 + .../cloudAccountSource.js | 196 + .../cloudAccountStore.js | 223 ++ .../launchpadCloudAccount.jsx | 90 + .../configAgentAccount.jsx | 302 ++ .../configAgentAccount.scss | 24 + .../configAgentAccountActions.js | 40 + .../configAgentAccountSource.js | 203 + .../configAgentAccountStore.js | 200 + .../launchpadConfigAgentAccount.jsx | 66 + .../launchpadSdnAccount.jsx | 66 + .../launchpad_sdn_account/sdnAccount.jsx | 304 ++ .../launchpad_sdn_account/sdnAccount.scss | 24 + .../sdnAccountActions.js | 39 + .../launchpad_sdn_account/sdnAccountSource.js | 203 + .../launchpad_sdn_account/sdnAccountStore.js | 246 ++ .../launchpad/monitoring-params-filter.jsx | 87 + .../network_service_launcher/catalogItems.jsx | 63 + .../catalog_items.scss | 114 + .../launchNetworkService.jsx | 248 ++ .../launchNetworkService.scss | 216 ++ .../launchNetworkServiceActions.js | 41 + .../launchNetworkServiceSource.js | 202 + .../launchNetworkServiceSource.js.orig | 187 + .../launchNetworkServiceStore.js | 244 ++ .../selectDescriptor.jsx | 43 + .../specifySLAParameters.jsx | 96 + .../launchpad/recordViewer/recordCard.jsx | 96 + .../launchpad/recordViewer/recordDetails.jsx | 31 + .../recordViewer/recordNavigator.jsx | 60 + .../launchpad/recordViewer/recordView.jsx | 108 + .../recordViewer/recordViewActions.js | 32 + .../recordViewer/recordViewSource.js | 169 + .../launchpad/recordViewer/recordViewStore.js | 337 ++ .../launchpad/recordViewer/recordViewer.scss | 219 ++ .../launchpad/topologyL2View/detailView.jsx | 66 + .../topologyL2View/topologyL2Actions.js | 32 + .../topologyL2View/topologyL2Source.js | 96 + .../topologyL2View/topologyL2Store.js | 131 + .../topologyL2View/topologyL2View.jsx | 123 + .../topologyL2View/topologyL2View.scss | 146 + .../launchpad/topologyView/topologyActions.js | 34 + .../launchpad/topologyView/topologySource.js | 152 + .../launchpad/topologyView/topologyStore.js | 132 + .../launchpad/topologyView/topologyView.jsx | 119 + .../launchpad/topologyView/topologyView.scss | 73 + .../app/modules/launchpad/vnfr/vnfrActions.js | 23 + .../app/modules/launchpad/vnfr/vnfrCard.jsx | 30 + .../app/modules/launchpad/vnfr/vnfrCard.scss | 39 + .../launchpad/vnfr/vnfrCardNfviMetrics.jsx | 34 + 
.../app/modules/launchpad/vnfr/vnfrSource.js | 67 + .../app/modules/launchpad/vnfr/vnfrStore.js | 67 + .../app/modules/launchpad/vnfr/vnfrView.jsx | 51 + .../app/modules/logging/loggingActions.js | 23 + .../app/modules/logging/loggingSource.js | 63 + .../rw.ui/webapp/app/modules/login/login.js | 54 + .../rw.ui/webapp/app/modules/login/login.jsx | 66 + .../rw.ui/webapp/app/modules/login/login.scss | 31 + .../app/modules/login/loginAuthActions.js | 24 + .../account_sidebar/accountSidebar.jsx | 69 + .../account_sidebar/accountSidebar.scss | 108 + .../cloud_account/cloud-account.css | 174 + .../cloud_account/cloud-account.js | 322 ++ .../cloud_account/cloudAccount.jsx | 362 ++ .../cloud_account/cloudAccount.scss | 27 + .../cloud_account/cloudAccountActions.js | 38 + .../cloud_account/cloudAccountSource.js | 176 + .../cloud_account/cloudAccountStore.js | 93 + .../cloud_account/cloudAccountWrapper.jsx | 43 + .../management_domain/management-domain.css | 157 + .../management_domain/management-domain.js | 325 ++ .../management_domain/management-domain.scss | 39 + .../management_domain/managementDomain.jsx | 294 ++ .../managementDomainActions.js | 40 + .../managementDomainSource.js | 193 + .../managementDomainStore.js | 101 + .../managementDomainCard.jsx | 81 + .../managementDomainCard.scss | 256 ++ .../managementDomainCardHeader.jsx | 145 + .../missioncontrol/mission-control.css | 144 + .../missioncontrol/missionControlActions.js | 40 + .../missionControlDashboard.jsx | 74 + .../missionControlDashboard.scss | 30 + .../missioncontrol/missionControlSource.js | 148 + .../missioncontrol/missionControlStore.js | 138 + .../modules/missioncontrol/missioncontrol.js | 354 ++ .../app/modules/missioncontrol/pool/pool.js | 275 ++ .../app/modules/missioncontrol/pool/pool.jsx | 195 + .../app/modules/missioncontrol/pool/pool.scss | 95 + .../missioncontrol/pool/poolActions.js | 41 + .../modules/missioncontrol/pool/poolSource.js | 216 ++ .../modules/missioncontrol/pool/poolStore.js | 302 ++ .../sdn_account/createSdnAccountActions.js | 39 + .../sdn_account/createSdnAccountSource.js | 196 + .../sdn_account/createSdnAccountStore.js | 170 + .../sdn_account/sdn-account.html | 73 + .../missioncontrol/sdn_account/sdn-account.js | 187 + .../missioncontrol/sdn_account/sdnAccount.jsx | 220 ++ .../webapp/app/modules/styles/_colors.scss | 54 + .../webapp/app/modules/styles/common.scss | 87 + .../webapp/app/modules/styles/layout.scss | 62 + .../rw.ui/webapp/app/modules/utils/utils.js | 222 ++ .../views/mission-control-dashboard.html | 165 + .../webapp/app/modules/views/screenlist.html | 112 + .../webapp/app/modules/views/uptime.html | 21 + modules/ui/rw.ui/webapp/package.json | 50 + .../ui/rw.ui/webapp/public/assets/.DS_Store | Bin 0 -> 8196 bytes .../public/assets/css/config-viewer.css | 16 + .../rw.ui/webapp/public/assets/css/core.css | 2056 ++++++++++ .../rw.ui/webapp/public/assets/css/flex.css | 117 + .../webapp/public/assets/css/overwriting.css | 480 +++ .../assets/fonts/Roboto-Light-webfont.ttf | Bin 0 -> 140276 bytes .../assets/fonts/Roboto-Regular-webfont.eot | Bin 0 -> 21320 bytes .../assets/fonts/Roboto-Regular-webfont.svg | 621 +++ .../assets/fonts/Roboto-Regular-webfont.ttf | Bin 0 -> 45376 bytes .../assets/fonts/Roboto-Regular-webfont.woff | Bin 0 -> 25020 bytes .../fonts/RobotoCondensed-Bold-webfont.eot | Bin 0 -> 21457 bytes .../fonts/RobotoCondensed-Bold-webfont.svg | 643 ++++ .../fonts/RobotoCondensed-Bold-webfont.ttf | Bin 0 -> 45452 bytes .../fonts/RobotoCondensed-Bold-webfont.woff | Bin 0 -> 25144 bytes 
.../public/assets/less/accents/corners.less | 83 + .../public/assets/less/accents/states.less | 35 + .../public/assets/less/base/palette.less | 32 + .../webapp/public/assets/less/base/type.less | 72 + .../webapp/public/assets/less/base/utils.less | 30 + .../webapp/public/assets/less/base/vars.less | 6 + .../assets/less/components/config-viewer.less | 21 + .../assets/less/components/dropdowns.less | 39 + .../assets/less/components/fleet-card.less | 275 ++ .../assets/less/components/nav-panels.less | 51 + .../assets/less/components/progress-bars.less | 30 + .../public/assets/less/controls/buttons.less | 56 + .../public/assets/less/controls/slider.less | 92 + .../public/assets/less/controls/splitter.css | 22 + .../public/assets/less/controls/splitter.less | 71 + .../assets/less/controls/step-control.less | 27 + .../assets/less/controls/toggle-control.less | 31 + .../rw.ui/webapp/public/assets/less/core.css | 2030 ++++++++++ .../rw.ui/webapp/public/assets/less/core.less | 69 + .../less/launchpad/launchpad-dashboard.css | 17 + .../less/launchpad/launchpad-dashboard.less | 83 + .../public/assets/less/layout/app-body.less | 15 + .../public/assets/less/layout/footer.less | 6 + .../public/assets/less/layout/header.less | 91 + .../public/assets/less/layout/layout.less | 36 + .../webapp/public/assets/less/login.less | 55 + .../public/assets/less/mixins/flexbox.less | 266 ++ .../assets/less/screens/create-fleet.less | 202 + .../public/assets/less/screens/launchpad.css | 22 + .../public/assets/less/screens/launchpad.less | 14 + .../assets/less/screens/viewport-dash.less | 330 ++ .../assets/less/screens/viewport-so.css | 22 + .../assets/less/screens/viewport-so.less | 23 + .../assets/less/screens/viewport-vim.less | 179 + .../assets/less/screens/viewport-vnf.less | 168 + .../public/assets/less/screens/wag.less | 235 ++ .../webapp/public/assets/less/slider.less | 172 + .../webapp/public/assets/less/temp-png.less | 92 + .../assets/vendor/css-reset-2.0/css-reset.css | 21 + modules/ui/rw.ui/webapp/public/index.html | 69 + modules/ui/rw.ui/webapp/public/rw.js | 925 +++++ modules/ui/rw.ui/webapp/scripts/build.sh | 31 + modules/ui/rw.ui/webapp/scripts/launch_ui.sh | 119 + .../rw.ui/webapp/scripts/server_rw.ui_ui.py | 87 + modules/ui/rw.ui/webapp/server.js | 111 + modules/ui/rw.ui/webapp/server/bundle.js | 85 + modules/ui/rw.ui/webapp/server/package.json | 17 + modules/ui/rw.ui/webapp/webpack.config.js | 168 + .../rw.ui/webapp/webpack.production.config.js | 72 + rift-bashrc | 193 + rift-prompt | 50 + rift-shell | 160 + rift_env.py | 367 ++ scripts/vm_image/base.config.sh | 31 + scripts/vm_image/base.rpms | 1 + scripts/vm_image/build.config.sh | 29 + scripts/vm_image/build.depends | 1 + scripts/vm_image/build.pip3 | 4 + scripts/vm_image/build.rpms | 57 + scripts/vm_image/extras.depends | 1 + scripts/vm_image/extras.rpms | 0 scripts/vm_image/launchpad.depends | 2 + scripts/vm_image/launchpad.rpms | 1 + scripts/vm_image/missioncontrol.depends | 2 + scripts/vm_image/missioncontrol.rpms | 1 + scripts/vm_image/mkvmimg | 803 ++++ scripts/vm_image/riftware-release.repo | 77 + scripts/vm_image/trafgen.depends | 2 + scripts/vm_image/trafgen.rpms | 0 scripts/vm_image/ui-lab.config.sh | 38 + scripts/vm_image/ui-lab.depends | 1 + scripts/vm_image/ui.config.sh | 26 + scripts/vm_image/ui.depends | 1 + scripts/vm_image/ui.kilo | 4 + scripts/vm_image/ui.pip | 1 + scripts/vm_image/ui.pip3 | 32 + scripts/vm_image/ui.rpms | 490 +++ scripts/vm_image/vnf.config.sh | 18 + scripts/vm_image/vnf.depends | 1 + scripts/vm_image/vnf.pip3 | 
22 + scripts/vm_image/vnf.rpms | 10 + 1189 files changed, 144631 insertions(+) create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 .gitmodules.deps create mode 100644 CMakeLists.txt create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 Makefile.env create mode 100644 Makefile.top create mode 100644 RELEASE create mode 100644 RIFTWARE_COMPILATION_LICENSE create mode 100755 bin/build_ladder.sh create mode 100755 bin/catchsegv.sh create mode 100755 bin/dependency_parser.py create mode 100755 bin/dependency_sort.py create mode 100755 bin/dev.sh create mode 100755 bin/extract_rpm.sh create mode 100755 bin/generate_descriptor_pkg.sh create mode 100755 bin/generate_protopy.sh create mode 100755 bin/generate_supermodule_hash.sh create mode 100644 bin/keepers create mode 100755 bin/make_etsi_packages create mode 100755 bin/pip-install create mode 100755 bin/pip3-install create mode 100755 bin/pip3-kilo-install create mode 100755 bin/rift-lint.py create mode 100755 bin/submodule_has_failed_tests.sh create mode 100755 bin/uninitialize_cached_submodules.sh create mode 100644 modules/core/mano/.cpack-workaround create mode 100644 modules/core/mano/CMakeLists.txt create mode 100644 modules/core/mano/Makefile create mode 100644 modules/core/mano/README create mode 100644 modules/core/mano/common/CMakeLists.txt create mode 100644 modules/core/mano/common/plugins/CMakeLists.txt create mode 100644 modules/core/mano/common/plugins/rwcntmgrtasklet/CMakeLists.txt create mode 100644 modules/core/mano/common/plugins/rwcntmgrtasklet/Makefile create mode 100644 modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py create mode 100755 modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py create mode 100755 modules/core/mano/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py create mode 100644 modules/core/mano/common/plugins/yang/CMakeLists.txt create mode 100644 modules/core/mano/common/plugins/yang/rw-cloud.tailf.yang create mode 100755 modules/core/mano/common/plugins/yang/rw-cloud.yang create mode 100644 modules/core/mano/common/plugins/yang/rw-config-agent.taif.yang create mode 100755 modules/core/mano/common/plugins/yang/rw-config-agent.yang create mode 100644 modules/core/mano/common/plugins/yang/rw-sdn.tailf.yang create mode 100644 modules/core/mano/common/plugins/yang/rw-sdn.yang create mode 100644 modules/core/mano/common/python/CMakeLists.txt create mode 100644 modules/core/mano/common/python/rift/mano/cloud/__init__.py create mode 100644 modules/core/mano/common/python/rift/mano/cloud/accounts.py create mode 100644 modules/core/mano/common/python/rift/mano/cloud/config.py create mode 100644 modules/core/mano/common/python/rift/mano/cloud/operdata.py create mode 100644 modules/core/mano/common/python/rift/mano/config_agent/__init__.py create mode 100644 modules/core/mano/common/python/rift/mano/config_agent/config.py create mode 100644 modules/core/mano/common/python/rift/mano/config_agent/operdata.py create mode 100755 modules/core/mano/common/rw_gen_package.py create mode 100644 modules/core/mano/confd_client/CMakeLists.txt create mode 100644 modules/core/mano/confd_client/Makefile create mode 100644 modules/core/mano/confd_client/README create mode 100644 modules/core/mano/confd_client/confd_client.c create mode 100755 modules/core/mano/confd_client/confd_client.py create mode 100755 modules/core/mano/confd_client/confd_client.sh create mode 100755 
modules/core/mano/confd_client/test.sh create mode 100644 modules/core/mano/examples/CMakeLists.txt create mode 100644 modules/core/mano/examples/Makefile create mode 100644 modules/core/mano/examples/ping_pong_ns/CMakeLists.txt create mode 100644 modules/core/mano/examples/ping_pong_ns/Makefile create mode 100755 modules/core/mano/examples/ping_pong_ns/config_desc.py create mode 100755 modules/core/mano/examples/ping_pong_ns/generate_packages.sh.in create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/__init__.py create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.py create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.service create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.py create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.service create mode 100755 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh create mode 100755 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping.py create mode 100755 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong.py create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/test/test.sh create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/user-data create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/__init__.py create mode 100644 modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/util.py create mode 120000 modules/core/mano/examples/ping_pong_ns/ping_pong_nsd.py create mode 100644 modules/core/mano/examples/ping_pong_ns/rift/mano/__init__.py create mode 100644 modules/core/mano/examples/ping_pong_ns/rift/mano/examples/__init__.py create mode 100755 modules/core/mano/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py create mode 100644 modules/core/mano/foss.txt create mode 100644 modules/core/mano/manifest/LICENSE create mode 100644 modules/core/mano/models/CMakeLists.txt create mode 100644 modules/core/mano/models/openmano/CMakeLists.txt create mode 100644 modules/core/mano/models/openmano/bin/CMakeLists.txt create mode 100755 modules/core/mano/models/openmano/bin/add_corporation.py create mode 100755 modules/core/mano/models/openmano/bin/openmano create mode 100755 modules/core/mano/models/openmano/bin/openmano_cleanup.sh create mode 100644 modules/core/mano/models/openmano/python/CMakeLists.txt create mode 100644 modules/core/mano/models/openmano/python/rift/openmano/__init__.py create mode 100755 modules/core/mano/models/openmano/python/rift/openmano/openmano_client.py create mode 100755 modules/core/mano/models/openmano/python/rift/openmano/rift2openmano.py create mode 100644 modules/core/mano/models/openmano/src/CMakeLists.txt create mode 100755 modules/core/mano/models/openmano/src/generate_tidgen_packages.sh.in create mode 100755 modules/core/mano/models/openmano/src/openmano2rift.py create mode 100755 modules/core/mano/models/openmano/test/osm_descriptors/mwc16-gen_test.py create mode 100644 modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/6WindTR1.1.2.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/Scenarios PE- Gen.jpg create mode 100644 modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/TID-MGMTGW.yaml create mode 100644 
modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-gen.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-pe.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openmano-openvim.txt create mode 100644 modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openvim.txt create mode 100644 modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/tidgen4pLarge.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/mwc16-gen.xml create mode 100644 modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/tidgen4pLarge.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/checksums.txt create mode 100755 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/gen_pkgs.sh create mode 100755 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/mwc16-gen_test.py create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpA.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpB.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-gen.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-pe.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/6WindTR1.1.2.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/IMS-ALLin1.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen1.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen2.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpA.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpB.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-gen.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe-onevnf.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/6WindTR1.1.2.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/IMS-ALLIN1.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen1.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen2.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/20160212_openmano_RO_descriptors.zip create mode 100755 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/gen_pkgs.sh create mode 100755 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/mwc16-gen_test.py create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gw_corpA_PE1.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gw_corpA_PE2.yaml create mode 100644 
modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gwcorpA.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/IMS-allin1-corpA.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/IMS-allin1-corpB.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/ORIG_IMS-allin1-corpA.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/gwcorpA.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/mwc16-gen.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/mwc16-pe.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/6WindTR1.1.2.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/IMS-ALLin1.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/gw_corpA_PE1.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/gw_corpA_PE2.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/mwc16-gen1.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/mwc16-gen2.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpA.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpB.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/add_corpA_input.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/gwcorpA.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-gen.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe-onevnf.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe.xml create mode 100755 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/pe_config.py create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/6WindTR1.1.2.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/IMS-ALLIN1.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe1.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe2.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen1.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen2.xml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/IMS-allin1-corpA.yaml.generic create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml.generic create mode 100644 
modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/mwc16-pe.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/tidgen.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/6WindTR1.1.2.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/IMS-ALLin1_2p.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE1.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE2.yaml create mode 100644 modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/tidgen_mwc16.yaml create mode 100644 modules/core/mano/models/openmano/test/tidgen_ns_2sriov.yaml create mode 100644 modules/core/mano/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml create mode 100644 modules/core/mano/models/openmano/test/tidgen_ns_4sriov.yaml create mode 100644 modules/core/mano/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml create mode 100644 modules/core/mano/models/openmano/test/tidgen_vnf_2sriov.yaml create mode 100644 modules/core/mano/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml create mode 100644 modules/core/mano/models/openmano/test/tidgen_vnf_4sriov.yaml create mode 100644 modules/core/mano/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml create mode 100644 modules/core/mano/models/plugins/CMakeLists.txt create mode 100644 modules/core/mano/models/plugins/yang/CMakeLists.txt create mode 100644 modules/core/mano/models/plugins/yang/Makefile create mode 100644 modules/core/mano/models/plugins/yang/ietf-l2-topology.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/ietf-l2-topology.yang create mode 100644 modules/core/mano/models/plugins/yang/ietf-network-topology.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/ietf-network-topology.yang create mode 100644 modules/core/mano/models/plugins/yang/ietf-network.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/ietf-network.yang create mode 100644 modules/core/mano/models/plugins/yang/nsd.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/nsd.yang create mode 100755 modules/core/mano/models/plugins/yang/nsr.cli.xml create mode 100644 modules/core/mano/models/plugins/yang/nsr.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/nsr.yang create mode 100644 modules/core/mano/models/plugins/yang/odl-network-topology.yang create mode 100755 modules/core/mano/models/plugins/yang/pnfd.yang create mode 100644 modules/core/mano/models/plugins/yang/rw-nsd.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/rw-nsd.yang create mode 100644 modules/core/mano/models/plugins/yang/rw-nsr.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/rw-nsr.yang create mode 100644 modules/core/mano/models/plugins/yang/rw-topology.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/rw-topology.yang create mode 100644 modules/core/mano/models/plugins/yang/rw-vld.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/rw-vld.yang create mode 100644 modules/core/mano/models/plugins/yang/rw-vlr.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/rw-vlr.yang create mode 100644 modules/core/mano/models/plugins/yang/rw-vnfd.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/rw-vnfd.yang create mode 100644 
modules/core/mano/models/plugins/yang/rw-vnfr.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/rw-vnfr.yang create mode 100644 modules/core/mano/models/plugins/yang/vld.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/vld.yang create mode 100755 modules/core/mano/models/plugins/yang/vlr.cli.xml create mode 100644 modules/core/mano/models/plugins/yang/vlr.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/vlr.yang create mode 100644 modules/core/mano/models/plugins/yang/vnfd.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/vnfd.yang create mode 100755 modules/core/mano/models/plugins/yang/vnffgd.yang create mode 100755 modules/core/mano/models/plugins/yang/vnfr.cli.xml create mode 100644 modules/core/mano/models/plugins/yang/vnfr.tailf.yang create mode 100755 modules/core/mano/models/plugins/yang/vnfr.yang create mode 100644 modules/core/mano/rwcm/CMakeLists.txt create mode 100644 modules/core/mano/rwcm/plugins/CMakeLists.txt create mode 100644 modules/core/mano/rwcm/plugins/cli/cli_rwcm.xml create mode 100644 modules/core/mano/rwcm/plugins/rwconman/CMakeLists.txt create mode 100644 modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py create mode 100755 modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/juju_if.py create mode 100644 modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py create mode 100644 modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py create mode 100644 modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg create mode 100755 modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py create mode 100644 modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py create mode 100644 modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_tags.yml create mode 100755 modules/core/mano/rwcm/plugins/rwconman/rwconmantasklet.py create mode 100644 modules/core/mano/rwcm/plugins/yang/CMakeLists.txt create mode 100644 modules/core/mano/rwcm/plugins/yang/rw-conman.tailf.yang create mode 100755 modules/core/mano/rwcm/plugins/yang/rw-conman.yang create mode 100644 modules/core/mano/rwcm/test/CMakeLists.txt create mode 100644 modules/core/mano/rwcm/test/README.start_cm create mode 100644 modules/core/mano/rwcm/test/cwims_juju_nsd/configuration_input_params.yml create mode 100644 modules/core/mano/rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg create mode 100644 modules/core/mano/rwcm/test/ping_pong_nsd/configuration_input_params.yml create mode 100755 modules/core/mano/rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg create mode 100755 modules/core/mano/rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg create mode 100755 modules/core/mano/rwcm/test/rwso_test.py create mode 100755 modules/core/mano/rwcm/test/start_cm_system.py create mode 100644 modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml create mode 100644 modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg create mode 100644 modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg create mode 100644 modules/core/mano/rwlaunchpad/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwiwp/CMakeLists.txt create mode 
100644 modules/core/mano/rwlaunchpad/plugins/rwiwp/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/__init__.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/rwiwptasklet.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwiwp/rwiwptasklet.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/archive.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/checksums.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwmonitor/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwmonitor/rwmonitor.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/juju_intf.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/jujuconf_nsm.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conagent.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmconfigplugin.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py create mode 100755 
modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwresmgr/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvnfm/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/mon_params.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwvnfm/test/mon_params_test.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwvns/rwvnstasklet.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py create mode 100644 
modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py create mode 100755 modules/core/mano/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/yang/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.tailf.yang create mode 100644 modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py create mode 100644 modules/core/mano/rwlaunchpad/plugins/yang/CMakeLists.txt create mode 100644 modules/core/mano/rwlaunchpad/plugins/yang/Makefile create mode 100644 modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.tailf.yang create mode 100755 modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.yang create mode 100755 modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad-log.yang create 
mode 100644 modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang create mode 100755 modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.yang create mode 100644 modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.tailf.yang create mode 100755 modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.yang create mode 100755 modules/core/mano/rwlaunchpad/plugins/yang/rw-nsm.yang create mode 100644 modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang create mode 100755 modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.yang create mode 100755 modules/core/mano/rwlaunchpad/plugins/yang/rw-vnfm.yang create mode 100644 modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.tailf.yang create mode 100755 modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.yang create mode 100644 modules/core/mano/rwlaunchpad/ra/CMakeLists.txt create mode 100755 modules/core/mano/rwlaunchpad/ra/launchpad_longevity_systest create mode 100755 modules/core/mano/rwlaunchpad/ra/pingpong_longevity_systest create mode 100755 modules/core/mano/rwlaunchpad/ra/pingpong_lp_standalone_systest create mode 100755 modules/core/mano/rwlaunchpad/ra/pingpong_records_systest create mode 100755 modules/core/mano/rwlaunchpad/ra/pingpong_vnf_reload_systest create mode 100755 modules/core/mano/rwlaunchpad/ra/pingpong_vnf_systest create mode 100644 modules/core/mano/rwlaunchpad/ra/pytest/conftest.py create mode 100644 modules/core/mano/rwlaunchpad/ra/pytest/test_launchpad_longevity.py create mode 100644 modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_longevity.py create mode 100755 modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf.py create mode 100644 modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf_static.py create mode 100755 modules/core/mano/rwlaunchpad/ra/pytest/test_records.py create mode 100644 modules/core/mano/rwlaunchpad/ra/pytest/test_startstop.py create mode 100644 modules/core/mano/rwlaunchpad/ra/racfg/pingpong_lp_standalone_systest_openstack.racfg create mode 100644 modules/core/mano/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg create mode 100644 modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg create mode 100644 modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg create mode 100644 modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg create mode 100644 modules/core/mano/rwlaunchpad/test/CMakeLists.txt create mode 100755 modules/core/mano/rwlaunchpad/test/juju_ut.py create mode 100755 modules/core/mano/rwlaunchpad/test/launchpad.py create mode 100755 modules/core/mano/rwlaunchpad/test/launchpad_module_test create mode 100755 modules/core/mano/rwlaunchpad/test/mano_error_ut.py create mode 100755 modules/core/mano/rwlaunchpad/test/mano_ut.py create mode 100755 modules/core/mano/rwlaunchpad/test/openmano_nsm_ut.py create mode 100644 modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_test.py create mode 100644 modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py create mode 100644 modules/core/mano/rwlaunchpad/test/pytest/lp_test.py create mode 100644 modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py create mode 100644 modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py create mode 100644 modules/core/mano/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py create mode 100644 modules/core/mano/rwlaunchpad/test/racfg/launchpad_module_test.racfg create mode 100755 modules/core/mano/rwlaunchpad/test/utest_rwmonitor.py 
create mode 100755 modules/core/mano/rwlaunchpad/test/utest_rwnsm.py create mode 100755 modules/core/mano/rwlaunchpad/test/utest_uploader.py create mode 100644 modules/core/mano/rwmc/CMakeLists.txt create mode 100644 modules/core/mano/rwmc/Makefile create mode 100755 modules/core/mano/rwmc/bin/cloudsim_http_proxy.sh create mode 100644 modules/core/mano/rwmc/include/riftware/rwmc_log.h create mode 100644 modules/core/mano/rwmc/include/riftware/rwmctasklet.h create mode 100644 modules/core/mano/rwmc/plugins/CMakeLists.txt create mode 100644 modules/core/mano/rwmc/plugins/Makefile create mode 100644 modules/core/mano/rwmc/plugins/cli/cli_rwmc.xml create mode 100644 modules/core/mano/rwmc/plugins/cli/cli_rwmc_schema_listing.txt create mode 100644 modules/core/mano/rwmc/plugins/rwmctasklet/CMakeLists.txt create mode 100644 modules/core/mano/rwmc/plugins/rwmctasklet/Makefile create mode 100644 modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/__init__.py create mode 100644 modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/launchpad.py create mode 100644 modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/rwmctasklet.py create mode 100644 modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/salt.py create mode 100644 modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/util.py create mode 100755 modules/core/mano/rwmc/plugins/rwmctasklet/rwmctasklet.py create mode 100644 modules/core/mano/rwmc/plugins/yang/CMakeLists.txt create mode 100644 modules/core/mano/rwmc/plugins/yang/Makefile create mode 100755 modules/core/mano/rwmc/plugins/yang/rw-mc.cli.xml create mode 100644 modules/core/mano/rwmc/plugins/yang/rw-mc.tailf.yang create mode 100755 modules/core/mano/rwmc/plugins/yang/rw-mc.yang create mode 100644 modules/core/mano/rwmc/ra/CMakeLists.txt create mode 100755 modules/core/mano/rwmc/ra/mission_control_delete_systest create mode 100755 modules/core/mano/rwmc/ra/mission_control_negative_cloud_account_systest create mode 100755 modules/core/mano/rwmc/ra/mission_control_negative_mgmt_domain_systest create mode 100755 modules/core/mano/rwmc/ra/mission_control_negative_systest create mode 100755 modules/core/mano/rwmc/ra/mission_control_negative_vmpool_systest create mode 100755 modules/core/mano/rwmc/ra/mission_control_reload_systest create mode 100755 modules/core/mano/rwmc/ra/mission_control_systest create mode 100644 modules/core/mano/rwmc/ra/pytest/conftest.py create mode 100755 modules/core/mano/rwmc/ra/pytest/test_mission_control.py create mode 100755 modules/core/mano/rwmc/ra/pytest/test_mission_control_delete.py create mode 100755 modules/core/mano/rwmc/ra/pytest/test_mission_control_negative.py create mode 100755 modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_cloud_account.py create mode 100755 modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_mgmt_domain.py create mode 100755 modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_vmpool.py create mode 100755 modules/core/mano/rwmc/ra/pytest/test_mission_control_static.py create mode 100644 modules/core/mano/rwmc/ra/racfg/mission_control_delete_systest_cloudsim.racfg create mode 100644 modules/core/mano/rwmc/ra/racfg/mission_control_reload_systest_openstack.racfg create mode 100644 modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim.racfg create mode 100644 modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim_negative.racfg create mode 100644 
modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack.racfg create mode 100644 modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack_negative.racfg create mode 100644 modules/core/mano/rwmc/test/CMakeLists.txt create mode 100644 modules/core/mano/rwmc/test/README create mode 100755 modules/core/mano/rwmc/test/mission_control.py create mode 100755 modules/core/mano/rwmc/test/perf/dts-perf-nc.py create mode 100755 modules/core/mano/rwmc/test/perf/dts-perf-system.py create mode 100755 modules/core/mano/rwmc/test/perf/dts-perf-test.py create mode 100755 modules/core/mano/rwmc/test/perf/dts-perf-webserver.py create mode 100755 modules/core/mano/rwmc/test/perf/test.sh create mode 100644 modules/core/mano/rwso/plugins/cli/cli_so_schema_listing.txt create mode 100644 modules/ui/composer/CMakeLists.txt create mode 100644 modules/ui/composer/foss.txt create mode 100644 modules/ui/composer/manifest/LICENSE create mode 100644 modules/ui/composer/scripts/.install.sh.swp create mode 100755 modules/ui/composer/scripts/install.sh create mode 100644 modules/ui/composer/webapp/.editorconfig create mode 100644 modules/ui/composer/webapp/.eslintignore create mode 100644 modules/ui/composer/webapp/.eslintrc create mode 100644 modules/ui/composer/webapp/.gitattributes create mode 100644 modules/ui/composer/webapp/.yo-rc.json create mode 100644 modules/ui/composer/webapp/Gruntfile.js create mode 100644 modules/ui/composer/webapp/README.md create mode 100644 modules/ui/composer/webapp/codeStyleSettings.xml create mode 100644 modules/ui/composer/webapp/karma.conf.js create mode 100755 modules/ui/composer/webapp/license-flat-icon.pdf create mode 100644 modules/ui/composer/webapp/license-info.txt create mode 100644 modules/ui/composer/webapp/package.json create mode 100755 modules/ui/composer/webapp/scripts/build.sh create mode 100755 modules/ui/composer/webapp/scripts/launch_composer.sh create mode 100755 modules/ui/composer/webapp/scripts/server_composer_ui.py create mode 100755 modules/ui/composer/webapp/scripts/update-node-modules.sh create mode 100644 modules/ui/composer/webapp/src/README.md create mode 100644 modules/ui/composer/webapp/src/actions/CanvasEditorActions.js create mode 100644 modules/ui/composer/webapp/src/actions/CanvasPanelTrayActions.js create mode 100644 modules/ui/composer/webapp/src/actions/CatalogDataSourceActions.js create mode 100644 modules/ui/composer/webapp/src/actions/CatalogFilterActions.js create mode 100644 modules/ui/composer/webapp/src/actions/CatalogItemsActions.js create mode 100644 modules/ui/composer/webapp/src/actions/CatalogPackageManagerActions.js create mode 100644 modules/ui/composer/webapp/src/actions/CatalogPanelTrayActions.js create mode 100644 modules/ui/composer/webapp/src/actions/ComposerAppActions.js create mode 100644 modules/ui/composer/webapp/src/actions/ModalOverlayActions.js create mode 100644 modules/ui/composer/webapp/src/actions/PanelResizeAction.js create mode 100644 modules/ui/composer/webapp/src/actions/RiftHeaderActions.js create mode 100644 modules/ui/composer/webapp/src/alt.js create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-Black-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-BlackItalic-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-Bold-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-BoldItalic-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-Italic-webfont.woff create mode 100755 
modules/ui/composer/webapp/src/assets/Roboto-Light-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-LightItalic-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-Medium-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-MediumItalic-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-Regular-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-Thin-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/Roboto-ThinItalic-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/RobotoCondensed-Bold-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/RobotoCondensed-BoldItalic-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/RobotoCondensed-Italic-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/RobotoCondensed-Light-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/RobotoCondensed-LightItalic-webfont.woff create mode 100755 modules/ui/composer/webapp/src/assets/RobotoCondensed-Regular-webfont.woff create mode 100644 modules/ui/composer/webapp/src/assets/big-honking-catalog.json create mode 100644 modules/ui/composer/webapp/src/assets/empty-nsd-catalog.json create mode 100644 modules/ui/composer/webapp/src/assets/favicons/android-chrome-144x144.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/android-chrome-192x192.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/android-chrome-36x36.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/android-chrome-48x48.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/android-chrome-72x72.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/android-chrome-96x96.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-114x114.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-120x120.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-144x144.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-152x152.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-180x180.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-57x57.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-60x60.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-72x72.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-76x76.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-precomposed.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/browserconfig.xml create mode 100644 modules/ui/composer/webapp/src/assets/favicons/favicon-16x16.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/favicon-194x194.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/favicon-32x32.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/favicon-96x96.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/favicon.ico create mode 100644 modules/ui/composer/webapp/src/assets/favicons/manifest.json create mode 100644 
modules/ui/composer/webapp/src/assets/favicons/mstile-144x144.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/mstile-150x150.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/mstile-310x150.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/mstile-310x310.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/mstile-70x70.png create mode 100644 modules/ui/composer/webapp/src/assets/favicons/safari-pinned-tab.svg create mode 100644 modules/ui/composer/webapp/src/assets/juju-catalog.json create mode 100644 modules/ui/composer/webapp/src/assets/onvelocity-color-theme.json create mode 100644 modules/ui/composer/webapp/src/assets/ping-pong-catalog.json create mode 100644 modules/ui/composer/webapp/src/assets/ping-vrouter-pong-catalog.json create mode 100644 modules/ui/composer/webapp/src/assets/rift.ware-color-theme.json create mode 100644 modules/ui/composer/webapp/src/components/Button.js create mode 100644 modules/ui/composer/webapp/src/components/CanvasPanel.js create mode 100644 modules/ui/composer/webapp/src/components/CanvasPanelTray.js create mode 100644 modules/ui/composer/webapp/src/components/CanvasZoom.js create mode 100644 modules/ui/composer/webapp/src/components/CatalogFilter.js create mode 100644 modules/ui/composer/webapp/src/components/CatalogItemCanvasEditor.js create mode 100644 modules/ui/composer/webapp/src/components/CatalogItemDetailsEditor.js create mode 100644 modules/ui/composer/webapp/src/components/CatalogItems.js create mode 100644 modules/ui/composer/webapp/src/components/CatalogPackageManager.js create mode 100644 modules/ui/composer/webapp/src/components/CatalogPanel.js create mode 100644 modules/ui/composer/webapp/src/components/CatalogPanelToolbar.js create mode 100644 modules/ui/composer/webapp/src/components/CatalogPanelTray.js create mode 100644 modules/ui/composer/webapp/src/components/ComposerApp.js create mode 100644 modules/ui/composer/webapp/src/components/ComposerAppToolbar.js create mode 100644 modules/ui/composer/webapp/src/components/ContentEditableDiv.js create mode 100644 modules/ui/composer/webapp/src/components/DetailsPanel.js create mode 100644 modules/ui/composer/webapp/src/components/DropTarget.js create mode 100644 modules/ui/composer/webapp/src/components/DropZonePanel.js create mode 100644 modules/ui/composer/webapp/src/components/EditDescriptorModelProperties.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/ConnectionPointSelector.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/EditForwardingGraphPaths.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/EditableProperty.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/mapClassifier.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/mapConnectionPoint.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/mapRecordServicePath.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/onClickSelectAndShowInDetailsPanel.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/onCutDelegateToRemove.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/onFormInputChangedModifyContainerAndNotify.js create mode 100644 modules/ui/composer/webapp/src/components/EditorForwardingGraph/onHoverHighlightConnectionPoint.js create 
mode 100644 modules/ui/composer/webapp/src/components/JSONViewer.js create mode 100644 modules/ui/composer/webapp/src/components/LayoutRow.js create mode 100644 modules/ui/composer/webapp/src/components/LoadingIndicator.js create mode 100644 modules/ui/composer/webapp/src/components/ModalOverlay.js create mode 100644 modules/ui/composer/webapp/src/components/PopupWindow.js create mode 100644 modules/ui/composer/webapp/src/components/Range.js create mode 100644 modules/ui/composer/webapp/src/components/RiftHeader.js create mode 100644 modules/ui/composer/webapp/src/components/messages.js create mode 100644 modules/ui/composer/webapp/src/favicon.ico create mode 100644 modules/ui/composer/webapp/src/images/default-catalog-icon.svg create mode 100644 modules/ui/composer/webapp/src/images/default-icon-white.svg create mode 100644 modules/ui/composer/webapp/src/images/default-icon.svg create mode 100644 modules/ui/composer/webapp/src/images/header-logo.png create mode 100644 modules/ui/composer/webapp/src/images/logos/riftio.png create mode 100644 modules/ui/composer/webapp/src/images/osm_header_253x50.png create mode 100644 modules/ui/composer/webapp/src/images/osm_header_506x100.png create mode 100644 modules/ui/composer/webapp/src/images/riftio_website_logo_002_03.png create mode 100644 modules/ui/composer/webapp/src/images/sample-catalog.png create mode 100644 modules/ui/composer/webapp/src/images/vendor-riftio.png create mode 100644 modules/ui/composer/webapp/src/index.html create mode 100644 modules/ui/composer/webapp/src/libraries/CatalogPackageManagerUploadDropZone.js create mode 100644 modules/ui/composer/webapp/src/libraries/ColorGroups.js create mode 100644 modules/ui/composer/webapp/src/libraries/DeletionManager.js create mode 100644 modules/ui/composer/webapp/src/libraries/InstanceCounter.js create mode 100644 modules/ui/composer/webapp/src/libraries/ResizableManager.js create mode 100644 modules/ui/composer/webapp/src/libraries/SelectionManager.js create mode 100644 modules/ui/composer/webapp/src/libraries/ToggleElementHandler.js create mode 100644 modules/ui/composer/webapp/src/libraries/TooltipManager.js create mode 100644 modules/ui/composer/webapp/src/libraries/UniqueId.js create mode 100644 modules/ui/composer/webapp/src/libraries/getEventPath.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/DescriptorGraph.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/DescriptorGraphGrid.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/DescriptorGraphPathBuilder.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/DescriptorGraphSelection.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphConnectionPointNumber.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphConstituentVnfd.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphDescriptorModel.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphForwardingGraph.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphInternalVirtualLink.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphInternalVirtualLinkPaths.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphNetworkService.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphRecordServicePath.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphVirtualDeploymentUnit.js create mode 100644 
modules/ui/composer/webapp/src/libraries/graph/GraphVirtualLink.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphVirtualLinkPaths.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/GraphVirtualNetworkFunction.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/HighlightRecordServicePaths.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/PathBuilder.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/Position.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/layouts/RelationsAndNetworksLayout.js create mode 100644 modules/ui/composer/webapp/src/libraries/graph/math.js create mode 100644 modules/ui/composer/webapp/src/libraries/guid.js create mode 100644 modules/ui/composer/webapp/src/libraries/isFullScreen.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/DescriptorModel.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/DescriptorModelFactory.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/DescriptorModelFields.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/DescriptorModelMeta.json create mode 100644 modules/ui/composer/webapp/src/libraries/model/DescriptorModelMetaFactory.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/DescriptorModelMetaProperty.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/DescriptorModelSerializer.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/DescriptorTemplateFactory.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/DescriptorTemplates.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/IconFactory.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/Classifier.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/ClassifierConnectionPointRef.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/ClassifierMatchAttributes.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/ConnectionPoint.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/ConstituentVnfd.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/ConstituentVnfdConnectionPoint.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/ForwardingGraph.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/InternalConnectionPoint.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/InternalConnectionPointRef.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/InternalVirtualLink.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/NetworkService.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/NetworkServiceConnectionPoint.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/PhysicalNetworkFunction.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/RecordServicePath.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/RspConnectionPointRef.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/VirtualDeploymentUnit.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/VirtualDeploymentUnitInternalConnectionPoint.js create mode 100644 
modules/ui/composer/webapp/src/libraries/model/descriptors/VirtualLink.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/VirtualNetworkFunction.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/VirtualNetworkFunctionConnectionPoint.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/VirtualNetworkFunctionReadOnlyWrapper.js create mode 100644 modules/ui/composer/webapp/src/libraries/model/descriptors/VnfdConnectionPointRef.js create mode 100644 modules/ui/composer/webapp/src/libraries/utils.js create mode 100644 modules/ui/composer/webapp/src/libraries/zoomFactor.js create mode 100644 modules/ui/composer/webapp/src/sources/CatalogDataSource.js create mode 100644 modules/ui/composer/webapp/src/sources/CatalogPackageManagerSource.js create mode 100644 modules/ui/composer/webapp/src/sources/RiftHeaderSource.js create mode 100644 modules/ui/composer/webapp/src/stores/CatalogDataStore.js create mode 100644 modules/ui/composer/webapp/src/stores/CatalogPackageManagerStore.js create mode 100644 modules/ui/composer/webapp/src/stores/CatalogPanelStore.js create mode 100644 modules/ui/composer/webapp/src/stores/ComposerAppStore.js create mode 100644 modules/ui/composer/webapp/src/stores/ModalOverlayStore.js create mode 100644 modules/ui/composer/webapp/src/stores/RiftHeaderStore.js create mode 100644 modules/ui/composer/webapp/src/styles/Animations.scss create mode 100644 modules/ui/composer/webapp/src/styles/AppRoot.scss create mode 100644 modules/ui/composer/webapp/src/styles/Button.scss create mode 100644 modules/ui/composer/webapp/src/styles/CanvasPanel.scss create mode 100644 modules/ui/composer/webapp/src/styles/CanvasPanelTray.scss create mode 100644 modules/ui/composer/webapp/src/styles/CanvasZoom.scss create mode 100644 modules/ui/composer/webapp/src/styles/CatalogFilter.scss create mode 100644 modules/ui/composer/webapp/src/styles/CatalogItemCanvasEditor.scss create mode 100644 modules/ui/composer/webapp/src/styles/CatalogItems.scss create mode 100644 modules/ui/composer/webapp/src/styles/CatalogPackageManager.scss create mode 100644 modules/ui/composer/webapp/src/styles/CatalogPanel.scss create mode 100644 modules/ui/composer/webapp/src/styles/CatalogPanelToolbar.scss create mode 100644 modules/ui/composer/webapp/src/styles/CatalogPanelTray.scss create mode 100644 modules/ui/composer/webapp/src/styles/ComposerAppToolbar.scss create mode 100644 modules/ui/composer/webapp/src/styles/ConnectionPointSelector.scss create mode 100644 modules/ui/composer/webapp/src/styles/DataOpenCloseIcon.scss create mode 100644 modules/ui/composer/webapp/src/styles/DescriptorGraph.scss create mode 100644 modules/ui/composer/webapp/src/styles/DetailsPanel.scss create mode 100644 modules/ui/composer/webapp/src/styles/DropZonePanel.scss create mode 100644 modules/ui/composer/webapp/src/styles/EditDescriptorModelProperties.scss create mode 100644 modules/ui/composer/webapp/src/styles/EditForwardingGraphPaths.scss create mode 100644 modules/ui/composer/webapp/src/styles/EditableProperty.scss create mode 100644 modules/ui/composer/webapp/src/styles/FileUploadsList.scss create mode 100644 modules/ui/composer/webapp/src/styles/FileUploadsToolbar.scss create mode 100644 modules/ui/composer/webapp/src/styles/FullScreen.scss create mode 100644 modules/ui/composer/webapp/src/styles/GraphDescriptorModel.scss create mode 100644 modules/ui/composer/webapp/src/styles/GraphRecordServicePaths.scss create mode 100644 
modules/ui/composer/webapp/src/styles/GraphVirtualLink.scss create mode 100644 modules/ui/composer/webapp/src/styles/JSONViewer.scss create mode 100644 modules/ui/composer/webapp/src/styles/LayoutRow.scss create mode 100644 modules/ui/composer/webapp/src/styles/LoadingIndicator.scss create mode 100644 modules/ui/composer/webapp/src/styles/ModalOverlay.scss create mode 100644 modules/ui/composer/webapp/src/styles/ResizableManager.scss create mode 100644 modules/ui/composer/webapp/src/styles/RiftHeader.scss create mode 100644 modules/ui/composer/webapp/src/styles/ToggleElement.scss create mode 100644 modules/ui/composer/webapp/src/styles/TooltipManager.scss create mode 100644 modules/ui/composer/webapp/src/styles/_ColorGroups.scss create mode 100644 modules/ui/composer/webapp/src/styles/_main.scss create mode 100644 modules/ui/composer/webapp/src/styles/_variables.scss create mode 100644 modules/ui/composer/webapp/test/helpers/pack/phantomjs-shims.js create mode 100644 modules/ui/composer/webapp/test/helpers/test-clean-input-output-model.json create mode 100644 modules/ui/composer/webapp/test/spec/components/ButtonSpec.js create mode 100644 modules/ui/composer/webapp/test/spec/libraries/DescriptorModelFactorySpec.js create mode 100644 modules/ui/composer/webapp/test/spec/libraries/DescriptorModelSpec.js create mode 100644 modules/ui/composer/webapp/test/spec/libraries/SelectionManagerSpec.js create mode 100644 modules/ui/composer/webapp/test/uploadServer/package.json create mode 100644 modules/ui/composer/webapp/test/uploadServer/server.js create mode 100644 modules/ui/composer/webapp/webpack.config.js create mode 100644 modules/ui/composer/webapp/webpack.dist.config.js create mode 100644 modules/ui/rw.ui/CMakeLists.txt create mode 100644 modules/ui/rw.ui/api/about/about.js create mode 100644 modules/ui/rw.ui/api/cloud_account/cloudAccount.js create mode 100644 modules/ui/rw.ui/api/common/constants.js create mode 100644 modules/ui/rw.ui/api/debug/debug.js create mode 100644 modules/ui/rw.ui/api/launchpad/epa_aggregator.js create mode 100644 modules/ui/rw.ui/api/launchpad/launchpad.js create mode 100644 modules/ui/rw.ui/api/launchpad/transforms.js create mode 100644 modules/ui/rw.ui/api/logging/logging.js create mode 100644 modules/ui/rw.ui/api/missioncontrol/missionControl.js create mode 100644 modules/ui/rw.ui/api/package.json create mode 100644 modules/ui/rw.ui/api/routes.js create mode 100644 modules/ui/rw.ui/api/routes/launchpad.js create mode 100644 modules/ui/rw.ui/api/routes/mission-control.js create mode 100644 modules/ui/rw.ui/api/sdn_account/sdnAccount.js create mode 100644 modules/ui/rw.ui/api/server.js create mode 100644 modules/ui/rw.ui/api/sockets.js create mode 100644 modules/ui/rw.ui/api/utils/utils.js create mode 100644 modules/ui/rw.ui/foss.txt create mode 100644 modules/ui/rw.ui/manifest/LICENSE create mode 100755 modules/ui/rw.ui/scripts/install_api.sh create mode 100755 modules/ui/rw.ui/scripts/install_ui.sh create mode 100644 modules/ui/rw.ui/webapp/README.md create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/android-chrome-144x144.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/android-chrome-192x192.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/android-chrome-36x36.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/android-chrome-48x48.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/android-chrome-72x72.png create mode 100644 
modules/ui/rw.ui/webapp/app/assets/favicons/android-chrome-96x96.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-114x114.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-120x120.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-144x144.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-152x152.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-180x180.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-57x57.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-60x60.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-72x72.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-76x76.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon-precomposed.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/apple-touch-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/browserconfig.xml create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/favicon-16x16.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/favicon-194x194.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/favicon-32x32.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/favicon-96x96.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/favicon.ico create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/manifest.json create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/mstile-144x144.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/mstile-150x150.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/mstile-310x150.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/mstile-310x310.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/mstile-70x70.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/favicons/safari-pinned-tab.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/avatar.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/bargraph.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/bearer-plane-diagram.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/bg-tile-cross-small.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/catalog-default.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/create-account.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/create-fleet-params-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/create-fleet-pool-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/create-fleet-services-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/diameter-openflow-lte-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/firewall-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/gbps-10.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/gbps-50.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/green-page-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/header-logo.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/host-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/icon-host-sm.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/icon-host.png create mode 100644 
modules/ui/rw.ui/webapp/app/assets/img/icon-open-viewport.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/icon-switch.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/iot-industry-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/iot-medical-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/iot-transportation-icon-active.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/ip-lte-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/ip-softgre-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/latency.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/latency_graph.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-add-fleet-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-graph-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-graphs-temp/10.09.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-graphs-temp/17.23.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-graphs-temp/20.05.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-graphs-temp/23.08.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-graphs-temp/25.03.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-graphs-temp/30.56.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-graphs-temp/7.08.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/launchpad-graphs-temp/9.24.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/link.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/loadbalance-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/lte-mme-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/osm_header.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/osm_header_253x50.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/osm_header_506x100.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/page_loader.gif create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-nav-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/1a-fleet-platform-resources.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/1a-fleet-platform-traffic.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/1b-fleet-platform-resources.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/1b-fleet-platform-traffic.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/2a-fleet-platform-resources.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/2a-fleet-platform-traffic.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/2b-fleet-platform-resources.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/2b-fleet-platform-traffic.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/3a-fleet-platform-resources.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/3a-fleet-platform-traffic.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/3b-fleet-platform-resources.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/platform-pages-temp/3b-fleet-platform-traffic.png create mode 100644 
modules/ui/rw.ui/webapp/app/assets/img/router-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/so-pages-temp/ipTrafTemp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/so-pages-temp/securityTemp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-icn-close.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-icn-edit.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-01-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-01-inactive.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-02-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-02-inactive.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-03-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-03-inactive.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-04-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-04-inactive.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-05-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-05-inactive.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-06-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-06-inactive.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-07-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-07-inactive.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-08-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-08-inactive.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-09-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-09-inactive.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-10-active.svg create mode 100755 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-10-inactive.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-11-active.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-network-11-inactive.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-pool-01-active.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-pool-01-inactive.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-pool-02-active.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launch-fleet-pool-02-inactive.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launchpad-icn-create-environment-large.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launchpad-icn-newTab.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launchpad-icn-play.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/svg/launchpad-icn-sliders.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/switch-icon.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/table-cell-bg.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/traffic-sim-diagram.png create mode 
100644 modules/ui/rw.ui/webapp/app/assets/img/tunnels.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-dash-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-dash-v2-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-dash-v3-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-nav-bottom.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-nav-center.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-nav-left.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-nav-right.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-nav-top.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-platform-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-sla-graph.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-vim-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-vnf-10.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-vnf-50.svg create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/viewport-vnf-temp.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/vim-icon-corners.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/vim-icon-diamond.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/vim-icon-halfcircle-bottom.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/vim-icon-halfcircle-top.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/vim-icon-plus.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/vim-icon-soliddiamond.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/vim-icon-solidsquare.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/vim-icon-solidstar.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/img/vim-icon-triangle.png create mode 100644 modules/ui/rw.ui/webapp/app/assets/js/guid.js create mode 100644 modules/ui/rw.ui/webapp/app/assets/js/n3-line-chart.js create mode 100644 modules/ui/rw.ui/webapp/app/main.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/about/about.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/about/about.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/about/aboutActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/about/aboutSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/about/aboutStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/bullet/bullet.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/button/button.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/button/rw.button.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/carousel/ButtonEventListener.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/carousel/carousel-react.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/carousel/carousel-react.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/carousel/carousel.css create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/carousel/carousel.html create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/carousel/carousel.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/carousel/components.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/carousel/multicomponent.js create mode 100644 
modules/ui/rw.ui/webapp/app/modules/components/carousel/test.html create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/components.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/dashboard_card/dashboardCardHeader.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/dashboard_card/dashboard_card.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/dashboard_card/dashboard_card.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/filter/filter.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/gauge/gauge.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/header/header.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/header/header.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/header/headerActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/header/headerStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/input-range-slider/input-range-slider.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/input-range-slider/input-range-slider.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/listy/listy.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/loading-indicator/loading-indicator-animations.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/loading-indicator/loadingIndicator.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/mixins/ButtonEventListener.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/monitoring_params/monitoringParamComponents.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/monitoring_params/monitoringParamsCarousel.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/monitoring_params/monitoring_params.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/multicomponent/multicomponent.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/nfvi-metric-bars/nfviMetricBars.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/nfvi-metric-bars/nfviMetricBars.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/operational-status/launchpadOperationalStatus.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/radio-button/rw.radio-button.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/screen-loader/screenLoader.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/text-area/rw.text-area.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/text-input/check-box/rw.check-box.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/text-input/check-box/rw.check-box2.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/text-input/rw.text-input.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/topology/topologyL2Graph.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/topology/topologyTree.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/topology/topologyTree.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/transmit-receive/transmit-receive.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/transmit-receive/transmit-receive.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/components/transmit-receive/transmit-receive.scss create mode 100644 
modules/ui/rw.ui/webapp/app/modules/components/uptime/uptime.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/core/alt.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/core/app.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/debug/crash.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/debug/crash.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/debug/crashActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/debug/crashSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/debug/crashStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/account_sidebar/accountSidebar.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/account_sidebar/accountSidebar.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/cloud-account.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/createActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/createSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/createStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad-create.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad-dashboard.html.orig create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpadBreadcrumbs.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpadFleetActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpadFleetSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpadFleetStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/launchpad-card.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/launchpadCard.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/launchpadCardActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/launchpadCardCloudAccount.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/launchpadCardMgmtInterfaces.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/launchpadControls.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/launchpadHeader.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/launchpadNSInfo.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/launchpad_card.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/nsConfigPrimitives.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_card/nsrConfigPrimitives.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_cloud_account/cloudAccountActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_cloud_account/cloudAccountSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_cloud_account/cloudAccountStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_cloud_account/launchpadCloudAccount.jsx create mode 100644 
modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_config_agent_account/configAgentAccount.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_config_agent_account/configAgentAccount.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_config_agent_account/configAgentAccountActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_config_agent_account/configAgentAccountSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_config_agent_account/configAgentAccountStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_config_agent_account/launchpadConfigAgentAccount.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_sdn_account/launchpadSdnAccount.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_sdn_account/sdnAccount.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_sdn_account/sdnAccount.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_sdn_account/sdnAccountActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_sdn_account/sdnAccountSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/launchpad_sdn_account/sdnAccountStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/monitoring-params-filter.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/catalogItems.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/catalog_items.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/launchNetworkService.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/launchNetworkService.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/launchNetworkServiceActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/launchNetworkServiceSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/launchNetworkServiceSource.js.orig create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/launchNetworkServiceStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/selectDescriptor.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/network_service_launcher/specifySLAParameters.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/recordViewer/recordCard.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/recordViewer/recordDetails.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/recordViewer/recordNavigator.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/recordViewer/recordView.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/recordViewer/recordViewActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/recordViewer/recordViewSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/recordViewer/recordViewStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/recordViewer/recordViewer.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyL2View/detailView.jsx create mode 100644 
modules/ui/rw.ui/webapp/app/modules/launchpad/topologyL2View/topologyL2Actions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyL2View/topologyL2Source.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyL2View/topologyL2Store.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyL2View/topologyL2View.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyL2View/topologyL2View.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyView/topologyActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyView/topologySource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyView/topologyStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyView/topologyView.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/topologyView/topologyView.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/vnfr/vnfrActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/vnfr/vnfrCard.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/vnfr/vnfrCard.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/vnfr/vnfrCardNfviMetrics.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/vnfr/vnfrSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/vnfr/vnfrStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/launchpad/vnfr/vnfrView.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/logging/loggingActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/logging/loggingSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/login/login.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/login/login.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/login/login.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/login/loginAuthActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/account_sidebar/accountSidebar.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/account_sidebar/accountSidebar.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/cloud_account/cloud-account.css create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/cloud_account/cloud-account.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/cloud_account/cloudAccount.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/cloud_account/cloudAccount.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/cloud_account/cloudAccountActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/cloud_account/cloudAccountSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/cloud_account/cloudAccountStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/cloud_account/cloudAccountWrapper.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/management-domain.css create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/management-domain.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/management-domain.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/managementDomain.jsx create mode 100644 
modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/managementDomainActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/managementDomainSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/managementDomainStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/management_domain_card/managementDomainCard.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/management_domain_card/managementDomainCard.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/management_domain/management_domain_card/managementDomainCardHeader.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/mission-control.css create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/missionControlActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/missionControlDashboard.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/missionControlDashboard.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/missionControlSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/missionControlStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/missioncontrol.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/pool/pool.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/pool/pool.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/pool/pool.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/pool/poolActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/pool/poolSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/pool/poolStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/sdn_account/createSdnAccountActions.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/sdn_account/createSdnAccountSource.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/sdn_account/createSdnAccountStore.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/sdn_account/sdn-account.html create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/sdn_account/sdn-account.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/missioncontrol/sdn_account/sdnAccount.jsx create mode 100644 modules/ui/rw.ui/webapp/app/modules/styles/_colors.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/styles/common.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/styles/layout.scss create mode 100644 modules/ui/rw.ui/webapp/app/modules/utils/utils.js create mode 100644 modules/ui/rw.ui/webapp/app/modules/views/mission-control-dashboard.html create mode 100644 modules/ui/rw.ui/webapp/app/modules/views/screenlist.html create mode 100644 modules/ui/rw.ui/webapp/app/modules/views/uptime.html create mode 100644 modules/ui/rw.ui/webapp/package.json create mode 100644 modules/ui/rw.ui/webapp/public/assets/.DS_Store create mode 100644 modules/ui/rw.ui/webapp/public/assets/css/config-viewer.css create mode 100644 modules/ui/rw.ui/webapp/public/assets/css/core.css create mode 100644 modules/ui/rw.ui/webapp/public/assets/css/flex.css create mode 100644 modules/ui/rw.ui/webapp/public/assets/css/overwriting.css create mode 100755 
modules/ui/rw.ui/webapp/public/assets/fonts/Roboto-Light-webfont.ttf create mode 100755 modules/ui/rw.ui/webapp/public/assets/fonts/Roboto-Regular-webfont.eot create mode 100755 modules/ui/rw.ui/webapp/public/assets/fonts/Roboto-Regular-webfont.svg create mode 100755 modules/ui/rw.ui/webapp/public/assets/fonts/Roboto-Regular-webfont.ttf create mode 100755 modules/ui/rw.ui/webapp/public/assets/fonts/Roboto-Regular-webfont.woff create mode 100755 modules/ui/rw.ui/webapp/public/assets/fonts/RobotoCondensed-Bold-webfont.eot create mode 100755 modules/ui/rw.ui/webapp/public/assets/fonts/RobotoCondensed-Bold-webfont.svg create mode 100755 modules/ui/rw.ui/webapp/public/assets/fonts/RobotoCondensed-Bold-webfont.ttf create mode 100755 modules/ui/rw.ui/webapp/public/assets/fonts/RobotoCondensed-Bold-webfont.woff create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/accents/corners.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/accents/states.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/base/palette.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/base/type.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/base/utils.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/base/vars.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/components/config-viewer.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/components/dropdowns.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/components/fleet-card.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/components/nav-panels.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/components/progress-bars.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/controls/buttons.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/controls/slider.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/controls/splitter.css create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/controls/splitter.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/controls/step-control.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/controls/toggle-control.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/core.css create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/core.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/launchpad/launchpad-dashboard.css create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/launchpad/launchpad-dashboard.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/layout/app-body.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/layout/footer.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/layout/header.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/layout/layout.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/login.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/mixins/flexbox.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/screens/create-fleet.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/screens/launchpad.css create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/screens/launchpad.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/screens/viewport-dash.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/screens/viewport-so.css create mode 100644 
modules/ui/rw.ui/webapp/public/assets/less/screens/viewport-so.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/screens/viewport-vim.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/screens/viewport-vnf.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/screens/wag.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/slider.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/less/temp-png.less create mode 100644 modules/ui/rw.ui/webapp/public/assets/vendor/css-reset-2.0/css-reset.css create mode 100644 modules/ui/rw.ui/webapp/public/index.html create mode 100644 modules/ui/rw.ui/webapp/public/rw.js create mode 100755 modules/ui/rw.ui/webapp/scripts/build.sh create mode 100755 modules/ui/rw.ui/webapp/scripts/launch_ui.sh create mode 100755 modules/ui/rw.ui/webapp/scripts/server_rw.ui_ui.py create mode 100644 modules/ui/rw.ui/webapp/server.js create mode 100644 modules/ui/rw.ui/webapp/server/bundle.js create mode 100644 modules/ui/rw.ui/webapp/server/package.json create mode 100644 modules/ui/rw.ui/webapp/webpack.config.js create mode 100644 modules/ui/rw.ui/webapp/webpack.production.config.js create mode 100644 rift-bashrc create mode 100644 rift-prompt create mode 100755 rift-shell create mode 100644 rift_env.py create mode 100644 scripts/vm_image/base.config.sh create mode 100644 scripts/vm_image/base.rpms create mode 100644 scripts/vm_image/build.config.sh create mode 100644 scripts/vm_image/build.depends create mode 100644 scripts/vm_image/build.pip3 create mode 100644 scripts/vm_image/build.rpms create mode 100644 scripts/vm_image/extras.depends create mode 100644 scripts/vm_image/extras.rpms create mode 100644 scripts/vm_image/launchpad.depends create mode 100644 scripts/vm_image/launchpad.rpms create mode 100644 scripts/vm_image/missioncontrol.depends create mode 100644 scripts/vm_image/missioncontrol.rpms create mode 100755 scripts/vm_image/mkvmimg create mode 100644 scripts/vm_image/riftware-release.repo create mode 100644 scripts/vm_image/trafgen.depends create mode 100644 scripts/vm_image/trafgen.rpms create mode 100644 scripts/vm_image/ui-lab.config.sh create mode 100644 scripts/vm_image/ui-lab.depends create mode 100644 scripts/vm_image/ui.config.sh create mode 100644 scripts/vm_image/ui.depends create mode 100644 scripts/vm_image/ui.kilo create mode 100644 scripts/vm_image/ui.pip create mode 100644 scripts/vm_image/ui.pip3 create mode 100644 scripts/vm_image/ui.rpms create mode 100644 scripts/vm_image/vnf.config.sh create mode 100644 scripts/vm_image/vnf.depends create mode 100644 scripts/vm_image/vnf.pip3 create mode 100644 scripts/vm_image/vnf.rpms diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ce8509f --- /dev/null +++ b/.gitignore @@ -0,0 +1,28 @@ +.build +*.tgz +.install +cmake/ +etc/ +modules/automation/ +modules/core/enablement/ +modules/core/mgmt/ +modules/core/rwvx/ +modules/core/util/ +modules/ext/ +modules/yang_composite/ +rwbase/ +scripts/cloud/ +scripts/env/ +scripts/install +scripts/ldap +scripts/nagios +scripts/packaging/ +scripts/rift-scripts.sh.in +scripts/rpm/ +scripts/system +scripts/test/ +scripts/util/ +.gitmodules.deps.orig +.gitmodules.orig +modules/toolchain/ +scripts/CMakeLists.txt diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..691f254 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,45 @@ +[submodule "modules/toolchain"] + path = modules/toolchain + url = ../modules/toolchain.git +[submodule "modules/ext/lib"] + path = 
modules/ext/lib + url = ../modules/ext/lib.git +[submodule "modules/core/util"] + path = modules/core/util + url = ../modules/core/util.git +[submodule "modules/ext/gnome"] + path = modules/ext/gnome + url = ../modules/ext/gnome.git +[submodule "modules/ext/ipc"] + path = modules/ext/ipc + url = ../modules/ext/ipc.git +[submodule "modules/ext/util"] + path = modules/ext/util + url = ../modules/ext/util.git +[submodule "modules/ext/yang"] + path = modules/ext/yang + url = ../modules/ext/yang.git +[submodule "modules/core/mgmt"] + path = modules/core/mgmt + url = ../modules/core/mgmt.git +[submodule "modules/core/rwvx"] + path = modules/core/rwvx + url = ../modules/core/rwvx.git +[submodule "modules/ext/mgmt"] + path = modules/ext/mgmt + url = ../modules/ext/mgmt.git +[submodule "modules/automation/core"] + path = modules/automation/core + url = ../modules/automation/core.git +[submodule "modules/ext/go"] + path = modules/ext/go + url = ../modules/ext/go.git +[submodule "modules/ui/rw.ui"] + path = modules/ui/rw.ui + url = ../modules/ui/rw.ui.git +[submodule "modules/ui/composer"] + path = modules/ui/composer + url = ../modules/ui/composer.git +[submodule "modules/core/mano"] + path = modules/core/mano + url = ../modules/core/mano.git diff --git a/.gitmodules.deps b/.gitmodules.deps new file mode 100644 index 0000000..3fd5eb9 --- /dev/null +++ b/.gitmodules.deps @@ -0,0 +1,24 @@ +// specify the sub"modules that each submodule depends on +// for example +// "modules/core/util" -> "modules/ext/gnome" + +strict digraph dependencies { + // modules/core/util dependencies + "modules/core/util" -> "modules/ext/mgmt" + "modules/core/util" -> "modules/ext/yang" + "modules/core/util" -> "modules/ext/go" + "modules/core/util" -> "modules/ext/cloud" + "modules/core/util" -> "modules/automation/core" + + "modules/ext/util" -> "modules/ext/gnome" + "modules/ext/util" -> "modules/ext/ipc" + "modules/ext/yang" -> "modules/ext/util" + + "modules/core/rwvx" -> "modules/core/util" + + "modules/core/mgmt" -> "modules/core/rwvx" + + "modules/core/mano" -> "modules/core/mgmt" + "modules/core/mano" -> "modules/ui/rw.ui" + "modules/core/mano" -> "modules/ui/composer" +} diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..26e045e --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,496 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 8/29/2013 +# + +cmake_minimum_required(VERSION 2.8) + + +# this block should be at the top of every CMakleLists.txt +# this sets up project root dir and module path +get_filename_component(PROJECT_TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR} ABSOLUTE) +message("PROJECT_TOP_DIR = ${PROJECT_TOP_DIR}") +set(CMAKE_MODULE_PATH ${PROJECT_TOP_DIR}/cmake/modules) +include(rift_globals) +include(rift_build_utils) +include(rift_externalproject) +cmake_policy(SET CMP0017 NEW) + + +## +# Fetch the module dependencies +# For each submodule that is checked out, this target will determine the +# dependencies. For each dependency, this target tries to populate the cache. +# If the cache doesn't exist for the dependency, the submodule is checkedout +# to build locally. 
+## +configure_file( + ${PROJECT_TOP_DIR}/cmake/modules/rift_fetch_dependencies.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/rift_fetch_dependencies.cmake + ESCAPE_QUOTES @ONLY + ) + +# Custom target for fetching the dependencies +add_custom_target(fetch-dependencies + ${CMAKE_COMMAND} + -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} + -DPROJECT_TOP_DIR=${PROJECT_TOP_DIR} + -P ${CMAKE_CURRENT_BINARY_DIR}/rift_fetch_dependencies.cmake + ) + +# Fetch the dependencies +# This would eliminate the "make fetch_dependencies" step during the build process +execute_process( + COMMAND + ${CMAKE_COMMAND} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} + -DPROJECT_TOP_DIR=${PROJECT_TOP_DIR} + -P ${CMAKE_CURRENT_BINARY_DIR}/rift_fetch_dependencies.cmake + RESULT_VARIABLE result + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + +if(result) + message("Failed to get dependencies for submodule ${submodule}") + message(FATAL_ERROR "Error: ${result}") +endif(result) + +## +# function to create a target to generate rw-composite.yang from dir of .yangs +# rift_gen_composite_yang( +# OUTPUT_FILE # composite output file path +# INPUT_YANG_DIR ) +# +# Example: rift_submodule_to_target_name(var modules/core/fpath) +# +# Result: $(var) == core_fpath +## +function(rift_submodule_to_target_name var submodule) + set(retval) + string(REPLACE "/" "_" retval ${submodule}) + string(REPLACE "modules_" "" retval ${retval}) + set(${var} "${retval}" PARENT_SCOPE) + +endfunction(rift_submodule_to_target_name) + + +function(add_submodule_targets + submodule + submodule_target + clean_targets + unittest_targets + unittest_long_targets + systemtest_targets + coverage_targets + doxygen_targets + package_targets + bcache_targets) + ## + # Wipe out the external project and build it again + ## + list(APPEND clean_targets clean_${submodule_target}) + add_custom_target(clean_${submodule_target} + rm -rf ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/ + ) + + ## + # Add custom targets for running quick unittests in each submodule + ## + list(APPEND unittest_targets rw.unittest.${submodule_target}) + add_custom_target(rw.unittest.${submodule_target} + $(MAKE) rw.unittest + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + ## + # Add custom targets for running long unittests in each submodule + ## + list(APPEND unittest_long_targets rw.unittest_long.${submodule_target}) + add_custom_target(rw.unittest_long.${submodule_target} + $(MAKE) rw.unittest_long + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + ## + # Add custom targets for running systemtests in each submodule + ## + list(APPEND systemtest_targets rw.systemtest.${submodule_target}) + add_custom_target(rw.systemtest.${submodule_target} + $(MAKE) rw.systemtest + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + + ## + # Add custom targets for running coverage analysis in each submodule + ## + list(APPEND coverage_targets rw.coverage.${submodule_target}) + add_custom_target(rw.coverage.${submodule_target} + $(MAKE) rw.coverage + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + ## + # Add custom targets for generating doxygen documentation in each submodule + ## + list(APPEND doxygen_targets rw.doxygen.${submodule_target}) + add_custom_target(rw.doxygen.${submodule_target} + $(MAKE) 
rw.doxygen + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + ## + # Add custom targets for generating packages in each submodule + ## + list(APPEND package_targets rw.package.${submodule_target}) + add_custom_target(rw.package.${submodule_target} + $(MAKE) rw.package + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + ## + # Add custom targets for caching packages in each submodule + ## + list(APPEND bcache_targets rw.bcache.${submodule_target}) + add_custom_target(rw.bcache.${submodule_target} + $(MAKE) rw.bcache + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + set(clean_targets "${clean_targets}" PARENT_SCOPE) + set(unittest_targets "${unittest_targets}" PARENT_SCOPE) + set(unittest_long_targets "${unittest_long_targets}" PARENT_SCOPE) + set(systemtest_targets "${systemtest_targets}" PARENT_SCOPE) + set(coverage_targets "${coverage_targets}" PARENT_SCOPE) + set(doxygen_targets "${doxygen_targets}" PARENT_SCOPE) + set(package_targets "${package_targets}" PARENT_SCOPE) + set(bcache_targets "${bcache_targets}" PARENT_SCOPE) +endfunction() + +## +# rwbase is a special project which sets up the environment for +# the rest of Riftware to build. As such, every other project +# will depend on rwbase. As this will never change, rather than +# use .gitmodules.deps, we can just create this special project +# here and set the dependency for the rest of the projects below. +# +# This also allows us to have rwbase be directly included in this +# git tree rather than in a submodule which is preferable as +# rwbase is tiny. +## +externalproject_add( + rwbase + DOWNLOAD_COMMAND "" + PREFIX ${CMAKE_CURRENT_BINARY_DIR}/rwbase + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/rwbase + CMAKE_ARGS + -DCMAKE_INSTALL_PREFIX="${CMAKE_INSTALL_PREFIX}" + -DNOT_DEVELOPER_BUILD="${NOT_DEVELOPER_BUILD}" + -DCOVERAGE_BUILD="${COVERAGE_BUILD}" + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCONFD_FLAVOUR=${CONFD_FLAVOUR} + INSTALL_DIR ${CMAKE_INSTALL_PREFIX} +) + +rift_externalproject_sha_check(rwbase + BINARY_DIR + ${CMAKE_CURRENT_BINARY_DIR}/rwbase/src/rwbase-build + SOURCE_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/rwbase + STAMP_DIR + ${CMAKE_CURRENT_BINARY_DIR}/rwbase/src/rwbase-stamp) + +set(clean_targets) +set(unittest_targets) +set(unittest_long_targets) +set(systemtest_targets) +set(coverage_targets) +set(doxygen_targets) +set(package_targets) +set(bcache_targets) + +add_submodule_targets(rwbase rwbase + "${clean_targets}" + "${unittest_targets}" + "${unittest_long_targets}" + "${systemtest_targets}" + "${coverage_targets}" + "${doxygen_targets}" + "${package_targets}" + "${bcache_targets}") + +set(package_targets) +set(bcache_targets) + +## +# Find the list of checked out submodules +# The user may check out one or more submodules to build +# For example user may check out modules/core/schema using +# - git submodule init modules/core/schema +# - git submodule update modules/core/schema +# - cd modules/core/schema && git checkout master +# Missing cache for submodule dependencies will also cause the submodule +# to be checked-out +## +rift_find_checkedout_submodules( + PROJECT_TOP_DIR ${PROJECT_TOP_DIR} + OUT_SUBMODULES submodules) + +## +# Add external project for building individual submodules +## +foreach(submodule ${submodules}) + rift_submodule_to_target_name(submodule_target ${submodule}) + + 
## + # Add the externalproject targets for each submodule + ## + + # RIFT-3266 - Prevent docs from running too much in parallel + # due to out of memory situation with "fop". + set (submodule_build_cmd $(MAKE)) + if(submodule_target MATCHES "(.*)docs(.*)") + set (submodule_build_cmd $(MAKE) -j1) + endif() + + externalproject_add( + ${submodule_target} + DOWNLOAD_COMMAND "" + PREFIX ${CMAKE_CURRENT_BINARY_DIR}/${submodule} + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${submodule} + CMAKE_ARGS + -DCMAKE_INSTALL_PREFIX="${CMAKE_INSTALL_PREFIX}" + -DNOT_DEVELOPER_BUILD="${NOT_DEVELOPER_BUILD}" + -DCOVERAGE_BUILD="${COVERAGE_BUILD}" + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCONFD_FLAVOUR=${CONFD_FLAVOUR} + BUILD_COMMAND ${submodule_build_cmd} + DEPENDS rwbase + ) + + # Get the submodule dependents instead of its dependencies + rift_find_submodule_deps( + PROJECT_TOP_DIR ${PROJECT_TOP_DIR} + SUBMODULE ${submodule} + OUT_DEPS dep_submodules + GET_DEPENDENTS + ) + + # Calculate all dependent submodule targets + set(dep_submodule_targets) + foreach(dep_submodule ${dep_submodules}) + rift_submodule_to_target_name(dep_submodule_target ${dep_submodule}) + list(APPEND dep_submodule_targets ${dep_submodule_target}) + endforeach(dep_submodule) + + rift_externalproject_sha_check(${submodule_target} + BINARY_DIR + ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + SOURCE_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/${submodule} + STAMP_DIR + ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-stamp + GIT_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/${submodule}/.git + DEPENDENT_EXTERNAL_TARGETS + ${dep_submodule_targets} + ) + + add_submodule_targets(${submodule} ${submodule_target} + "${clean_targets}" + "${unittest_targets}" + "${unittest_long_targets}" + "${systemtest_targets}" + "${coverage_targets}" + "${doxygen_targets}" + "${package_targets}" + "${bcache_targets}") +endforeach(submodule) + +## +# Create a list which contains all submodule targets +## +set(submodule_targets) +foreach(submodule ${submodules}) + rift_submodule_to_target_name(submodule_target ${submodule}) + list(APPEND submodule_targets ${submodule_target}) +endforeach(submodule) + +## +# Each submodule may be dependent on other submodules +# Add dependencies for each submodule target +## +foreach(submodule ${submodules}) + rift_submodule_to_target_name(submodule_target ${submodule}) + + # create a list of targets for dependent submodules + unset(dep_targets) + rift_find_submodule_deps( + PROJECT_TOP_DIR ${PROJECT_TOP_DIR} + SUBMODULE ${submodule} + OUT_DEPS deps) + if(deps) + foreach(dep ${deps}) + rift_submodule_to_target_name(dep_target ${dep}) + add_dependencies(${submodule_target} ${dep_target}) + #add_dependencies(externalproject_${submodule_target}_sha externalproject_${dep_target}_sha) + endforeach(dep) + endif(deps) +endforeach(submodule) + +## +# A target which writes solib search path configuration for gdb into a file +# +if(NOT "${submodules}" STREQUAL "") + add_custom_target(rw.gdbinit ALL + COMMAND ${PROJECT_TOP_DIR}/scripts/util/generate_gdbinit.sh ${CMAKE_INSTALL_PREFIX} + # This is an utter hack, the gdbinit ends up not included in rpm installs. 
For bcache rpms we + # don't mind; for real rpm installs there may well not be symbols anyway so it'll do for now + ) +endif() + +## +# Create a target which combines all installed foss.txt files into a single +# output +## +add_custom_target(rw.foss ALL + COMMAND rm -rf ${CMAKE_INSTALL_PREFIX}/foss/foss.html + COMMAND mkdir -p ${CMAKE_INSTALL_PREFIX}/foss + COMMAND ${PROJECT_TOP_DIR}/scripts/util/generate_foss.py + --foss-dir ${CMAKE_INSTALL_PREFIX}/foss + --output-file ${CMAKE_INSTALL_PREFIX}/foss/foss.html + DEPENDS ${submodule_targets} + ) + +## +# Add a top level target for forcing the clean on all submodules +## +#??this doesn't work as clean isn't a target it's a freebie from the Makefile generator: add_dependencies(clean ${clean_targets}) +#??this doesn't work as two cleans come out?? add_custom_target(clean +# DEPENDS ${clean_targets} +# ) + +## +# Add a top level target for running quick unittests +## +add_custom_target(rw.unittest_long + DEPENDS ${unittest_long_targets} + ) + +## +# Add a top level target for running unittests +## +add_custom_target(rw.unittest + DEPENDS ${unittest_targets} + ) + +## +# Add a top level target for running systemtests +## +add_custom_target(rw.systemtest + DEPENDS ${systemtest_targets} + ) + +## +# Add a top level target for running coverage +## +add_custom_target(rw.coverage + DEPENDS ${coverage_targets} + ) + +## +# Add a top level target for generating doxygen documentation +## +add_custom_target(rw.doxygen + DEPENDS ${doxygen_targets} + ) + +## +# Add a top level target for generating packages +## +add_custom_target(rw.package + DEPENDS ${package_targets} + ) + +## +# Add a top level target for creating build cache +## +add_custom_target(rw.bcache + DEPENDS ${bcache_targets} + ) + +if(NOT submodules) + message("No submodules are checked out") +endif() + +## +# Generate the dependency graph for pictorial viewing +## +add_custom_target(rw.dependency_graph + COMMAND + sed s,modules/,,g ${PROJECT_TOP_DIR}/.gitmodules.deps | dot -Tpng -odependency.png + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + +## +# Generate cscope sysmbols +## +add_custom_target(rw.cscope + cscope-indexer -r + WORKING_DIRECTORY ${PROJECT_TOP_DIR} + ) + +## +# Generate pycscope sysmbols +## +add_custom_target(rw.pycscope + ./scripts/cloud/pycscope-indexer -v + WORKING_DIRECTORY ${PROJECT_TOP_DIR} + ) + +## +# Perform compilation check on python scripts +## +add_custom_target(rw.pycheck + COMMAND python ./bin/rift-lint.py -c -t modules -verbose + WORKING_DIRECTORY ${PROJECT_TOP_DIR} + ) + +add_subdirectory(scripts) diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..2b2e329 --- /dev/null +++ b/LICENSE @@ -0,0 +1,13 @@ + Copyright 2016 RIFT.IO Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
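
The dependency graph declared in .gitmodules.deps and the per-submodule checkout-and-build flow described in the CMakeLists.txt comments above can also be exercised by hand. The following is a minimal sketch, assuming a superproject checkout with git and the graphviz "dot" tool on the PATH; modules/core/mano is used purely as an example submodule name:

    # Render the .gitmodules.deps digraph to a PNG; this is the same
    # sed | dot pipeline run by the rw.dependency_graph target above.
    sed 's,modules/,,g' .gitmodules.deps | dot -Tpng -o dependency.png

    # Check out and build a single submodule through the top-level Makefile
    # (the rw.submodule target is documented in the Makefile hunk below).
    make rw.submodule SUBMODULE=modules/core/mano

    # Manual checkout of the same submodule, per the CMakeLists.txt
    # comments above; checked-out submodules are then picked up by the
    # normal build.
    git submodule init modules/core/mano
    git submodule update modules/core/mano
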
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..aec746b --- /dev/null +++ b/Makefile @@ -0,0 +1,591 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf / Anil Gunturu +# Creation Date: 11/18/2013 +# + +.DEFAULT_GOAL := rw + +## +# Set a variable for the top level directory +## + +makefile.top := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) + +TOP_SRC_PATH := $(makefile.top) +RIFT_BUILD = $(TOP_SRC_PATH)/.build +RIFT_ARTIFACTS = $(TOP_SRC_PATH)/.artifacts +RIFT_MODULE_TEST = $(TOP_SRC_PATH)/.artifacts/moduletest +RIFT_INSTALL = $(TOP_SRC_PATH)/.install +RIFT_SHELL_EXE = $(TOP_SRC_PATH)/rift-shell -e -- +# Force rift-shell to reload env +RIFT_SHELL = + +HARNESS_EXE = $(RIFT_INSTALL)/usr/rift/systemtest/harness/harness + +CONFD = FULL + +## +# Function to get build type +## +ifeq ($(CMAKE_BUILD_TYPE),) + get_build_type=Debug +else + get_build_type=$(CMAKE_BUILD_TYPE) +endif + +## +# Function to get coverage build type +## +ifeq ($(COVERAGE_BUILD),) + is_coverage=FALSE +else + is_coverage=$(COVERAGE_BUILD) +endif + +## +# Function to get developer build type +## +ifeq ($(NOT_DEVELOPER_BUILD),) + is_not_developer=FALSE +else + is_not_developer=$(NOT_DEVELOPER_BUILD) +endif + +## +# Function to get commit revision to checkout +## +ifeq ($(COMMIT),) + get_commit= +else + get_commit=$(COMMIT) +endif + +## +# Function to get whether to abort on first unit test failure +## +ifeq ($(ABORT_ON_TEST_FAILURE),) + get_abort_on_test_failure=1 +else + get_abort_on_test_failure=0 +endif + +## +# Function to get whether to used Confd basic version +# or Confd licensed version +## +ifeq ($(CONFD),) + get_confd_flavour=FULL +else + get_confd_flavour=$(CONFD) +endif + + +## +# Function to lookup submodules +## +ifeq ($(SUBMODULE),) + lookup_submodule=$(error ${newline}ERROR: SUBMODULE=XYZ missing on command line:${newline}) +else ifeq ($(findstring modules/, $(SUBMODULE)), modules/) + lookup_submodule=$(SUBMODULE) +else + lookup_submodule=$(error ${newline}ERROR: Invalid SUBMODULE=XYZ specifier on command line${newline}) +endif + +.PHONY: all +all: rw + +## +# +## +rw.list: + @echo '' + @echo '================================================================================' + @echo ' List of Make targets' + @echo '================================================================================' + @echo '' + @echo ' make rw.checkout SUBMODULE=' + @echo ' - Generic target to checkout a specific submodule' + @echo ' e.g. make rw.checkout SUBMODULE=modules/core/util' + @echo '' + @echo ' make rw.submodule SUBMODULE=' + @echo ' - Generic target to checkout & build a submodule' + @echo ' e.g. 
make rw.submodule SUBMODULE=modules/core/util' + @echo '' + @echo ' make rw.checkout.world COMMIT=' + @echo ' - Check out the entire tree at a particular superproject hash-state (or branch-tag)' + @echo '' + @echo '' + @echo 'Shortcuts:' + @echo ' make rw - Just want an incremental build' + @echo ' make rw.app.rwbin - Application RW.Bin (checkout & build)' + @echo ' make rw.bcache - Populate the build cache' + @echo ' make rw.checkout.world - Checkout ALL submodules (whole world)' + @echo ' make rw.checkout.stack - Checkout Openstack submodules' + @echo ' make rw.core.fpath - Core FastPath (checkout & build)' + @echo ' make rw.core.ipc - Core IPC packages (checkout & build)' + @echo ' make rw.core.mgmt - Core mgmt packages (checkout & build)' + @echo ' make rw.core.schema - Core management packages (checkout & build)' + @echo ' make rw.core.util - Core utilities (checkout & build)' + @echo ' make rw.cscope - Generate cscope symbols' + @echo ' (in top directory)' + @echo ' make rw.pycscope - Generate pycscope symbols' + @echo ' (in top directory)' + @echo ' make rw.coverage - Run coverage' + @echo ' (results in ${top}/.artifacts/coverage)' + @echo ' make rw.fix_perms - Fix root ownership in .install' + @echo ' make rw.pycheck - Run simple Python compile-check for scripts under modules/*' + @echo ' make rw.doxygen - Generate doxygen documentation' + @echo ' (in ${top}/.install/documentation dir)' + @echo ' make rw.docs - Documentation (checkout & build)' + @echo ' make rw.dependency_graph - Generate submodule dependency dot graph' + @echo ' (in ${top}/.build/dependency.png)' + @echo ' make rw.package - Generate RPM packages' + @echo ' make rw.unittest - Run the unittests' + @echo ' (results in ${top}/.artifacts/unittest)' + @echo ' make rw.unittest_long - Run long unittests' + @echo ' (results in ${top}/.artifacts/unittest)' + @echo ' make rw.automation.systemtest - Checkout modules/automation/systemtest but do not run the systemtests' + @echo ' make rw.sanity - Run a single harness smoke test (default: trafgen)' + @echo ' (takes optional TEST=[trafgen, seagull, ltesim] parameter)' + @echo ' make rw.systemtest - Run the harness smoke tests' + @echo ' make rw.systemtest_local - Run the local systemtest' + @echo ' (results in ${top}/.artifacts/systemtest)' + @echo ' make rw.rift - Checkout & build rift (no ext)' + @echo ' make rw.world - Checkout & build' + @echo ' make CONFD=BASIC - Checkout & build using Confd BASIC version' + @echo ' make CONFD=FULL - Checkout & build using Confd FULL version. This is the default option.' + @echo + @echo 'Examples w/misc. 
options:' + @echo ' make rw VERBOSE=1 CMAKE_BUILD_TYPE=Release' + @echo ' make rw VERBOSE=1 NOT_DEVELOPER_BUILD=TRUE CMAKE_BUILD_TYPE=Release COVERAGE_BUILD=TRUE' + @echo '' + @echo '' + @echo '' + @echo 'Image building commands:' + @echo ' NOTE: Images require root access and a fully built tree that used NOT_DEVELOPER_BUILD=TRUE' + @echo ' As we do not want to built the entire tree as root, they do not depend on rw.world, it' + @echo ' remains up to the caller to first call "make rw.world NOT_DEVELOPER_BUILD=TRUE"' + @echo ' make rw.ec2-image - Image suitable for uploading to EC2' + @echo ' make rw.kvm-image - Image suitable for uploading to OpenStack' + @echo '' + @echo 'Instructions to run the trafgen simulation (as of 04/15/2015):' + @echo ' cd top-of-your-build-dir' + @echo ' ./rift-shell' + @echo ' ./modules/automation/systemtest/fpath/demos/trafgen_111.py -c -m ethsim --configure --start-traffic' + @echo '' + @echo ' ## To see port statistics:' + @echo ' show colony trafsink port-state trafsink/5/1 counters' + @echo ' ## [Here you should see port rx/tx counters, if the test is running successfully...]' + @echo '' + @echo 'Smoke-Test Instructions:' + @echo ' Wiki: http://confluence.eng.riftio.com/display/AUT/Fpath+smoke+test' + @echo ' Example: ./modules/automation/systemtest/fpath/fp_smoke' + @echo '' + @echo '' + +## +# Make rule to display help for all targets +## + +help: + @echo '================================================================================' + @echo 'Makefile targets - the default target is "help"' + @echo '================================================================================' + @echo '' + @echo ' primer - help message to build source code for the first time' + @echo ' help - this message' + @echo ' cmake - invoke cmake for the module this directory is in' + @echo ' rw.list - list of make targets and usage' + $(RIFT_SHELL_EXE) $(MAKE) rw.list + + + +## +# Make rule to display a primer on how to easily checkout/build the software +## + +primer: + @echo '================================================================================' + @echo 'RiftWare software build primer' + @echo '================================================================================' + @echo '' + @echo 'Step #1 -- First checkout the software module that you wish to build' + @echo '--------------------------------------------------------------------------------' + @echo 'Assuming this is the "rw.core.util" submodule, then:' + @echo '' + @echo '$$ make rw.checkout SUBMODULE=rw.core.util' + @echo '' + @echo 'If you know the git submodule name, you can also specify:' + @echo '' + @echo '$$ make rw.checkout SUBMODULE=modules/core/util' + @echo '' + @echo 'Step #2 -- Now run the cmake target' + @echo '--------------------------------------------------------------------------------' + @echo 'This makes a build directory, runs cmake, and runs make on the generated files' + @echo '' + @echo '$$ make cmake' + + +## +# Clean up all generated files from previous builds. 
+## +clean: + rm -rf $(RIFT_ARTIFACTS) + rm -rf $(RIFT_INSTALL) + rm -rf $(RIFT_BUILD) + +rw.clean: clean + +clean.fast: + @touch $(RIFT_ARTIFACTS) + @touch $(RIFT_INSTALL) + @touch $(RIFT_BUILD) + @$(eval DELETE := $(shell mktemp -d --tmpdir=$(RIFT_ROOT) .deleteXXXXXX)) + @mv -f $(RIFT_ARTIFACTS) $(DELETE) + @mv -f $(RIFT_INSTALL) $(DELETE) + @mv -f $(RIFT_BUILD) $(DELETE) + @(rm -rf $(DELETE) &>/dev/null &) + +## +# Rule to invoke cmake +## +cmake:: BUILD_TYPE=$(call get_build_type) +cmake:: COVERAGE_TYPE=$(call is_coverage) +cmake:: NOT_DEVELOPER_TYPE=$(call is_not_developer) +cmake:: CONFD_FLAVOUR=$(call get_confd_flavour) +cmake:: + mkdir -p $(RIFT_BUILD) + mkdir -p $(RIFT_ARTIFACTS) + mkdir -p $(RIFT_MODULE_TEST) + mkdir -p $(RIFT_INSTALL) + cd $(RIFT_BUILD) && $(RIFT_SHELL_EXE) cmake ../ -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DNOT_DEVELOPER_BUILD=$(NOT_DEVELOPER_TYPE) -DCOVERAGE_BUILD=$(COVERAGE_TYPE) -DCONFD_FLAVOUR=$(CONFD_FLAVOUR) + +## +# Rule to checkout non-external components +## +rw.checkout.rift: CHANGESET=$(call get_commit) +rw.checkout.rift: + git xinit -e modules/ext/* + git xcheckout $(CHANGESET) + +## +# Rule to checkout all components +## +rw.checkout.world: CHANGESET=$(call get_commit) +rw.checkout.world: + git xinit + git xcheckout $(CHANGESET) + +## +# Rule for rw.checkout +# +# This is done with a "git submodule init" followed by a "git submodule update" +# Then checkout the master branch of the source code +## +rw.checkout:: SUBMODULE_DIR=$(call lookup_submodule) +rw.checkout:: CHANGESET=$(call get_commit) +rw.checkout:: + git xinit -s $(SUBMODULE_DIR) + git xcheckout $(CHANGESET) + +## +# Generic code to checkout submodule and make it +## +rw.submodule:: SUBMODULE_DIR=$(call lookup_submodule) +rw.submodule:: CHANGESET=$(call get_commit) +rw.submodule: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=$(SUBMODULE_DIR) COMMIT=$(CHANGESET) + $(RIFT_SHELL_EXE) $(MAKE) rw + +## +# Shortcut checkout/make rules for various modules +# +# These commands are shortcuts to checkout and build the specified submodule +## +rw.app.rwbin: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/app/rwbin + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.core.util: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/util + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.core.fpath: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/fpath + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/automation/systemtest + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.docs: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/docs + $(RIFT_SHELL_EXE) $(MAKE) rw + +## +# SAMPLE target for making a tar file of the exportable-docs +# as of this moment, no documents are exportable, so this is just a placeholder +## +rw.export_docs: rw.docs + tar -c -h -C $(RIFT_INSTALL)/documentation -f $(RIFT_INSTALL)/documents.tar riftio/pdf/riftio_distributed_fpath.pdf config + +rw.core.mgmt: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/mgmt + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.core.ipc: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/ipc + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.core.rwvx: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/rwvx + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.automation.systemtest: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/automation/systemtest + $(RIFT_SHELL_EXE) $(MAKE) rw + +core_fpath: cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) $@ + +core_mgmt: cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) $@ + +core_util: 
cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) $@ + +core_rwvx: cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) $@ + + +# +# Rule to checkout and build rift +# +rw.world: rw.checkout.world + $(RIFT_SHELL_EXE) $(MAKE) rw + + +# +# Rule to checkout and build rift without external packages +# +rw.rift: rw.checkout.rift + $(RIFT_SHELL_EXE) $(MAKE) rw + +# +# Rule to run the systemtest smoke test via the harness +# +rw.systemtest: + $(RIFT_SHELL_EXE) $(HARNESS_EXE) run -i smoke_stable --serial --stdout + + +# +# Get the harness test name from the TEST= make argument (default is trafgen) +# This will convert between a simple test name (trafgen, ltesim, seagull) +# into the corresponding harness test name (passed to harness via --name parameter) +# These test names are found in the respective .racfg test configuration files +# +ifeq ($(TEST),) + get_sanity_test=^TC_TRAFGEN111_0100$$ +else ifeq ($(TEST), trafgen) + get_sanity_test=^TC_TRAFGEN111_0100$$ +else ifeq ($(TEST), ltesim) + get_sanity_test=^TC_LTESIMCOMBINED_0101$$ +else ifeq ($(TEST), seagull) + get_sanity_test=^TC_SEAGULL_0001$$ +endif + +# +# Rule to run a single test via the harness +# +rw.sanity:: HARNESS_TEST=$(call get_sanity_test) +rw.sanity:: + $(RIFT_SHELL_EXE) $(HARNESS_EXE) run --no-user --serial --stdout --name $(HARNESS_TEST) + +## +# Rule to invoke systemtest locally +## +rw.systemtest_local: rw.cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) RIFT_NO_SUDO_REAPER=1 rw.systemtest + + +# +# Rule to fix the permissions in the .install directory after running a demo as root +# +rw.fix_perms: + $(RIFT_SHELL_EXE) $(HARNESS_EXE) run --serial --stdout --name FIX_INSTALL_PERMISSIONS_9001 + +## +# Rule to create the combined foss.html from all foss.txt files in +# installed submodules. +## +rw.foss: rw.cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.foss + + +## +# This target runs the cmake step. +# The cmake is invoked under the following two conditions: +# - code was checked out and the cmake step was never invoked +# - cmake step was invoked once, however a new submodule +# was checked out since then +## +rw.cmake: + if [[ ! -f .build/Makefile ]] ; then \ + $(RIFT_SHELL_EXE) $(MAKE) cmake ; \ + else \ + grep "path = " .gitmodules | awk '{print $$3}' | \ + while read submodule; do \ + if [[ -f $$submodule/CMakeLists.txt ]] ; then \ + if [[ ! -d .build/$$submodule ]] ; then \ + cd $(RIFT_BUILD) && cmake ../ ; \ + break; \ + fi; \ + fi; \ + done; \ + fi; \ + env RIFT_ROOT=$(TOP_SRC_PATH) python3 rift_env.py + +## +# rule to download and install the non-OSM source code +# +modules/core/util/Makefile: + wget http://repo.riftio.com/releases/open.riftio.com/4.1.1/ext_4_1_1.tgz + tar xzf ext_4_1_1.tgz + +rw.ext: modules/core/util/Makefile + + +# Rule to invoke the incremental build +# This should be invoked after a target that already invoked "make cmake" +# For example after the "make rw.core.util" is invoked first, one can just +# invoke "make rw" +# NOTE: This will not rebuild the external projects in submodules +## +rw: rw.ext rw.cmake + git rev-parse --abbrev-ref HEAD >$(RIFT_INSTALL)/.git_status + git rev-parse HEAD >>$(RIFT_INSTALL)/.git_status + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) + +## +# Rule to invoke the incremental build +# This should be invoked after a target that already invoked "make cmake" +# For example after the "make rw.core.util" is invoked first, one can just +# invoke "make rw-dammit" +# NOTE: This will rebuild the external projects in submodules. 
+## +rw.dammit: rw.cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.dammit + +## +# Rule to invoke the clean build +# This should be invoked after a target that already invoked "make cmake" +# For example after the "make rw.core.util" is invoked first, one can just +# invoke "make rw.clean_and_rebuid" +# NOTE: This will remove the current install directory and submodule +# build directories and build everything from scratch +## +rw.clean_and_rebuild: rw.cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.clean_and_rebuild + +## +# Rule generate doxygen documentation +## +rw.doxygen: rw.cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.doxygen + +## +# Rule to generate dependency graph +## +rw.dependency_graph: rw.cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.dependency_graph + +## +# Rule to invoke unittest +## +rw.unittest: ABORT_ON_TEST_FAILURE=$(call get_abort_on_test_failure) +rw.unittest: rw.cmake + @if [ "$(shell ulimit -c)" == "0" ]; then \ + ulimit -S -c unlimited; \ + fi && \ + ABORT_ON_TEST_FAILURE=$(ABORT_ON_TEST_FAILURE) $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.unittest + +## +# Rule to invoke unittest +## +rw.unittest_long: ABORT_ON_TEST_FAILURE=$(call get_abort_on_test_failure) +rw.unittest_long: rw.cmake + @if [ "$(shell ulimit -c)" == "0" ]; then \ + ulimit -S -c unlimited; \ + fi && \ + ABORT_ON_TEST_FAILURE=$(ABORT_ON_TEST_FAILURE) $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.unittest_long +## +# Rule to invoke python checks +## +rw.pycheck: + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.pycheck + +## +# Rule to invoke coverage target +## +rw.coverage: ABORT_ON_TEST_FAILURE=$(call get_abort_on_test_failure) +rw.coverage: export COVERAGE_BUILD = TRUE +rw.coverage: rw.cmake + @if [ "$(shell ulimit -c)" == "0" ]; then \ + ulimit -S -c unlimited; \ + fi && \ + ABORT_ON_TEST_FAILURE=$(ABORT_ON_TEST_FAILURE) $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.coverage + +## +# Rule to generate cscope symbols +## +rw.cscope: + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.cscope + +## +# Rule to generate pycscope symbols +## +rw.pycscope: + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.pycscope + +## +# Rule to generate ctags +## +rw.ctags: + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.ctags + +## +# Rule for rw.package +rw.package: rw.cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.package + +## +# Rule for generating build cache +## +rw.bcache: rw.cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.bcache + +## +# Rule for generating EC2 images +## +rw.ec2-image: cmake + @if [ "$(NOT_DEVELOPER_BUILD)" != "TRUE" ]; then \ + echo; \ + echo "ERROR: Images must be built with NOT_DEVELOPER_BUILD=TRUE"; \ + exit 1; \ + fi + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) ec2-image + +## +# Rule for generating KVM images +## +rw.kvm-image: cmake + @if [ "$(NOT_DEVELOPER_BUILD)" != "TRUE" ]; then \ + echo; \ + echo "ERROR: Images must be built with NOT_DEVELOPER_BUILD=TRUE"; \ + exit 1; \ + fi + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) kvm-image +# +rw.rpmbuild: rw.cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rpmbuild + + diff --git a/Makefile.env b/Makefile.env new file mode 100644 index 0000000..9642724 --- /dev/null +++ b/Makefile.env @@ -0,0 +1,50 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 05/22/2014 +# + +## +# This makefile sets up environment variables +## + +## +# Function to find the top of the RiftWare distribution tree +## + +find_upward = $(word 1, $(shell while [ `pwd` != 
/ ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) +makefile.top := $(call find_upward, "Makefile.top") + +## +# Set a variable for the top level directory +## + +top.src.path := $(abspath $(dir $(makefile.top))) +top.build.path := $(abspath $(top.src.path)/.build) +top.install.path := $(abspath $(top.src.path)/.install) + +## +# Set the LD_LIBRARY_PATH to include the local install paths +## +export LD_LIBRARY_PATH := $(top.install.path)/usr/local/lib:$(top.install.path)/usr/local/pyang-1.4.1/lib:$(top.install.path)/usr/lib:$(top.install.path)/usr/lib64:$(top.install.path)/usr/lib/rift/plugins + +## +# Set the PKG_CONFIG_PATH to include the local install paths +## +export PKG_CONFIG_PATH := $(top.install.path)/usr/lib/pkgconfig:$(top.install.path)/usr/lib64/pkgconfig:$(top.install.path)/usr/share/pkgconfig + +## +# Set the PATH to include the local install paths +## +export PATH := $(top.install.path)/usr/local/bin:$(top.install.path)/usr/local/pyang-1.4.1/bin:$(top.install.path)/usr/bin:$(top.install.path)/usr/sbin:${PATH} + +## +# Set the GI_TYPELIB_PATH to include the local install paths +## +export GI_TYPELIB_PATH := $(top.install.path)/usr/lib/girepository-1.0:$(top.install.path)/usr/lib/rift/girepository-1.0 + +## +# Needed find the gir files +## +export XDG_DATA_DIRS := $(top.install.path)/usr/share:$(XDG_DATA_DIRS) diff --git a/Makefile.top b/Makefile.top new file mode 100644 index 0000000..c28cc10 --- /dev/null +++ b/Makefile.top @@ -0,0 +1,175 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/18/2013 +# + +## +# Set a variable for the top level directory +## + +top.src.path := $(abspath $(dir $(makefile.top))) +top.build.path := $(abspath $(top.src.path)/.build) +top.install.path := $(abspath $(top.src.path)/.install) + +## +# From the top level directory and the current directory, determine the module directory +## + +module.src.path := $(abspath $(dir $(call find_upward, "manifest"))) +module.src.subdir := $(subst $(top.src.path),,$(module.src.path)) +ifeq ($(wildcard $(top.build.path)/$(module.src.subdir)/src/*-build),) + module.build.path := $(abspath $(top.build.path)/$(module.src.subdir)) +else + module.build.path = $(abspath $(wildcard $(top.build.path)/$(module.src.subdir)/src/*-build)/) +endif + +## +# From the module directory, determine the final build directory +## + +here.src.path := $(abspath $(PWD)) +here.src.subdir := $(subst $(module.src.path),,$(here.src.path)) +here.build.path := $(abspath $(module.build.path)/$(here.src.subdir)) + +makefile.env := $(call find_upward, "Makefile.env") +include $(makefile.env) + +## +# Define a variable for newline +## + +define newline + + +endef + +## +# Default rule is to invoke the "compile" target +## + +all:: compile + +## +# Rule to clean from a particular directory +## + +clean:: +ifeq ($(wildcard $(here.build.path)),) + $(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path}) +else + cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS) +endif + +## +# Rule to compile from a particular directory +## + +compile:: +ifeq ($(wildcard $(here.build.path)),) + $(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path}) +else + cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS) +endif + +## +# Rule to install from a particular directory +## + +install:: +ifeq ($(wildcard $(here.build.path)),) + $(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path}) +else + cd 
$(here.build.path) && $(MAKE) $(MAKECMDGOALS) +endif + +## +# Rule to create a symbolic link to the build directory for a particular directory +## + +link:: +ifeq ($(wildcard $(here.build.path)),) + $(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path}) +else + ln -s $(here.build.path) .build +endif + + +## +# This is a VERY temporary rule to get rid of things in .install that OVERRIDE the local things +# The correct fix for this is in the CMakefiles so we do not need to remove these in the first place +## + +localize: + rm -f $(top.src.path)/.install/usr/include/*rwsched* + rm -f $(top.src.path)/.install/usr/lib/*rwsched* + +## +# Rule to invoke ctest from a particular directory +## + +test:: +ifeq ($(wildcard $(here.build.path)),) + $(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path}) +else + cd $(here.build.path) && ctest --verbose +endif + +unittest:: +ifeq ($(wildcard $(here.build.path)),) + $(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path}) +else + cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS) +endif + +## +# Rule to invoke cmake +## + +cmake:: + echo $(top.build.path) + rm -rf $(top.build.path) + mkdir $(top.build.path) + cd $(top.build.path) && cmake .. + cd $(top.build.path) && $(MAKE) + + +## +# Generic code to run rwmain +## + +run_rwmain:: +ifeq ($(wildcard $(here.build.path)),) + $(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path}) +else + cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS) +endif + +run_ravcs_mtest:: +ifeq ($(wildcard $(here.build.path)),) + $(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path}) +else + cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS) +endif + +## +# Make rule to display help for all targets +## + +help:: + @echo '======================================================================' + @echo 'Makefile targets - the default target is "compile"' + @echo '======================================================================' + @echo '' + @echo ' compile - compile for this directory (default target)' + @echo '' + @echo ' clean - invoke clean in the current sub-directory' + @echo ' cmake - invoke cmake for the module this directory is in' + @echo ' link - create symbolic links to the cmake .build directory for the current sub-directory' + @echo ' test - invoke the cmake "test" target in the current sub-directory' + @echo ' run_rwain - invoke the cmake "run_rwmain" target in the current sub-directory' + @echo ' run_ravcs_mtest - invoke the cmake "run_ravcs_mtest" target in the current sub-directory' + + + diff --git a/RELEASE b/RELEASE new file mode 100644 index 0000000..cb1d6ed --- /dev/null +++ b/RELEASE @@ -0,0 +1 @@ +4.1.1.0 diff --git a/RIFTWARE_COMPILATION_LICENSE b/RIFTWARE_COMPILATION_LICENSE new file mode 100644 index 0000000..5ceb211 --- /dev/null +++ b/RIFTWARE_COMPILATION_LICENSE @@ -0,0 +1,244 @@ +License +------- +RIFT.ware is a compilation of software packages, each distributed under +its own license. The compilation itself is released under the Apache 2.0 +license (See copy below OR http://www.apache.org/licenses/LICENSE-2.0). +However, the RIFT.ware compilation license does not supersede the +licenses of code and content contained in RIFT.ware. + + +Source Availability +------------------- +A complete machine-readable copy of the source code corresponding to portions +of the accompanying RIFT.ware release is available upon request. 
This offer +is valid to anyone in receipt of this information and shall +expire three years following the date of the final distribution of this +release by RIFT.IO, Inc. + +To obtain such source code, send a check or money order in the amount of +US $20.00 to: +Vice President, Intellectual Property +RIFT.IO, Inc. +77 South Bedford Street Suite 450 +Burlington, MA 01803 USA + +Please write "source for RIFT.ware $VERSION" +(replacing $VERSION for the version of RIFT.ware you want the source for) +in the memo line of your payment. + +You may also access a copy of this source code at: +https://open.riftio.com/download + + +Export Regulations +------------------ +By downloading or installing RIFT.ware software, you acknowledge that you +understand all of the following: RIFT.ware software and technical information +may be subject to the U.S. Export Administration Regulations (the EAR) and +other U.S. and foreign laws and may not be exported, re-exported or +transferred (a) to any country listed in Country Group E:1 in Supplement +No. 1 to part 740 of the EAR (currently, Cuba, Iran, North Korea, Sudan +& Syria); (b) to any prohibited destination or to any end user who has been +prohibited from participating in U.S. export transactions by any federal +agency of the U.S. government; or (c) for use in connection with the design, +development or production of nuclear, chemical or biological weapons, or +rocket systems, space launch vehicles, or sounding rockets, or unmanned +air vehicle systems. You may not download RIFT.ware software or technical +information if you are located in one of these countries or otherwise subject +to these restrictions. You may not provide RIFT.ware software or technical +information to individuals or entities located in one of these countries or +otherwise subject to these restrictions. You are also responsible for +compliance with foreign law requirements applicable to the import, export +and use of RIFT.ware software and technical information. + + + + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. diff --git a/bin/build_ladder.sh b/bin/build_ladder.sh new file mode 100755 index 0000000..9aa3a02 --- /dev/null +++ b/bin/build_ladder.sh @@ -0,0 +1,228 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# /bin/bash +# +# +# Author(s): Austin Cormier +# Creation Date: 2014/06/03 +# +# The build ladder gets the topological sorted list of submodules and builds +# the submodules one at a time using a custom build cache location. +# +# REQUIRES ALL SUBMODULES IN UNINITIALIZED STATE +# +# 1. Create an empty directory to use as the build cache location. +# 3. Generate list of sorted submodules (using dependency_sort.sh). +# 4. For each submodule in the sorted list: +# 1. Build only that submodule (make rw.submodule SUBMODULE= BUILDCACHE_DIR=) +# 2. If the submodule build fails, stop. +# 3. Make submodule package. +# 4. Make submodule build cache. +# 5. Deinitialize submodule. + +# These steps should verify that all submodule dependencies are correct and the +# artifact packaging is complete. 
If any submodule fails, then the required +# dependencies are somehow incomplete or incorrect. + +set -o nounset + +THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# Set some vars if not defined by env variables. Used for testing. +DEPENDENCY_SORT_BIN=${DEPENDENCY_SORT_BIN:-"$THIS_DIR/dependency_sort.py"} +MAKE_CMD=${MAKE_CMD:-"make VERBOSE=1"} + +function verify_cwd_at_root(){ + if [ ! -f ".gitmodules.deps" ]; then + echo "ERROR: This script should be run at the top-level" + exit 1 + fi +} + +function verify_submodules_uninitialized(){ + while read line; do + if [[ $line != -* ]]; then + echo "ERROR: Found a initialized submodule: $line" + exit 1 + fi + done < <(git submodule) +} + +function verify_single_submodule_initialized(){ + submodule=$1 + git submodule | while read line; do + if [[ $line != -* ]]; then + if [[ $line != *$submodule* ]]; then + echo "ERROR: Found a initialized submodule: $line" + exit 1 + fi + fi + done < <(git submodule) +} + +# Capture stdout to store cache directory. +function generate_build_cache_dir(){ + local dir=$(mktemp -d -t "build_ladder_XXX") || exit 1 + + echo $dir +} + +function get_sorted_submodule_list(){ + local sorted_submodules=$($DEPENDENCY_SORT_BIN) + if [ $? -ne 0 ]; then + echo "ERROR: Could not get list of sorted submodules." + exit 1 + fi + + echo "$sorted_submodules" +} + +# Log the command and run it. +function log_and_run_cmd(){ + echo "INFO: Running command: $@" + $@ +} + +## +# Build the submodule using the top-level rw.submodule target. + +# Arguments: +# $1 - submodule +# $2 - build cache location +## +function submodule_build(){ + local submodule="$1" + local build_cache="$2" + + # Build only this submodule + build_cmd="$MAKE_CMD rw.submodule SUBMODULE=$submodule BUILDCACHE_DIR=$build_cache" + + log_and_run_cmd $build_cmd + if [ $? -ne 0 ]; then + echo "ERROR: Building submodule '$submodule' failed. (command: $build_cmd)" + exit 1 + fi + + verify_single_submodule_initialized "$submodule" || exit 1 +} + + +## +# Package the submodule using the top-level rw.package target. +# +# Arguments: +# $1 - submodule +## +function submodule_package(){ + local submodule="$1" + + # SUBMODULE argument is NOT necessary but doesn't hurt to include it. + local package_cmd="$MAKE_CMD rw.package SUBMODULE=$submodule" + + log_and_run_cmd $package_cmd + if [ $? -ne 0 ]; then + echo "ERROR: Packaging submodule '$submodule' failed. (command: $package_cmd)" + exit 1 + fi +} + +## +# Create the build cache using the packaged submodule artifacts +# +# Arguments: +# $1 - submodule +# $2 - build cache location +## +function submodule_bcache(){ + local submodule="$1" + local build_cache="$2" + + # SUBMODULE argument is NOT necessary. + local bcache_cmd="$MAKE_CMD rw.bcache SUBMODULE=$submodule BUILDCACHE_DIR=$build_cache" + + BCACHE_IGNORE_FAILED_SUBMODULE_TESTS=1 log_and_run_cmd $bcache_cmd + if [ $? -ne 0 ]; then + echo "ERROR: Bcaching submodule '$submodule' failed. (command: $bcache_cmd)" + exit 1 + fi +} + +## +# Deinitialize the submodule so submodule build past this point will not have +# access to the submodule's sources but only the pre-built artifacts. +# +# Arguments: +# $1 - submodule +## +function submodule_deinit(){ + local submodule="$1" + + local deinit_cmd="git submodule deinit $submodule" + + $deinit_cmd + if [ $? -ne 0 ]; then + echo "ERROR: Deinitializing submodule failed. (command: $deinit_cmd)" + exit 1 + fi +} + +## +# Make clean to clear out everything previously generated in the workspace. 
+# This ensures only artifacts from build cache are retrieved. +## +function make_clean(){ + [ -d .build ] && rm -rf .build + [ -d .install ] && rm -rf .install +} + +## +# +## +function make_unittests(){ + make rw.unittest ABORT_ON_TEST_FAILURE=0 VERBOSE=1 +} + +verify_cwd_at_root +verify_submodules_uninitialized || exit 1 + +build_cache=$(generate_build_cache_dir) +# Set up a trap to automatically clean up build cache directory on exit or catchable signal +trap "[ -d $build_cache ] && rm -rf $build_cache" EXIT SIGINT SIGTERM + +echo "INFO: Created new build cache ($build_cache)" + +sorted_submodules=$(get_sorted_submodule_list) +echo "INFO: Got list of sorted submodules ($sorted_submodules)" + +# Convert the string into an array using the default IFS of ' ' +read -a sorted_submodules_array <<< "$sorted_submodules" + +for submodule in "${sorted_submodules_array[@]}"; do + if [ $submodule == "modules/ext/lib" ]; then + continue + fi + + submodule_build "$submodule" "$build_cache" || (submodule_deinit "$submodule"; exit 1) + make_unittests || echo "Unit tests in submodule $submodule failed, continuing." + submodule_package "$submodule" || (submodule_deinit "$submodule"; exit 1) + submodule_bcache $submodule "$build_cache" || (submodule_deinit "$submodule"; exit 1) + submodule_deinit "$submodule" || exit 1 + + make_clean +done + +echo "INFO: Build ladder was successful!" +exit 0 \ No newline at end of file diff --git a/bin/catchsegv.sh b/bin/catchsegv.sh new file mode 100755 index 0000000..e82bc82 --- /dev/null +++ b/bin/catchsegv.sh @@ -0,0 +1,151 @@ +#! /bin/sh +# Copyright (C) 1998,1999,2001,2003,2004,2006,2007,2008,2009 +# Free Software Foundation, Inc. +# This file is part of the GNU C Library. +# Contributed by Ulrich Drepper , 1998. + +# The GNU C Library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. + +# The GNU C Library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. + +# You should have received a copy of the GNU Lesser General Public +# License along with the GNU C Library; if not, write to the Free +# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +# 02111-1307 USA. + +if test $# -eq 0; then + echo "$0: missing program name" >&2 + echo "Try \`$0 --help' for more information." >&2 + exit 1 +fi + +prog="$1" +shift + +if test $# -eq 0; then + case "$prog" in + --h | --he | --hel | --help) + echo 'Usage: catchsegv PROGRAM ARGS...' + echo ' --help print this help, then exit' + echo ' --version print version number, then exit' + echo "For bug reporting instructions, please see:" + echo "." + exit 0 + ;; + --v | --ve | --ver | --vers | --versi | --versio | --version) + echo 'catchsegv (Ubuntu EGLIBC 2.11.1-0ubuntu7.8) 2.11.1' + echo 'Copyright (C) 2009 Free Software Foundation, Inc. +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +Written by Ulrich Drepper.' + exit 0 + ;; + *) + ;; + esac +fi + +segv_output=`mktemp ${TMPDIR:-/tmp}/segv_output.XXXXXX` || exit + +# Redirect stderr to avoid termination message from shell. 
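+# (The subshell below first duplicates the current stderr onto fd 3 with
+# "exec 3>&2", then sends its own stderr to /dev/null so the shell's
+# "Segmentation fault" notice is suppressed; the traced program still gets
+# the real stderr back via "2>&3", and "3>&-" closes the spare descriptor
+# for it.)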
+(exec 3>&2 2>/dev/null +LD_PRELOAD=${LD_PRELOAD:+${LD_PRELOAD}:}/\$LIB/libSegFault.so \ +SEGFAULT_USE_ALTSTACK=1 \ +SEGFAULT_OUTPUT_NAME=$segv_output \ +"$prog" ${1+"$@"} 2>&3 3>&-) +exval=$? + +# Check for output. Even if the program terminated correctly it might +# be that a minor process (clone) failed. Therefore we do not check the +# exit code. +if test -s "$segv_output"; then + # The program caught a signal. The output is in the file with the + # name we have in SEGFAULT_OUTPUT_NAME. In the output the names of + # functions in shared objects are available, but names in the static + # part of the program are not. We use addr2line to get this information. + case $prog in + */*) ;; + *) + old_IFS=$IFS + IFS=: + for p in $PATH; do + test -n "$p" || p=. + if test -f "$p/$prog"; then + prog=$p/$prog + break + fi + done + IFS=$old_IFS + ;; + esac + sed '/Backtrace/q' "$segv_output" + sed '1,/Backtrace/d' "$segv_output" | sed '/Memory map:/q' | + (while read line; do + if echo -n "$line" | grep -Fq "$prog"; then + exe=`echo -n "$line" | sed 's/([^(]\{1,\})//' | sed 's/\[0x[[:xdigit:]]\{1,\}\]//'` + exe=`readlink --canonicalize-existing "$exe" 2>/dev/null` + if test $? -eq 0; then + if test -f "$exe" -a -e "$exe"; then + addr=`echo -n "$line" | sed 's/.*\[\(0x[[:xdigit:]]\{1,\}\)\]$/\1/'` + addr2line=`addr2line --exe="$exe" --functions --demangle $addr 2>/dev/null` + if test $? -eq 0; then + if echo -n "$addr2line" | grep -Eq '^\?\?:0'; then + : + else + func=`echo "$addr2line" | head --lines=1` + fileline=`echo "$addr2line" | tail --lines=1` + file=`echo -n "$fileline" | sed 's/:[[:digit:]]\{1,\}$//'` + if test -f "$file"; then + line="$fileline: $func" + fi + fi + fi + fi + fi + else + exe=`echo -n "$line" | sed 's/([^(]\{1,\})//' | sed 's/\[0x[[:xdigit:]]\{1,\}\]//'` + exe=`readlink --canonicalize-existing "$exe" 2>/dev/null` + if test $? -eq 0; then + if test -f "$exe" -a -e "$exe"; then + addr=`echo -n "$line" | sed 's/.*\[\(0x[[:xdigit:]]\{1,\}\)\]$/\1/'` + addr=`printf '%d' $addr 2>/dev/null` + if test $? -eq 0; then + mmap=`grep -F "$exe" "$segv_output" | grep -E ' 0+ '` + if test $? -eq 0; then + baseaddr=`echo -n "$mmap" | grep -Eo '^[[:xdigit:]]+'` + baseaddr=`printf '%d' 0x$baseaddr 2>/dev/null` + if test $? -eq 0; then + addr=`expr $addr - $baseaddr` + addr=`printf '%x' $addr` + addr2line=`addr2line --exe="$exe" --functions --demangle $addr 2>/dev/null` + if test $? -eq 0; then + if echo -n "$addr2line" | grep -Eq '^\?\?:0'; then + : + else + func=`echo "$addr2line" | head --lines=1` + fileline=`echo "$addr2line" | tail --lines=1` + file=`echo -n "$fileline" | sed 's/:[[:digit:]]\{1,\}$//'` + if test -f "$file"; then + line="$fileline: $func" + fi + fi + fi + fi + fi + fi + fi + fi + fi + echo "$line" + done) + sed '1,/Memory map:/d' "$segv_output" +fi +rm -f "$segv_output" + +exit $exval diff --git a/bin/dependency_parser.py b/bin/dependency_parser.py new file mode 100755 index 0000000..ad1cf74 --- /dev/null +++ b/bin/dependency_parser.py @@ -0,0 +1,219 @@ +#!/usr/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# This program parses the dependency digraph to get the +# list of dependencies recursively +# +# This is used by cmake to get the submodule dependencies +# CMAKE has terrible support for recursive functions, +# and hence this is written in Python + +import sys +import re +import os +from hashlib import sha1 +import argparse +import shlex +import subprocess +import StringIO + +THIS_DIR = os.path.dirname(os.path.realpath(__file__)) + +def get_supermodule_hash(): + """ Call the generate_supermodule_hash.sh script in the current directory. + This script prints out a hash that represents all files in the superproject + that could affect the output of what a submodule produces. + """ + return 1 + supermodule_hash_cmd = os.path.join(THIS_DIR, "generate_supermodule_hash.sh") + submodule_hash = subprocess.check_output(supermodule_hash_cmd, shell=True) + + return submodule_hash + +def get_dependencies(dep_file, submodule): + """Parse the dependency file and determine all the dependencies + recursively. + """ + deps = [] + # Save the file position so we can restore before returning + dep_file.seek(0) + + for line in dep_file.readlines(): + line = line.strip() + regex = r'"' + re.escape(submodule) + '"[ ]*->[ ]*"(.*)"' + m = re.match(regex, line) + if not m: + continue + + if m.group(1) not in deps : + deps.append(m.group(1)) + temp = get_dependencies(dep_file, m.group(1)) + for i in temp : + if i not in deps: + deps.append(i) + + # Restore the file position to the same spot before this function was called + return deps + +def reverse_dependency_file(dep_file): + """ Create an in-memory reversed dependency file + + For all lines that match the "sub_a" -> "sub_b" regex, + reverse the dependency such that "sub_b" -> "sub_a". This allows + us to reverse the dependency search to get dependents. + """ + + reversed_file = StringIO.StringIO() + for line in dep_file: + line = line.strip() + if line.startswith("//"): + continue + + regex = r'"(.*)"[ ]*->[ ]*"(.*)"' + m = re.match(regex, line) + if m is None: + reversed_file.write(line + "\n") + continue + + reversed_line = '"{}" -> "{}"\n'.format(m.group(2), m.group(1)) + reversed_file.write(reversed_line) + + reversed_file.flush() + + reversed_file.seek(0) + + return reversed_file + + +def submodule_hash(filename, supermodule_hash, submodule, outdir): + """This function calculates the submodule hash to + be used when caching the artifacts. The submodule hash + depends on the hash of the submodule, as well as on the + hashes of all the submodules that the current submodule + depends on. For example if a submodule B depends on A, + the hash for submodule B should include the hashes + of both submodules A and B. In other words, if submodule + A changes, submodule B needs recompilation even though + the contents of submodule B didn't change. + + Since a submodule's output also depends on cmake/build files in the supermodule, + use a supermodule hash when calculating the submodule hash. This ensures that + the submodule caches are flushed when a file that could affect its output is changed.
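+
+    As an illustration (the dependency path is just an example): if this
+    submodule depends on modules/core/util, the intermediate .hash file gets one
+    "<commit-sha> <path>" line per submodule (dependencies first, then the
+    submodule itself), and the returned value is the SHA-1 of that file taken
+    git-blob style ("blob <size>\0" + contents), further mixed with the
+    supermodule hash.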
+ """ + deps = get_dependencies(filename, submodule) + out_fname = outdir + "/" + submodule.replace("/", "_").replace("modules_", "") + ".hash" + out_f = open(out_fname, "w") + + mylist = deps + mylist.append(submodule) + for i in mylist: + cmd = "git submodule status " + i + args = shlex.split(cmd) + p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + out_f.write(out.split()[0].lstrip(" +-") + " " + out.split()[1] + "\n") + + out_f.close() + filesize_bytes = os.path.getsize(out_f.name) + s = sha1() + s.update(("blob %u\0" % filesize_bytes).encode('utf-8')) + + with open(out_fname, 'rb') as f: + s.update(f.read()) + + # Finally, input the supermodule hash into the hash function + s.update(supermodule_hash) + + return s.hexdigest() + +def get_output_dir(): + if os.path.exists(".build"): + return ".build" + + return "/tmp" + + +def main(): + ## + # Command line argument specification + ## + desc= """This script helps in getting the submodule dependency information""" + parser = argparse.ArgumentParser(description=desc) + + parser.add_argument('-o', '--output-dir', + default=get_output_dir(), + help='Directory for the output files (default: %(default)s)' + ) + + parser.add_argument('-f', '--dependency-file', + type=argparse.FileType('r'), + required=True, + default='.gitmodules.deps', + help='Name of the file with dependencies in DAG format (default: %(default)s)' + ) + + parser.add_argument('-s', '--submodule', + default='modules/core/util', + help='Name of the submodule (default: %(default)s' + ) + + parser.add_argument('-d', '--print-dependency', + action='store_true', + help='Print the dependency information for the submodule' + ) + + parser.add_argument('--print-dependents', + action='store_true', + help='Print the dependent information for the submodule' + ) + + parser.add_argument('-x', '--print-hash', + dest='print_hash', + action='store_true', + help='Print the combined hash for the submodule and its dependencies' + ) + + cmdargs = parser.parse_args() + + supermodule_hash = get_supermodule_hash() + + if cmdargs.print_dependency: + deps = get_dependencies(cmdargs.dependency_file, cmdargs.submodule) + # output as a list for cmake + for i in deps: + sys.stdout.write(i+";") + + elif cmdargs.print_dependents: + # In order to get the dependents (instead of dependencies), reverse + # the dependency file (a -> b becomes b -> a) and reuse the + # get_dependencies() function. + reverse_dep_file = reverse_dependency_file(cmdargs.dependency_file) + deps = get_dependencies(reverse_dep_file, cmdargs.submodule) + # output as a list for cmake + for i in deps: + sys.stdout.write(i+";") + + if cmdargs.print_hash: + h=submodule_hash(cmdargs.dependency_file, supermodule_hash, cmdargs.submodule, cmdargs.output_dir) + sys.stdout.write(h) + + +if __name__ == "__main__": + main() diff --git a/bin/dependency_sort.py b/bin/dependency_sort.py new file mode 100755 index 0000000..c8bddb3 --- /dev/null +++ b/bin/dependency_sort.py @@ -0,0 +1,105 @@ +#!/usr/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# +# +# +# +# This program takes as arguments a list of submodules and a submodule +# dependency file, then prints the submodules in topological order. + +from os.path import join, dirname, relpath, realpath +from dependency_parser import get_dependencies +import argparse +import re + +SCRIPT_DIR=dirname(realpath(__file__)) +ROOT_DIR=join(SCRIPT_DIR, "..") +GITMODULES_DEPS_FILEPATH=relpath(join(ROOT_DIR, ".gitmodules.deps")) +GITMODULES_FILEPATH=join(ROOT_DIR, ".gitmodules") + + +def get_all_submodules(): + submodules = [] + with open(GITMODULES_FILEPATH) as gitmodules_hdl: + for line in gitmodules_hdl: + match = re.match('\[submodule "(?P<submodule>.*)"\]', line) + if not match: + continue + + submodule = match.group("submodule") + submodules.append(submodule) + + return submodules + +def generate_submodules_depends(submodules, dependency_file): + submodule_depends = [] + for submodule in submodules: + depends = get_dependencies(dependency_file, submodule) + entry = (submodule, depends) + submodule_depends.append(entry) + + return submodule_depends + + +def sort_submodules(submodule_depends): + sorted_depends = [] + unsorted_depends = dict(submodule_depends) + + while unsorted_depends: + acyclic = False + for node, edges in unsorted_depends.items(): + for edge in edges: + if edge in unsorted_depends: + break + else: + acyclic = True + del unsorted_depends[node] + sorted_depends.append((node, edges)) + + assert acyclic + + return [submodule[0] for submodule in sorted_depends] + + +def main(): + ## + # Command line argument specification + ## + desc= """Submodule Topological Sort. Submodules are written to stdout separated by spaces.""" + parser = argparse.ArgumentParser(description=desc) + + # User can provide an alternate dependency file. Default is the .gitmodules.deps at the root of the repo. + parser.add_argument('-f', '--dependency-file', dest='dependency_file', + type=argparse.FileType('r'), default=GITMODULES_DEPS_FILEPATH, + help='Name of the file with dependencies in DAG format (default: %(default)s)') + + # User can provide a list of submodules. Default is all of the submodules. + parser.add_argument('-s', '--submodule_list', dest='submodule_list', default=get_all_submodules(), + choices=get_all_submodules(), type=str, nargs='+', + help='Names of the submodules to sort (default: %(default)s)') + + parsed_args = parser.parse_args() + + submodule_depends = generate_submodules_depends(parsed_args.submodule_list, parsed_args.dependency_file) + sorted_submodules = sort_submodules(submodule_depends) + + # print the sorted list of submodules separated by spaces. + print " ".join(sorted_submodules) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/bin/dev.sh b/bin/dev.sh new file mode 100755 index 0000000..9817f8b --- /dev/null +++ b/bin/dev.sh @@ -0,0 +1,165 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
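+#
+# Usage sketch (flags as defined in usage() below):
+#   ./bin/dev.sh --undev     replace symlinks under .install that point into .build with real copies
+#   ./bin/dev.sh --verify    check that those copies still match their .build sources
+#   ./bin/dev.sh --redev     restore the original symlinks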
+# +# +# +# +# + +root=$(realpath $(dirname $0)/../) +log=${root}/.install/.undev +verbose=false + +undev() { + local found_dirs=false + local links=$(find ${root}/.install/ -type l) + local f + + for f in ${links}; do + fp=$(readlink ${f}) + if [[ ${fp} = *.build* ]]; then + if [ -d ${fp} ]; then + echo "ERROR: ${f} points to a directory" + found_dirs=true + fi + fi + + if [[ ${fp} = *@* || ${f} = *@* ]]; then + echo "Cannot deal with @ in paths" + exit 1 + fi + done + + if ${found_dirs}; then + echo "Cannot proceed with linked directories" + exit 1 + fi + + if [ -s ${log} ]; then + echo "Previous undev found, appending" + echo > ${log} + fi + + for f in ${links}; do + fp=$(readlink ${f}) + if [[ ${fp} = *.build* ]]; then + rm -f ${f} + + pushd $(dirname ${f}) >/dev/null + cp -p ${fp} ${f} + popd >/dev/null + + echo "${f}@${fp}" >> ${log} + if ${verbose}; then + echo "Replacing ${f} with ${fp}" + fi + fi + done + + echo 'Converted all symlinks to real files' +} + +redev() { + local line + local src + local dest + + if [ ! -s "${log}" ]; then + echo "Cannot redev something that was not undeved" + exit 1 + fi + + for line in $(<${log}); do + dest=${line%@*} + src=${line#*@} + + rm -f ${dest} + ln -s ${src} ${dest} + if ${verbose}; then + echo "Linking ${src} at ${dest}" + fi + done + + rm -f ${log} +} + +_chksum() { + echo $(md5sum ${1} | cut -d' ' -f1) +} + +verify() { + local line + local src + local dest + + if [ ! -s "${log}" ]; then + echo "Nothing to verify" + exit 1 + fi + + for line in $(<${log}); do + dest=${line%@*} + src=${line#*@} + + pushd $(dirname ${dest}) >/dev/null + if [ "$(_chksum ${dest})" != "$(_chksum ${src})" ]; then + echo "${dest} does not match ${src}" + fi + popd > /dev/null + done +} + +usage() { + echo "$(basename $0) ARGUMENTS" + echo + echo "ARGUMENTS:" + echo " -r,--redev re-dev the install tree" + echo " -u,--undev un-dev the install tree" + echo " -V,--verify verify an un-deved tree" + echo " -v,--verbose verbose logging" + echo " -h,--help this screen" +} + +action= + +while [ $# -gt 0 ]; do + case $1 in + -r|--redev) + action='redev' + ;; + -u|--undev) + action='undev' + ;; + -V|--verify) + action='verify' + ;; + -v|--verbose) + verbose=true; + ;; + -h|--help) + usage + exit 0 + ;; + esac + shift +done + +if [ -z "${action}" ]; then + echo "No action specified" + exit 1 +fi + +${action} \ No newline at end of file diff --git a/bin/extract_rpm.sh b/bin/extract_rpm.sh new file mode 100755 index 0000000..7cc3cda --- /dev/null +++ b/bin/extract_rpm.sh @@ -0,0 +1,121 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# +# +# + +# this script extracts the rpm files +# Usage: +# extract_rpm.sh + +function extract_rpm() +{ + rpm --dbpath $rootdir/usr/lib/rpm --relocate /=$rootdir --nodeps -ivh $rpmfile +} + +function strip_rpaths() +{ + rpm -qlp $rpmfile | while read line; do + # If the RPM command failed (no files or whatever) then get out. 
+ if [ ${PIPESTATUS[0]} -ne 0 ]; then + return + fi + + # dest_file will replace the / with (install path) + dest_file=$rootdir/${line#"/"} + if [ ! -e "$dest_file" ]; then + echo "****Cannot find dest file: $dest_file" + continue + fi + + # Skip installed directories + if [ ! -f "$dest_file" ] ; then + continue + fi + + # Get the list of rpaths for this particular file. + # If the command fails, then move on to the next file + file_rpath=$(chrpath --list "$dest_file" 2>/dev/null) + if [ $? -ne 0 ]; then + continue + fi + + # Extract the beginning of chrpath --list command + # that has no value + rpath_string=${file_rpath##*RPATH=} + + # If there is an empty RPATH then move on to next file + if [ "$rpath_string" == "" ]; then + continue + fi + + # Split the RPATH into the array of directories + IFS=':' read -a rpath_array <<< "$rpath_string" + + for index in "${!rpath_array[@]}" + do + rpath_element=${rpath_array[index]} + # If this rpath element contains a .install then readjust it to match + # the current install path. + if [[ $rpath_element == *".install"* ]]; then + unset "rpath_array[index]" + # This logic below is to replace the .install path with the workspace's + # .install path. Unfortunately becuase of the chrpath limitation of the + # replacement rpath being <= source rpath, it is difficult to pull off. + # + #cache_install_path=${rpath_element%\.install*}".install" + #new_rpath_element=${rpath_element//$cache_install_path/$rootdir} + #rpath_array[index]=$new_rpath_element + fi + + # If the rpath entry contains a .build directory then strip it out completely + if [[ $rpath_element == *".build"* ]]; then + unset "rpath_array[index]" + fi + done + + # Reassemble the rpath string + new_rpath_string=$(IFS=$':'; echo "${rpath_array[*]}") + + # Replace the existing rpath with our newly contructed one. + chrpath --replace "$new_rpath_string" $dest_file + if [ $? -ne 0 ]; then + echo "ERROR: ***Failed to replace rpath in $dest_file****" + echo "****************************************************" + echo "Please see RIFT-3498. If the chrpath fails due to path length issues" + echo "A solution is to increase Jenkins/CI build path length" + echo "or shorten your workspace path length." + exit 1 + fi + + done +} + +# Make an RPM database in this directory +rootdir=$1 +# rm -rf $rootdir +mkdir -p $rootdir +rpm --initdb --dbpath $rootdir/usr/lib/rpm + +# Set which rpm file to work on +rpmfile=$2 + +# Extract the RPM file +extract_rpm + +strip_rpaths \ No newline at end of file diff --git a/bin/generate_descriptor_pkg.sh b/bin/generate_descriptor_pkg.sh new file mode 100755 index 0000000..1cbdbdc --- /dev/null +++ b/bin/generate_descriptor_pkg.sh @@ -0,0 +1,38 @@ +#! /usr/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# +# +# Author(s): Anil Gunturu +# Creation Date: 2015/10/09 +# +# This shell script is used to create a descriptor package +# The main functions of this script include: +# - Generate checksums.txt file +# - Generate a tar.gz file + +# Usage: generate_descriptor_pkg.sh + +basedir=${1%/} +dir=${2%/} +cd ${basedir}/${dir} +rm -rf checksums.txt +find * -type f | + while read file; do + md5sum $file >> checksums.txt + done +cd .. +tar -zcvf ${dir}.tar.gz ${dir} --remove-files \ No newline at end of file diff --git a/bin/generate_protopy.sh b/bin/generate_protopy.sh new file mode 100755 index 0000000..08eec24 --- /dev/null +++ b/bin/generate_protopy.sh @@ -0,0 +1,38 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# /bin/bash +# +# +# Author(s): Austin Cormier +# Creation Date: 2014/06/03 +# +# Generate all .py implementations of .proto files using the +# protoc compiler. Temporary solution until integrating +# into build process is solved. This must be run after +# a make. + +RIFT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +RIFT_ROOT="${RIFT_ROOT%/bin}" + +INSTALL_DIR="$RIFT_ROOT/.install" +PROTO_DIR="$INSTALL_DIR/usr/data/proto" +PROTOC=$INSTALL_DIR/usr/bin/protoc + +cd $PROTO_DIR + +find . -name "*.proto" | while read line; do + $PROTOC --python_out=. $line +done \ No newline at end of file diff --git a/bin/generate_supermodule_hash.sh b/bin/generate_supermodule_hash.sh new file mode 100755 index 0000000..aa79839 --- /dev/null +++ b/bin/generate_supermodule_hash.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# +# Author(s): Austin Cormier +# Creation Date: 2015/01/26 +# +# This script generates a supermodule hash which is used as part of +# the submodule hash. This ensures that when any of the following +# files/folders change in the supermodule all of the submodule +# build caches are flushed. 
+ +cd "$(dirname ${BASH_SOURCE[0]})" + +read -r -d '\n' CACHE_FILE_FIND_LIST <&2 + exit 1 +fi + +rm -f /root/.pip/pip.conf +$PIP3 install --use-wheel --no-index --find-links=https://wheel.riftio.com/mirrors/python3_wheelhouse "$@" + + diff --git a/bin/pip3-kilo-install b/bin/pip3-kilo-install new file mode 100755 index 0000000..c03266a --- /dev/null +++ b/bin/pip3-kilo-install @@ -0,0 +1,19 @@ +#!/usr/bin/bash + + +progs="/bin/pip3 /bin/python3-pip" +for prog in $progs; do + if [ -f $prog ]; then + PIP3=$prog + break + fi +done + +if [ -z "$PIP3" ]; then + echo "FATAL ERROR cannot locate pip3 installer -- tried $progs" >&2 + exit 1 +fi + +$PIP3 install --use-wheel --no-index --trusted-host wheel.riftio.com --find-links=http://wheel.riftio.com/mirrors/python3_kilo_wheelhouse "$@" + + diff --git a/bin/rift-lint.py b/bin/rift-lint.py new file mode 100755 index 0000000..17ede2e --- /dev/null +++ b/bin/rift-lint.py @@ -0,0 +1,429 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# +# Author(s): Joshua Downer +# Creation Date: 2014/07/17 + +import argparse +import compileall +import concurrent.futures +import contextlib +import cStringIO +import functools +import os +import re +import shlex +import subprocess +import sys + +class CommandError(Exception): + pass + + +@contextlib.contextmanager +def redirect_stdout(buf): + """A context manager that switches stdout for a buffer""" + tmp, sys.stdout = sys.stdout, buf + yield sys.stdout + sys.stdout = tmp + + +@contextlib.contextmanager +def pushd(path): + """A context manager that acts like pushd one enter and popd on exit + + Using this context manager will change the current working directory to the + specified path. On exit, the context manager will change back to the + original directory. + + """ + cwd = os.getcwd() + os.chdir(path) + yield + os.chdir(cwd) + + +def command(cmd): + """Executes a command in a separate process and returned the output + + The command is executed on a process and the output from the command is + returned as a list of strings. Note that empty strings are not returned. + + """ + process = subprocess.Popen(shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + output, error = process.communicate() + if process.returncode != 0: + raise CommandError(error) + + return [s for s in output.split('\n') if s] + + +def top_level(): + """Returns the path of the top level directory of the repository.""" + git_dir = command("git rev-parse --git-dir")[0] + git_dir = re.match('.*(?=\.git/?)', git_dir).group().rstrip('/') + return git_dir if git_dir else '.' 
+ + +def list_submodules(): + """Returns a list of the submodules in the current repository.""" + return command("git submodule -q foreach 'echo $path'") + + +def list_remote(): + """Returns a list of files that differ from the remote master.""" + return command('git diff remotes/origin/master --name-only') + + +def list_untracked(): + """Returns a list of untracked files.""" + return [f[3:] for f in command('git st --porcelain') if f.startswith('?? ')] + + +def list_added(): + """Returns a list of added files.""" + def added(path): + try: + return 'A' in path[:2] + except: + return False + + return [f[3:] for f in command('git st --porcelain') if added(f)] + + +def list_modified(): + """Returns a list of modified files.""" + def modified(path): + try: + return 'M' in path[:2] + except: + return False + + return [f[3:] for f in command('git st --porcelain') if modified(f)] + + +def list_range(commits): + """Returns a list of files changed over the specified range of commits""" + try: + return command('git diff {commits} --name-only'.format(commits=commits)) + except CommandError: + pass + return [] + + +def list_submodule(func, path): + """Applies a function from within a submodule and returns the result.""" + with pushd(path): + return [os.path.join(path, f) for f in func()] + + +class Repository(object): + def __init__(self, root): + """Create an object to represent the repository + + :root: the root of the repository + + """ + self._root = os.path.abspath(root) + with pushd(self.root): + self._submodules = list_submodules() + + @property + def root(self): + """The path to the root of the repository""" + return self._root + + @property + def submodules(self): + """A list of submodules in the repository""" + return self._submodules + + def foreach_submodule(self, func): + """Applies a function to each of the submodules in the repository. + + :func: a function that returns a list of file paths + + The result of the provided function is required to be a list of paths to + files within each of the submodules. The paths are relative to the root + of the repository. + + """ + with concurrent.futures.ProcessPoolExecutor() as executor: + paths = [os.path.join(self.root, s) for s in self.submodules] + results = executor.map(func, paths) + results = [u for v in results for u in v] + return [os.path.relpath(f, self.root) for f in results] + + def forall(self, func): + """Applies a function to the submodules and the top-level repo. + + :func: a function that returns a list of file paths + + The result of the provided function is required to be a list of paths to + files within each of the submodules or the top-level repo. The paths are + relative to the root of the repository. 
+ + """ + files = [] + with pushd(self.root): + files = [f for f in func() if os.path.isfile(f)] + + list_submodule_func = functools.partial(list_submodule, func) + files.extend(self.foreach_submodule(list_submodule_func)) + + return sorted(files) + + def remote(self): + """Returns a list of files that differ from the remote/origin/master.""" + return self.forall(list_remote) + + def modified(self): + """Returns a list of files have been modified in the repo and submodules""" + return self.forall(list_modified) + + def untracked(self): + """Returns a list of all the untracked files in the repo and submodules""" + return self.forall(list_untracked) + + def range(self, commits): + """Returns a list of files modified over the specified range""" + return self.forall(functools.partial(list_range, commits)) + + +class Lint(object): + command = "pylint -E --rcfile={rcfile}" + + def __init__(self, exclude=None, rcfile=None): + """ + Create a lint object. + + :exclude: a list of regular expressions used to exclude files + :rcfile: a path to a pylintrc file + + """ + self._exclude = exclude + self._command = Lint.command.format(rcfile=rcfile) + + def should_exclude(self, path): + """Returns TRUE if this specified path should be excluded + + :path: the path to test + + """ + if not path.endswith('.py'): + return True + + if not os.path.isfile(path): + return True + + try: + for rule in self._exclude: + if rule.match(path) is not None: + return True + except: + pass + + return False + + def evaluate(self, path): + """Applies pylint to the specified file. + + Pylint will only be applied to python scripts that have a '.py' suffix + and that do not match any excluded paths. + + :path: the path of the file that is to be evaluated + + """ + if self.should_exclude(path): + return [] + + results = command('{cmd} {path}'.format(cmd=self._command, path=path)) + return [line for line in results if not line.startswith("***")] + + +def compile_target(target): + """Generate bytecode for the specified target + + The :target: is a python script that get compiled into byte code. + + Returns a tuple (result, details), where result is a string that with one of + the values: SKIPPED, SUCCESS, or FAILURE. The details provide information + about any failure that has occurred. + + If there is already bytecode in the same directory as the :target:, the + :target: is not compile and returns a result of 'SKIPPED'. If there is a + syntax error in the script, 'FAILURE' is returned with the details of the + compilation error. Otherwise, 'SUCCESS' is returned. + + """ + result = 'SUCCESS' + details = [] + + # The output to stdout is redirected to a buffer so that it can + # be optionally reported in the case of a failure. + with redirect_stdout(cStringIO.StringIO()): + bytecode = target + 'c' + if os.path.isfile(bytecode): + return target, 'SKIPPED', details + + if compileall.compile_file(target, quiet=True): + os.remove(bytecode) + else: + result = 'FAILURE' + sys.stdout.seek(0) + details = [line.rstrip() for line in sys.stdout.readlines() if line] + + return target, result, details + + # If there are any error messages, write them to stdout at this + # time and then exit. Or, in verbose mode, write out a + # success/failure mode for each file. 
+ if verbose: + print('{result} {file}'.format( + result='FAILURE' if failure else 'SUCCESS', file=target)) + else: + for line in failure: + print(line.rstrip()) + + return target, result, details + + +def main(argv=sys.argv[1:]): + parser = argparse.ArgumentParser() + parser.add_argument('-c', '--compile', + action='store_true', + help="compile the python scripts to detect syntax errors") + parser.add_argument('-e', '--exclude', + help="a list of rules for excluding file paths") + parser.add_argument('-l', '--list', + action='store_true', + help="list the files to be processed") + parser.add_argument('-m', '--modified', + action='store_true', + help="list files that have been modified") + parser.add_argument('-r', '--remote', + action='store_true', + help="list files that differ from the remote") + parser.add_argument('-t', '--target', + default=None, + type=str, + help="a directory to search (recursively) for python files") + parser.add_argument('--range', + help="list files that have changed over a specified range \ + of commits (as understood by git)") + parser.add_argument('-u', '--untracked', + action='store_true', + help="list files that are untracked") + parser.add_argument('--rcfile', + default=os.path.join(top_level(), 'etc/pylintrc'), + help="specifies the path to a pylintrc file to use") + parser.add_argument('-v', '--verbose', + action='store_true', + help="print out additional diagnostic information") + parser.add_argument('files', + nargs=argparse.REMAINDER, + default=[], + help="a list of additional files to process") + + args = parser.parse_args(argv) + + # If pylint is required, check that it is available + if not args.compile: + try: + command('command -v pylint') + except CommandError: + print('Unable to find pylint on the PATH') + exit(1) + except Exception as e: + print(str(e)) + exit(2) + + repo = Repository(top_level()) + + # Construct the lint object using any rules provided by the caller + exclude = args.exclude.split(":") if args.exclude else [] + lint = Lint(rcfile=args.rcfile, exclude=[re.compile(e) for e in exclude]) + + # Construct a list of the required files + files = args.files + if args.modified: + files.extend(repo.modified()) + if args.untracked: + files.extend(repo.untracked()) + if args.remote: + files.extend(repo.remote()) + if args.range: + files.extend(repo.range(args.range)) + + # If a target directory has been specified, recursively search for python + # files + if args.target is not None: + if not os.path.isdir(args.target): + print("The specified target directory does not exist!") + exit(1) + + for root, _, names in os.walk(args.target): + files.extend(os.path.join(root, n) for n in names if n.endswith('.py')) + + # Simply print out the paths of all of the files + if args.list: + for f in files: + print(f) + + # Compile each of the specified files to determine if there are any syntax + # errors. 
+ elif args.compile: + files = [f for f in files if f.endswith('.py')] + with pushd(repo.root): + with concurrent.futures.ProcessPoolExecutor() as executor: + futures = [executor.submit(compile_target, f) for f in files] + concurrent.futures.wait(futures) + + results = [f.result() for f in futures] + + if args.verbose: + failed = False + for target, result, details in results: + failed = (failed or result == 'FAILURE') + print('PYCHECK {result} {target}'.format(result=result, target=target)) + for line in details: + print(line) + + if failed: + exit(1) + + else: + failures = [target for target, result, _ in results if result == 'FAILURE'] + for target in failures: + print('PYCHECK FAILURE {target}'.format(target=target)) + + if failures: + exit(1) + + # Apply pylint to each of the files and report the result + else: + with pushd(repo.root): + for f in files: + for line in lint.evaluate(f): + print(line) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/bin/submodule_has_failed_tests.sh b/bin/submodule_has_failed_tests.sh new file mode 100755 index 0000000..3a7500a --- /dev/null +++ b/bin/submodule_has_failed_tests.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# +# Author(s): Austin Cormier +# Creation Date: 2014/06/05 +# +# Script to be invoked by CMake to determine whether a submodule has failed tests. +# Currently, test_wrapper.sh will create a _FAILED file for each unit test +# that lives within a submodule tree. We can seach this tree for any *_FAILED file +# to determine if any test failed. +# +# submodule_has_failed_test.sh +# +# Arguments: +# - Submodule path relative to the root of the repository. +# +# Returns 0 if failed tests were found and 1 otherwise. + +rift_root="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +rift_root="${rift_root%/bin}" + +unittest_dir="$RIFT_UNIT_TEST" + +if [ ! $# -eq 1 ]; then + echo "ERROR: A submodule prefix argument is expected." + exit 1 +fi + +submodule_prefix="$1" + +if [ ! -d "$rift_root/$submodule_prefix" ]; then + echo "ERROR: Submodule doesn't exist?: $rift_root/$submodule_prefix" + exit 1 +fi + +submodule_unittest_dir="$unittest_dir/$submodule_prefix" +if [ ! -d "$submodule_unittest_dir" ]; then + echo "WARNING: Submodule unittest output directory doesn't exist: $submodule_unittest_dir" + exit 1 +fi + +found_files=$(find "$submodule_unittest_dir" -name "*_FAILED" | wc -l) +if [ $found_files == "0" ]; then + echo "INFO: Did not find any failed unittests in: $submodule_unittest_dir" + exit 1 +fi + +# There a certain cases (build_ladder) where we still want to cache the submodule even +# if there are failures. This was the easiest place to inject that logic. +if [ $BCACHE_IGNORE_FAILED_SUBMODULE_TESTS -eq 1 ]; then + echo 'WARNING: $BCACHE_IGNORE_FAILED_SUBMODULE_TESTS env var set, caching submodule regardless of failed unit tests.' 
+ exit 1 +fi + +echo "INFO: Found $found_files failed unit tests in: $submodule_unittest_dir" +exit 0 \ No newline at end of file diff --git a/bin/uninitialize_cached_submodules.sh b/bin/uninitialize_cached_submodules.sh new file mode 100755 index 0000000..cdb4ee0 --- /dev/null +++ b/bin/uninitialize_cached_submodules.sh @@ -0,0 +1,131 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# /bin/bash +# +# +# Author(s): Austin Cormier +# Creation Date: 2014/06/03 +# +# This script is meant to assist Jenkins in performing incremental +# builds by deinitializing submodules if the build cache exists for the +# submodule hash. +# +# uninitialize_cached_submodules.sh +# +# Arguments: +# - Debug, Debug_Coverage, Release +# + +set -o nounset +set -u + +THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +DEPENDENCY_PARSER_CMD="$THIS_DIR/dependency_parser.py" + +# Set some vars if not defined by env variables. Used for testing. +GET_SUBMODULES_CMD="$THIS_DIR/dependency_sort.py" + +function verify_cwd_at_root(){ + if [ ! -f ".gitmodules.deps" ]; then + echo "ERROR: This script should be run at the top-level" + exit 1 + fi +} + +## +# Calculate and return submodule hash. Capture stdout to get hash. +# $1 - submodule to calculate hash for +## +function get_submodule_hash(){ + local submodule="$1" + + set -x + local parser_cmd="$DEPENDENCY_PARSER_CMD --dependency-file=.gitmodules.deps "--submodule=$submodule" --print-hash" + local hash=$($parser_cmd) + set +x + if [ $? -ne 0 ]; then + echo "ERROR: Command failed to retrieve submodule hash (command: $parser_cmd)" + exit 1 + fi + + echo $hash +} + +## +# Gets list of submodules available in workspace. Capture stdout to get list. +## +function get_submodules(){ + local sorted_submodules=$($GET_SUBMODULES_CMD) + if [ $? -ne 0 ]; then + echo "ERROR: Could not get list of submodules." + exit 1 + fi + + echo "$sorted_submodules" +} + +## +# Builds the full build cache path. Capture stdout to get the path. +# $1 - Submodule +# $2 - Submodule Hash +# $3 - Build Type +## +function get_full_cache_path(){ + local submodule="$1" + local hash="$2" + local build_type="$3" + + local cache_path="$RIFT_BUILD_CACHE_DIR/$build_type/$submodule/$hash" + + echo $cache_path +} + +if [ $# -ne 1 ]; then + echo "ERROR: Expecting a single build_type argument" + exit 1 +fi + +if [[ "$1" != "Debug_FILES" &&"$1" != "Debug" && "$1" != "Debug_Coverage" && $1 != "Release" ]]; then + echo "ERROR: Build type should be in the set (Debug, Debug_Coverage, Release)." + exit 1 +fi + +build_type=$1 + +submodules="$(get_submodules)" +# Convert the string into an array using the default IFS of ' ' +read -a submodules_array <<< "$submodules" + +for submodule in "${submodules_array[@]}"; do + if [ ! -e "$submodule" ]; then + echo "WARNING: Could not find $submodule path." 
+ continue + fi + + hash="$(get_submodule_hash $submodule)" + echo "INFO: Calculated submodule hash for $submodule: $hash" + + full_cache_path="$(get_full_cache_path $submodule $hash $build_type)" + echo "INFO: Checking if submodule cache path exists for $submodule: $full_cache_path" + if [ -e "$full_cache_path" ]; then + echo "INFO: Build cache exists for submodule $submodule. Deinitializing." + git submodule deinit $submodule + if [ $? -ne 0 ]; then + echo "ERROR: Could not deinitialize submodule: $submodule" + continue + fi + fi +done \ No newline at end of file diff --git a/modules/core/mano/.cpack-workaround b/modules/core/mano/.cpack-workaround new file mode 100644 index 0000000..e69de29 diff --git a/modules/core/mano/CMakeLists.txt b/modules/core/mano/CMakeLists.txt new file mode 100644 index 0000000..7c6b207 --- /dev/null +++ b/modules/core/mano/CMakeLists.txt @@ -0,0 +1,72 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 03/26/2014 +# + +## +# DEPENDENCY ALERT +# The submodule dependencies must be specified in the +# .gitmodules.dep file at the top level (supermodule) directory +# If this submodule depends other submodules remember to update +# the .gitmodules.dep +## + +cmake_minimum_required(VERSION 2.8) + +## +# Set the path to the top level cmake modules directory +## +set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../../cmake/modules") + +## +# DO NOT add any code before this and DO NOT +# include this file anywhere else +## +include(rift_submodule) +include(rift_python) + +## +# Submodule specific includes will go here, +# These are specified here, since these variables are accessed +# from multiple sub directories. If the variable is subdirectory +# specific it must be declared in the subdirectory. +## + +## +# Include the subdirs +## +set(subdirs + models + common + rwmc + rwlaunchpad + confd_client + rwcm + ) + +rift_add_subdirs(SUBDIR_LIST ${subdirs}) + +## +# This macro adds targets for documentaion, unittests, code coverage and packaging +## +rift_add_submodule_targets(SUBMODULE_PACKAGE_NAME "rw.core.mc") + +# Workaround whatever mess rw.package is doing as it can't seem +# to figure out that it should make a directory -before- making +# symlinks.. +set(dir usr/lib64/python${RIFT_PYTHON3}/site-packages/gi/overrides) +install(FILES + ${CMAKE_CURRENT_SOURCE_DIR}/.cpack-workaround + DESTINATION ${dir}) + +if (RIFT_SUPPORT_PYTHON2) + set(dir usr/lib64/python${RIFT_PYTHON2}/site-packages/gi/overrides) + + install(FILES + ${CMAKE_CURRENT_SOURCE_DIR}/.cpack-workaround + DESTINATION ${dir}) +endif() + + diff --git a/modules/core/mano/Makefile b/modules/core/mano/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/README b/modules/core/mano/README new file mode 100644 index 0000000..ed6a799 --- /dev/null +++ b/modules/core/mano/README @@ -0,0 +1,9 @@ +This sumodule contains the MANO subsystem from RIFT.ware. The following +section describes the directory structure of the MANO subsystem: + +common: contains code shared by mission-control and launchpad +examples: contains a ping/pong NS example +models: contains YANG based information models +rwlaunchpad: contains software for RIFT.ware launchpad +rwmc: contains software RIFT.ware mission control +rwcm: conatins software for RIFT.ware configuration manager diff --git a/modules/core/mano/common/CMakeLists.txt b/modules/core/mano/common/CMakeLists.txt new file mode 100644 index 0000000..2cb4f42 --- /dev/null +++ b/modules/core/mano/common/CMakeLists.txt @@ -0,0 +1,30 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME common) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + +set(subdirs + plugins + python + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) + +install( + FILES + rw_gen_package.py + DESTINATION usr/rift/mano/common + COMPONENT ${PKG_LONG_NAME}) diff --git a/modules/core/mano/common/plugins/CMakeLists.txt b/modules/core/mano/common/plugins/CMakeLists.txt new file mode 100644 index 0000000..95fb6af --- /dev/null +++ b/modules/core/mano/common/plugins/CMakeLists.txt @@ -0,0 +1,19 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + rwcntmgrtasklet + yang + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/CMakeLists.txt b/modules/core/mano/common/plugins/rwcntmgrtasklet/CMakeLists.txt new file mode 100644 index 0000000..18eb9c1 --- /dev/null +++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/CMakeLists.txt @@ -0,0 +1,26 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwcntmgrtasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
+rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/Makefile b/modules/core/mano/common/plugins/rwcntmgrtasklet/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py b/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py new file mode 100644 index 0000000..f7b0ab3 --- /dev/null +++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwcntmgrtasklet import ContainerManager diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py b/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py new file mode 100755 index 0000000..3a8b437 --- /dev/null +++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py @@ -0,0 +1,331 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + +import asyncio +import logging +import os +import shlex +import subprocess +import time +import uuid + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwcalYang', '1.0') + +from gi.repository import ( + RwDts as rwdts, + RwcalYang, +) + +import rift.rwcal.cloudsim.lvm as lvm +import rift.rwcal.cloudsim.lxc as lxc +import rift.tasklets +import rw_peas + + +class SaltConnectionTimeoutError(Exception): + pass + + +class ContainerManager(rift.tasklets.Tasklet): + def __init__(self, *args, **kwargs): + super(ContainerManager, self).__init__(*args, **kwargs) + self.lvm = None + self.resources = None + self.dts_api = None + + def start(self): + super(ContainerManager, self).start() + self.log.info("Starting ContainerManager") + self.log.setLevel(logging.DEBUG) + ResourceProvisioning.log_hdl = self.log_hdl + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS( + self.tasklet_info, + RwcalYang.get_schema(), + self.loop, + self.on_dts_state_change + ) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + def stop(self): + super(ContainerManager, self).stop() + self.resources.destroy() + self.lvm.destroy() + + @asyncio.coroutine + def init(self): + # Create the LVM backing store with the 'rift' volume group + self.lvm = LvmProvisioning() + self.resources = ResourceProvisioning(self.loop, self.log) + + # Create lvm partition + yield from self.loop.run_in_executor( + None, + self.resources.destroy, + ) + + if "REUSE_LXC" not in os.environ: + # Create lvm partition + yield from self.loop.run_in_executor( + None, + self.lvm.destroy, + ) + + # Create lvm partition + yield from self.loop.run_in_executor( + None, + self.lvm.create, + ) + + # Create an initial set of VMs + yield from self.loop.run_in_executor( + None, + self.resources.create, + ) + + yield from self.loop.run_in_executor( + None, + self.resources.wait_ready, + ) + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) + + +class LvmProvisioning(object): + """ + This class represents LVM provisioning. + """ + + def create(self): + """Creates an LVM backing store""" + lvm.create('rift') + + def destroy(self): + """Destroys the existing LVM backing store""" + lvm.destroy('rift') + + +class ResourceProvisioning(object): + """ + This is a placeholder class that is used to represent the provisioning of + container resources. 
+ """ + + cal_interface = None + log_hdl = None + + def __init__(self, loop, log): + # Initialize the CAL interface if it has not already been initialized + if ResourceProvisioning.cal_interface is None: + plugin = rw_peas.PeasPlugin('rwcal_cloudsimproxy', 'RwCal-1.0') + engine, info, extension = plugin() + + ResourceProvisioning.cal_interface = plugin.get_interface("Cloud") + ResourceProvisioning.cal_interface.init(ResourceProvisioning.log_hdl) + + self.account = RwcalYang.CloudAccount() + self.account.account_type = "cloudsim_proxy" + self.account.cloudsim_proxy.host = "192.168.122.1" + + self.log = log + self.loop = loop + self.nvms = 1 + + self._vms = [] + + @property + def cal(self): + return ResourceProvisioning.cal_interface + + def create(self): + """Create all of the necessary resources""" + + rift_root = os.environ['RIFT_ROOT'] + image = self.create_image("%s/images/rift-root-latest.qcow2" % (rift_root)) + + # Create a VM + for index in range(self.nvms): + self._vms.append(self.create_vm(image, index)) + + # Start the VMs + for vm in self._vms: + self.cal.start_vm(self.account, vm.vm_id) + + def destroy(self): + """Destroy all of the provided resources""" + + for container in lxc.containers(): + lxc.stop(container) + + for container in lxc.containers(): + if not ("REUSE_LXC" in os.environ and container == "rwm0"): + lxc.destroy(container) + + def create_image(self, location): + """Creates and returns a CAL image""" + + image = RwcalYang.ImageInfoItem() + image.name = "rift-lxc-image" + image.location = location + image.disk_format = "qcow2" + rc, image.id = self.cal.create_image(self.account, image) + return image + + def create_network(self, network_name, subnet): + """Creates and returns a CAL network""" + + network = RwcalYang.NetworkInfoItem( + network_name=network_name, + subnet=subnet, + ) + rc, network.network_id = self.cal.create_network(self.account, network) + return network + + def create_vm(self, image, index): + """Returns a VM + + Arguments: + image - the image used to create the VM + index - an index used to label the VM + + Returns: + A VM object + + """ + vm = RwcalYang.VMInfoItem() + vm.vm_name = 'rift-s{}'.format(index + 1) + vm.image_id = image.id + vm.user_tags.node_id = str(uuid.uuid4()) + + user_data_template_str = open( + os.path.join( + os.environ['RIFT_INSTALL'], + 'etc/userdata-template', + ) + ).read() + + # Get the interface ip address of the mgmt network + # This is where the salt master is accessible on + mgmt_interface_ip = "192.168.122.1" + + # Create salt-stack userdata + vm.cloud_init.userdata = user_data_template_str.format( + master_ip=mgmt_interface_ip, + lxcname=vm.user_tags.node_id, + ) + + rc, vm.vm_id = self.cal.create_vm(self.account, vm) + + return vm + + def wait_vm_salt_connection(self, vm, timeout_secs=600): + """ Wait for vm salt minion to reach up state with master """ + + vm_node_id = vm.user_tags.node_id + start_time = time.time() + self.log.debug("Waiting up to %s seconds for node id %s", + timeout_secs, vm_node_id) + while (time.time() - start_time) < timeout_secs: + try: + stdout = subprocess.check_output( + shlex.split('salt %s test.ping' % vm_node_id), + universal_newlines=True, + ) + except subprocess.CalledProcessError: + continue + + up_minions = stdout.splitlines() + for line in up_minions: + if "True" in line: + return + + raise SaltConnectionTimeoutError( + "Salt id %s did not enter UP state in %s seconds" % ( + vm_node_id, timeout_secs + ) + ) + + def wait_ready(self): + """ Wait for all resources to become ready """ 
+ + self.log.info("Waiting for all VM's to make a salt minion connection") + for i, vm in enumerate(self._vms): + self.wait_vm_salt_connection(vm) + self.log.debug( + "Node id %s came up (%s/%s)", + vm.user_tags.node_id, i + 1, len(self._vms) + ) + + def create_port(self, network, vm, index): + """Returns a port + + Arguments: + network - a network object + vm - a VM object + index - an index to label the port + + Returns: + Returns a port object + + """ + port = RwcalYang.PortInfoItem() + port.port_name = "eth1" + port.network_id = network.network_id + port.vm_id = vm.vm_id + + rc, port.port_id = self.cal.create_port(self.account, port) + return port \ No newline at end of file diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py b/modules/core/mano/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py new file mode 100755 index 0000000..8d517ea --- /dev/null +++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
+ +import rift.tasklets.rwcntmgrtasklet + +class Tasklet(rift.tasklets.rwcntmgrtasklet.ContainerManager): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/common/plugins/yang/CMakeLists.txt b/modules/core/mano/common/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..8e29677 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 2015/11/20 +# + +## +# Yang targets +## +rift_add_yang_target( + TARGET rwcloud_yang + YANG_FILES rw-cloud.yang rw-sdn.yang + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + rwsdn_yang_gen +) + +rift_add_yang_target( + TARGET rwconfig_agent_yang + YANG_FILES rw-config-agent.yang + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + mano_yang_gen + DEPENDS + mano_yang +) diff --git a/modules/core/mano/common/plugins/yang/rw-cloud.tailf.yang b/modules/core/mano/common/plugins/yang/rw-cloud.tailf.yang new file mode 100644 index 0000000..d7dc559 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-cloud.tailf.yang @@ -0,0 +1,29 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-cloud-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-cloud-annotation"; + prefix "rw-cloud-ann"; + + import rw-cloud { + prefix rw-cloud; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-cloud:cloud/rw-cloud:account/rw-cloud:connection-status" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-cloud:update-cloud-status" { + tailf:actionpoint rw_action; + } +} diff --git a/modules/core/mano/common/plugins/yang/rw-cloud.yang b/modules/core/mano/common/plugins/yang/rw-cloud.yang new file mode 100755 index 0000000..c3fb1c7 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-cloud.yang @@ -0,0 +1,81 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-cloud +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-cloud"; + prefix "rw-cloud"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import rw-sdn { + prefix "rw-sdn"; + } + + revision 2015-09-14 { + description + "Initial revision."; + } + + container cloud { + rwpb:msg-new CloudConfig; + list account { + rwpb:msg-new CloudAccount; + description "Configure Cloud Accounts"; + + max-elements 16; + key "name"; + + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + + leaf sdn-account { + description "Configured SDN account associated with this cloud account"; + type leafref { + path "/rw-sdn:sdn-account/rw-sdn:name"; + } + } + + uses rwcal:provider-auth; + uses rwcal:connection-status; + } + } + + rpc update-cloud-status { + description "Begin cloud account connection status"; + input { + leaf cloud-account { + mandatory true; + description + "The cloud account name to update connection status for"; + type string; + } + } + } + +} + diff --git a/modules/core/mano/common/plugins/yang/rw-config-agent.taif.yang b/modules/core/mano/common/plugins/yang/rw-config-agent.taif.yang new file mode 100644 index 0000000..cd72eea --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-config-agent.taif.yang @@ -0,0 +1,17 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-config-agent-annotation +{ + namespace 
"http://riftio.com/ns/riftware-1.0/rw-config-agent-annotation"; + prefix "rw-config-agent-ann"; + + import rw-config-agent { + prefix rw-config-agent; + } +} diff --git a/modules/core/mano/common/plugins/yang/rw-config-agent.yang b/modules/core/mano/common/plugins/yang/rw-config-agent.yang new file mode 100755 index 0000000..e97d419 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-config-agent.yang @@ -0,0 +1,83 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-config-agent +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-config-agent"; + prefix "rw-config-agent"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-inet-types { + prefix "inet"; + } + + revision 2016-02-04 { + description + "Initial revision."; + } + + typedef config-agent-account-type { + description "config agent account type"; + type enumeration { + enum juju; + } + } + + container config-agent { + rwpb:msg-new ConfigAgent; + + list account { + rwpb:msg-new ConfigAgentAccount; + key "name"; + + description "List of configuration agent accounts"; + + leaf name { + description "Name of this config agent account"; + type string; + } + + leaf account-type { + type config-agent-account-type; + } + + choice config-agent-account-type { + case juju { + description + "Configure the VNF through Juju."; + container juju { + leaf ip-address { + description "Juju host IP address."; + type inet:ip-address; + } + leaf port { + description + "Juju host port number. Default 17070."; + type inet:port-number; + default 17070; + } + leaf user { + description + "User name to connect to Juju host. Default user-admin."; + type string; + default "user-admin" ; + } + leaf secret { + description + "Admin secret or password for Juju host."; + type string; + } + } + } + } + } + } +} diff --git a/modules/core/mano/common/plugins/yang/rw-sdn.tailf.yang b/modules/core/mano/common/plugins/yang/rw-sdn.tailf.yang new file mode 100644 index 0000000..3cf4beb --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-sdn.tailf.yang @@ -0,0 +1,17 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-sdn-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-sdn-annotation"; + prefix "rw-sdn-ann"; + + import rw-sdn { + prefix rw-sdn; + } +} diff --git a/modules/core/mano/common/plugins/yang/rw-sdn.yang b/modules/core/mano/common/plugins/yang/rw-sdn.yang new file mode 100644 index 0000000..41bc4a8 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-sdn.yang @@ -0,0 +1,47 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-sdn +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-sdn"; + prefix "rw-sdn"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwsdn { + prefix "rwsdn"; + } + + revision 2015-09-14 { + description + "Initial revision."; + } + + list sdn-account { + rwpb:msg-new SDNAccountConfig; + + key "name"; + leaf name { + type string; + } + + uses rwsdn:sdn-provider-auth; + } +} + diff --git a/modules/core/mano/common/python/CMakeLists.txt b/modules/core/mano/common/python/CMakeLists.txt new file mode 100644 index 0000000..a390627 --- /dev/null +++ b/modules/core/mano/common/python/CMakeLists.txt @@ -0,0 +1,18 @@ +# Creation Date: 2016/1/12 +# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END) + +cmake_minimum_required(VERSION 2.8) + + +rift_python_install_tree( + FILES + 
rift/mano/cloud/__init__.py + rift/mano/cloud/accounts.py + rift/mano/cloud/config.py + rift/mano/cloud/operdata.py + rift/mano/config_agent/operdata.py + rift/mano/config_agent/__init__.py + rift/mano/config_agent/config.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY + ) diff --git a/modules/core/mano/common/python/rift/mano/cloud/__init__.py b/modules/core/mano/common/python/rift/mano/cloud/__init__.py new file mode 100644 index 0000000..4c1191d --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/cloud/__init__.py @@ -0,0 +1,28 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .accounts import ( + CloudAccount, + CloudAccountCalError, + ) + +from .config import ( + CloudAccountConfigSubscriber, + CloudAccountConfigCallbacks + ) + +from .operdata import ( + CloudAccountDtsOperdataHandler, +) \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/cloud/accounts.py b/modules/core/mano/common/python/rift/mano/cloud/accounts.py new file mode 100644 index 0000000..908f8c8 --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/cloud/accounts.py @@ -0,0 +1,174 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import asyncio + +from gi.repository import ( + RwTypes, + RwcalYang, + RwCloudYang, + ) +import rw_peas + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class PluginLoadingError(Exception): + pass + + +class CloudAccountCalError(Exception): + pass + + +class CloudAccount(object): + def __init__(self, log, rwlog_hdl, account_msg): + self._log = log + self._account_msg = account_msg.deep_copy() + + self._cal_plugin = None + self._engine = None + + self._cal = self.plugin.get_interface("Cloud") + self._cal.init(rwlog_hdl) + + self._status = RwCloudYang.CloudAccount_ConnectionStatus( + status="unknown", + details="Connection status lookup not started" + ) + + self._validate_task = None + + @property + def plugin(self): + if self._cal_plugin is None: + try: + self._cal_plugin = rw_peas.PeasPlugin( + getattr(self._account_msg, self.account_type).plugin_name, + 'RwCal-1.0', + ) + + except AttributeError as e: + raise PluginLoadingError(str(e)) + + self._engine, _, _ = self._cal_plugin() + + return self._cal_plugin + + def _wrap_status_fn(self, fn, *args, **kwargs): + ret = fn(*args, **kwargs) + rw_status = ret[0] + if rw_status != RwTypes.RwStatus.SUCCESS: + msg = "%s returned %s" % (fn.__name__, str(rw_status)) + self._log.error(msg) + raise CloudAccountCalError(msg) + + # If there was only one other return value besides rw_status, then just + # return that element. Otherwise return the rest of the return values + # as a list. + return ret[1] if len(ret) == 2 else ret[1:] + + @property + def cal(self): + return self._cal + + @property + def name(self): + return self._account_msg.name + + @property + def account_msg(self): + return self._account_msg + + @property + def cal_account_msg(self): + return RwcalYang.CloudAccount.from_dict( + self.account_msg.as_dict(), + ignore_missing_keys=True, + ) + + def cloud_account_msg(self, account_dict): + self._account_msg = RwCloudYang.CloudAccount.from_dict(account_dict) + + @property + def account_type(self): + return self._account_msg.account_type + + @property + def connection_status(self): + return self._status + + def update_from_cfg(self, cfg): + self._log.debug("Updating parent CloudAccount to %s", cfg) + + # Hack to catch updates triggered from apply_callback when a sdn-account is removed + # from a cloud-account. 
To be fixed properly when updates are handled + if (self.account_msg.name == cfg.name + and self.account_msg.account_type == cfg.account_type): + return + + if cfg.has_field("sdn_account"): + self.account_msg.sdn_account = cfg.sdn_account + else: + raise NotImplementedError("Update cloud account not yet supported") + + def create_image(self, filename): + image_id = self._wrap_status_fn( + self.cal.create_image, self.cal_account_msg, filename + ) + + return image_id + + def get_image_list(self): + self._log.debug("Getting image list from account: %s", self.name) + resources = self._wrap_status_fn( + self.cal.get_image_list, self.cal_account_msg + ) + + return resources.imageinfo_list + + @asyncio.coroutine + def validate_cloud_account_credentials(self, loop): + self._log.debug("Validating Cloud Account credentials %s", self._account_msg) + self._status = RwCloudYang.CloudAccount_ConnectionStatus( + status="validating", + details="Cloud account connection validation in progress" + ) + rwstatus, status = yield from loop.run_in_executor( + None, + self._cal.validate_cloud_creds, + self.cal_account_msg, + ) + if rwstatus == RwTypes.RwStatus.SUCCESS: + self._status = RwCloudYang.CloudAccount_ConnectionStatus.from_dict(status.as_dict()) + else: + self._status = RwCloudYang.CloudAccount_ConnectionStatus( + status="failure", + details="Error when calling CAL validate cloud creds" + ) + + self._log.info("Got cloud account validation response: %s", self._status) + + def start_validate_credentials(self, loop): + if self._validate_task is not None: + self._validate_task.cancel() + self._validate_task = None + + self._validate_task = asyncio.ensure_future( + self.validate_cloud_account_credentials(loop), + loop=loop + ) \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/cloud/config.py b/modules/core/mano/common/python/rift/mano/cloud/config.py new file mode 100644 index 0000000..a495d16 --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/cloud/config.py @@ -0,0 +1,256 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import rw_peas + +import gi +gi.require_version('RwDts', '1.0') +import rift.tasklets + +from gi.repository import ( + RwcalYang as rwcal, + RwDts as rwdts, + ProtobufC, + ) + +from . 
import accounts + +class CloudAccountNotFound(Exception): + pass + + +class CloudAccountError(Exception): + pass + + +def get_add_delete_update_cfgs(dts_member_reg, xact, key_name): + # Unforunately, it is currently difficult to figure out what has exactly + # changed in this xact without Pbdelta support (RIFT-4916) + # As a workaround, we can fetch the pre and post xact elements and + # perform a comparison to figure out adds/deletes/updates + xact_cfgs = list(dts_member_reg.get_xact_elements(xact)) + curr_cfgs = list(dts_member_reg.elements) + + xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs} + curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs} + + # Find Adds + added_keys = set(xact_key_map) - set(curr_key_map) + added_cfgs = [xact_key_map[key] for key in added_keys] + + # Find Deletes + deleted_keys = set(curr_key_map) - set(xact_key_map) + deleted_cfgs = [curr_key_map[key] for key in deleted_keys] + + # Find Updates + updated_keys = set(curr_key_map) & set(xact_key_map) + updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]] + + return added_cfgs, deleted_cfgs, updated_cfgs + + +class CloudAccountConfigCallbacks(object): + def __init__(self, + on_add_apply=None, on_add_prepare=None, + on_update_apply=None, on_update_prepare=None, + on_delete_apply=None, on_delete_prepare=None): + + @asyncio.coroutine + def prepare_noop(*args, **kwargs): + pass + + def apply_noop(*args, **kwargs): + pass + + self.on_add_apply = on_add_apply + self.on_add_prepare = on_add_prepare + self.on_update_apply = on_update_apply + self.on_update_prepare = on_update_prepare + self.on_delete_apply = on_delete_apply + self.on_delete_prepare = on_delete_prepare + + for f in ('on_add_apply', 'on_update_apply', 'on_delete_apply'): + ref = getattr(self, f) + if ref is None: + setattr(self, f, apply_noop) + continue + + if asyncio.iscoroutinefunction(ref): + raise ValueError('%s cannot be a coroutine' % (f,)) + + for f in ('on_add_prepare', 'on_update_prepare', 'on_delete_prepare'): + ref = getattr(self, f) + if ref is None: + setattr(self, f, prepare_noop) + continue + + if not asyncio.iscoroutinefunction(ref): + raise ValueError("%s must be a coroutine" % f) + + +class CloudAccountConfigSubscriber(object): + XPATH = "C,/rw-cloud:cloud/rw-cloud:account" + + def __init__(self, dts, log, rwlog_hdl, cloud_callbacks): + self._dts = dts + self._log = log + self._rwlog_hdl = rwlog_hdl + self._reg = None + + self.accounts = {} + + self._cloud_callbacks = cloud_callbacks + + def add_account(self, account_msg): + self._log.info("adding cloud account: {}".format(account_msg)) + + account = accounts.CloudAccount(self._log, self._rwlog_hdl, account_msg) + self.accounts[account.name] = account + + self._cloud_callbacks.on_add_apply(account) + + def delete_account(self, account_name): + self._log.info("deleting cloud account: {}".format(account_name)) + del self.accounts[account_name] + + self._cloud_callbacks.on_delete_apply(account_name) + + def update_account(self, account_msg): + self._log.info("updating cloud account: {}".format(account_msg)) + account = accounts.CloudAccount(self._log, self._rwlog_hdl, account_msg) + self.accounts[account.name].update_from_cfg(account_msg) + + # Block update callbacks for cloud accounts if due to SDN account changes + # If there are other cloud-account fields that are also updated at the same time, + # in addition to sdn-account, this update will not be triggered. 
+ # The logic to detect this might not be worth it since this won't happen through UI + if not account_msg.has_field("sdn_account"): + self._cloud_callbacks.on_update_apply(account) + + def register(self): + @asyncio.coroutine + def apply_config(dts, acg, xact, action, _): + self._log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action) + + if xact.xact is None: + if action == rwdts.AppconfAction.INSTALL: + curr_cfg = self._reg.elements + for cfg in curr_cfg: + self._log.debug("Cloud account being re-added after restart.") + if not cfg.has_field('account_type'): + raise CloudAccountError("New cloud account must contain account_type field.") + print(cfg) + print("Adding account .........") + self.add_account(cfg) + return + else: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self._reg, + xact=xact, + key_name="name", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_account(cfg.name) + + # Handle Adds + for cfg in add_cfgs: + self.add_account(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_account(cfg) + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for Cloud Account """ + + action = xact_info.query_action + self._log.debug("Cloud account on_prepare config received (action: %s): %s", + xact_info.query_action, msg) + + if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]: + # If the account already exists, then this is an update. Update the + # cloud account and invoke the on_update_prepare callback + if msg.name in self.accounts: + self._log.debug("Cloud account already exists. Invoking on_prepare update request") + if msg.has_field("account_type"): + raise CloudAccountError("Cannot change cloud's account-type") + + account = self.accounts[msg.name] + account.update_from_cfg(msg) + + # Block update callbacks for cloud accounts if due to SDN account changes + # If there are other cloud-account fields that are also updated at the same time, + # in addition to sdn-account, this update will not be triggered. + # The logic to detect this might not be worth it since this won't happen through UI + if not msg.has_field("sdn_account"): + yield from self._cloud_callbacks.on_update_prepare(account) + + else: + self._log.debug("Cloud account does not already exist. 
Invoking on_prepare add request") + if not msg.has_field('account_type'): + raise CloudAccountError("New cloud account must contain account_type field.") + + account = accounts.CloudAccount(self._log, self._rwlog_hdl, msg) + yield from self._cloud_callbacks.on_add_prepare(account) + + elif action == rwdts.QueryAction.DELETE: + # Check if the entire cloud account got deleted + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + if fref.is_field_deleted(): + yield from self._cloud_callbacks.on_delete_prepare(msg.name) + else: + fref.goto_proto_name(msg.to_pbcm(), "sdn_account") + if fref.is_field_deleted(): + # SDN account disassociated from cloud account + account = self.accounts[msg.name] + dict_account = account.account_msg.as_dict() + del dict_account["sdn_account"] + account.cloud_account_msg(dict_account) + else: + self._log.error("Deleting individual fields for cloud account not supported") + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + else: + self._log.error("Action (%s) NOT SUPPORTED", action) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for Cloud Account config using xpath: %s", + CloudAccountConfigSubscriber.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self._dts.appconf_group_create(acg_handler) as acg: + self._reg = acg.register( + xpath=CloudAccountConfigSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare, + ) \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/cloud/operdata.py b/modules/core/mano/common/python/rift/mano/cloud/operdata.py new file mode 100644 index 0000000..b4db9b3 --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/cloud/operdata.py @@ -0,0 +1,114 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
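Editor's note: the operdata handler below kicks off CloudAccount.start_validate_credentials(), which cancels any in-flight check and runs the blocking CAL validation call on an executor so the event loop is never blocked. A minimal sketch of that pattern, assuming a hypothetical blocking check_credentials() in place of the real CAL call, written in the pre-3.5 coroutine style used throughout this code:

    import asyncio

    def check_credentials(account_name):
        # Hypothetical blocking call standing in for cal.validate_cloud_creds().
        return "ok"

    class CredentialValidator(object):
        def __init__(self):
            self._task = None
            self.status = "unknown"

        @asyncio.coroutine
        def _validate(self, loop, name):
            self.status = "validating"
            # Run the blocking check off the event loop.
            result = yield from loop.run_in_executor(None, check_credentials, name)
            self.status = "success" if result == "ok" else "failure"

        def start(self, loop, name):
            # Cancel any in-flight validation before starting a new one.
            if self._task is not None:
                self._task.cancel()
            self._task = asyncio.ensure_future(self._validate(loop, name), loop=loop)

    loop = asyncio.get_event_loop()
    validator = CredentialValidator()
    validator.start(loop, "openstack-1")
    loop.run_until_complete(validator._task)
    print(validator.status)  # "success"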
+ +import asyncio +import rift.tasklets + +from gi.repository import( + RwCloudYang, + RwDts as rwdts, + ) + +class CloudAccountNotFound(Exception): + pass + + +class CloudAccountDtsOperdataHandler(object): + def __init__(self, dts, log, loop): + self._dts = dts + self._log = log + self._loop = loop + + self.cloud_accounts = {} + + def add_cloud_account(self, account): + self.cloud_accounts[account.name] = account + account.start_validate_credentials(self._loop) + + def delete_cloud_account(self, account_name): + del self.cloud_accounts[account_name] + + def _register_show_status(self): + def get_xpath(cloud_name=None): + return "D,/rw-cloud:cloud/account{}/connection-status".format( + "[name='%s']" % cloud_name if cloud_name is not None else '' + ) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwCloudYang.CloudAccount.schema().keyspec_to_entry(ks_path) + cloud_account_name = path_entry.key00.name + self._log.debug("Got show cloud connection status request: %s", ks_path.create_string()) + + if not cloud_account_name: + self._log.warning("Cloud account name %s not found", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + account = self.cloud_accounts[cloud_account_name] + except KeyError: + self._log.warning("Cloud account %s does not exist", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + connection_status = account.connection_status + self._log.debug("Responding to cloud connection status request: %s", connection_status) + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + xpath=get_xpath(cloud_account_name), + msg=account.connection_status, + ) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + def _register_validate_rpc(self): + def get_xpath(): + return "/rw-cloud:update-cloud-status" + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + if not msg.has_field("cloud_account"): + raise CloudAccountNotFound("Cloud account name not provided") + + cloud_account_name = msg.cloud_account + try: + account = self.cloud_accounts[cloud_account_name] + except KeyError: + raise CloudAccountNotFound("Cloud account name %s not found" % cloud_account_name) + + account.start_validate_credentials(self._loop) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare + ), + flags=rwdts.Flag.PUBLISHER, + ) + + @asyncio.coroutine + def register(self): + yield from self._register_show_status() + yield from self._register_validate_rpc() \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/config_agent/__init__.py b/modules/core/mano/common/python/rift/mano/config_agent/__init__.py new file mode 100644 index 0000000..02dd8ff --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/config_agent/__init__.py @@ -0,0 +1,24 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import ( + ConfigAgentCallbacks, + ConfigAgentSubscriber + ) + +from .operdata import ( + ConfigAgentJobManager, + CfgAgentJobDtsHandler + ) diff --git a/modules/core/mano/common/python/rift/mano/config_agent/config.py b/modules/core/mano/common/python/rift/mano/config_agent/config.py new file mode 100644 index 0000000..f2be62e --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/config_agent/config.py @@ -0,0 +1,219 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import rw_peas + +import gi +gi.require_version('RwDts', '1.0') +import rift.tasklets + +from gi.repository import ( + RwcalYang as rwcal, + RwDts as rwdts, + RwConfigAgentYang as rwcfg_agent, + ProtobufC, + ) + +class ConfigAccountNotFound(Exception): + pass + +class ConfigAccountError(Exception): + pass + + +def get_add_delete_update_cfgs(dts_member_reg, xact, key_name): + # Unforunately, it is currently difficult to figure out what has exactly + # changed in this xact without Pbdelta support (RIFT-4916) + # As a workaround, we can fetch the pre and post xact elements and + # perform a comparison to figure out adds/deletes/updates + xact_cfgs = list(dts_member_reg.get_xact_elements(xact)) + curr_cfgs = list(dts_member_reg.elements) + + xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs} + curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs} + + # Find Adds + added_keys = set(xact_key_map) - set(curr_key_map) + added_cfgs = [xact_key_map[key] for key in added_keys] + + # Find Deletes + deleted_keys = set(curr_key_map) - set(xact_key_map) + deleted_cfgs = [curr_key_map[key] for key in deleted_keys] + + # Find Updates + updated_keys = set(curr_key_map) & set(xact_key_map) + updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]] + + return added_cfgs, deleted_cfgs, updated_cfgs + + +class ConfigAgentCallbacks(object): + def __init__(self, + on_add_apply=None, on_add_prepare=None, + on_update_apply=None, on_update_prepare=None, + on_delete_apply=None, on_delete_prepare=None): + + @asyncio.coroutine + def prepare_noop(*args, **kwargs): + pass + + def apply_noop(*args, **kwargs): + pass + + self.on_add_apply = on_add_apply + self.on_add_prepare = on_add_prepare + self.on_update_apply = on_update_apply + self.on_update_prepare = on_update_prepare + self.on_delete_apply = on_delete_apply + self.on_delete_prepare = on_delete_prepare + + for f in ('on_add_apply', 'on_update_apply', 'on_delete_apply'): + ref = getattr(self, f) + if ref is None: + setattr(self, f, apply_noop) + continue 
+ + if asyncio.iscoroutinefunction(ref): + raise ValueError('%s cannot be a coroutine' % (f,)) + + for f in ('on_add_prepare', 'on_update_prepare', 'on_delete_prepare'): + ref = getattr(self, f) + if ref is None: + setattr(self, f, prepare_noop) + continue + + if not asyncio.iscoroutinefunction(ref): + raise ValueError("%s must be a coroutine" % f) + + +class ConfigAgentSubscriber(object): + XPATH = "C,/rw-config-agent:config-agent/account" + + def __init__(self, dts, log, config_callbacks): + self._dts = dts + self._log = log + self._reg = None + + self.accounts = {} + + self._config_callbacks = config_callbacks + + def add_account(self, account_msg): + self._log.info("adding config account: {}".format(account_msg)) + + self.accounts[account_msg.name] = account_msg + + self._config_callbacks.on_add_apply(account_msg) + + def delete_account(self, account_name): + self._log.info("deleting config account: {}".format(account_name)) + del self.accounts[account_name] + + self._config_callbacks.on_delete_apply(account_name) + + def update_account(self, account_msg): + self._log.info("updating config account: {}".format(account_msg)) + self.accounts[account_msg.name] = account_msg + + self._config_callbacks.on_update_apply(account_msg) + + def register(self): + def apply_config(dts, acg, xact, action, _): + self._log.debug("Got config account apply config (xact: %s) (action: %s)", xact, action) + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self._reg, + xact=xact, + key_name="name", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_account(cfg.name) + + # Handle Adds + for cfg in add_cfgs: + self.add_account(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_account(cfg) + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for Config Account """ + + action = xact_info.handle.query_action + self._log.debug("Config account on_prepare config received (action: %s): %s", + xact_info.handle.query_action, msg) + + if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]: + # If the account already exists, then this is an update. Update the + # cloud account and invoke the on_update_prepare callback + if msg.name in self.accounts: + self._log.debug("Config account already exists. Invoking on_prepare update request") + if msg.has_field("account_type"): + raise CloudAccountError("Cannot change config's account-type") + + account = self.accounts[msg.name] + yield from self._config_callbacks.on_update_prepare(account) + + else: + self._log.debug("Config account does not already exist. 
Invoking on_prepare add request") + if not msg.has_field('account_type'): + raise ConfigAccountError("New Config account must contain account_type field.") + + account = msg + yield from self._config_callbacks.on_add_prepare(account) + + elif action == rwdts.QueryAction.DELETE: + # Check if the entire cloud account got deleted + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + if fref.is_field_deleted(): + yield from self._config_callbacks.on_delete_prepare(msg.name) + else: + self._log.error("Deleting individual fields for config account not supported") + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + else: + self._log.error("Action (%s) NOT SUPPORTED", action) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for Config Account config using xpath: %s", + ConfigAgentSubscriber.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self._dts.appconf_group_create(acg_handler) as acg: + self._reg = acg.register( + xpath=ConfigAgentSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + on_prepare=on_prepare, + ) \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/config_agent/operdata.py b/modules/core/mano/common/python/rift/mano/config_agent/operdata.py new file mode 100644 index 0000000..dcb0b22 --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/config_agent/operdata.py @@ -0,0 +1,461 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import asyncio +import concurrent.futures +import time + +from gi.repository import ( + NsrYang, + RwNsrYang, + RwDts as rwdts) + +import rift.tasklets + + +class ConfigAgentJob(object): + """A wrapper over the config agent job object, providing some + convenience functions. + + YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob contains + || + ==> VNFRS + || + ==> Primitives + + """ + # The normalizes the state terms from Juju to our yang models + # Juju : Yang model + STATUS_MAP = {"completed": "success", + "pending" : "pending", + "running" : "pending", + "failed" : "failure"} + + def __init__(self, nsr_id, job, tasks=None): + """ + Args: + nsr_id (uuid): ID of NSR record + job (YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object + tasks: List of asyncio.tasks. 
If provided the job monitor will + use it to monitor the tasks instead of the execution IDs + """ + self._job = job + self.nsr_id = nsr_id + self.tasks = tasks + + @property + def id(self): + """Job id""" + return self._job.job_id + + @property + def name(self): + """Job name""" + return self._job.job_name + + @property + def job_status(self): + """Status of the job (success|pending|failure)""" + return self._job.job_status + + @job_status.setter + def job_status(self, value): + """Setter for job status""" + self._job.job_status = value + + @property + def job(self): + """Gi object""" + return self._job + + @property + def xpath(self): + """Xpath of the job""" + return ("D,/nsr:ns-instance-opdata" + + "/nsr:nsr[nsr:ns-instance-config-ref='{}']" + + "/nsr:config-agent-job[nsr:job-id='{}']" + ).format(self.nsr_id, self.id) + + @staticmethod + def convert_rpc_input_to_job(nsr_id, rpc_output, tasks): + """A helper function to convert the YangOutput_Nsr_ExecNsConfigPrimitive + to YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang) + + Args: + nsr_id (uuid): NSR ID + rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): RPC output + tasks (list): A list of asyncio.Tasks + + Returns: + ConfigAgentJob + """ + # Shortcuts to prevent the HUUGE names. + CfgAgentJob = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob + CfgAgentVnfr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr + CfgAgentPrimitive = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive + + job = CfgAgentJob.from_dict({ + "job_id": rpc_output.job_id, + "job_name" : rpc_output.name, + "job_status": "pending", + }) + + for vnfr in rpc_output.vnf_out_list: + vnfr_job = CfgAgentVnfr.from_dict({ + "id": vnfr.vnfr_id_ref, + "vnf_job_status": "pending", + }) + + for primitive in vnfr.vnf_out_primitive: + vnf_primitive = CfgAgentPrimitive.from_dict({ + "name": primitive.name, + "execution_status": ConfigAgentJob.STATUS_MAP[primitive.execution_status], + "execution_id": primitive.execution_id + }) + vnfr_job.primitive.append(vnf_primitive) + + job.vnfr.append(vnfr_job) + + return ConfigAgentJob(nsr_id, job, tasks) + + +class ConfigAgentJobMonitor(object): + """Job monitor: Polls the Juju controller and get the status. + Rules: + If all Primitive are success, then vnf & nsr status will be "success" + If any one Primitive reaches a failed state then both vnf and nsr will fail. + """ + POLLING_PERIOD = 2 + + def __init__(self, dts, log, job, executor, loop, config_plugin): + """ + Args: + dts : DTS handle + log : log handle + job (ConfigAgentJob): ConfigAgentJob instance + executor (concurrent.futures): Executor for juju status api calls + loop (eventloop): Current event loop instance + config_plugin : Config plugin to be used. 
+ """ + self.job = job + self.log = log + self.loop = loop + self.executor = executor + self.polling_period = ConfigAgentJobMonitor.POLLING_PERIOD + self.config_plugin = config_plugin + self.dts = dts + + @asyncio.coroutine + def _monitor_processes(self, registration_handle): + result = 0 + for process in self.job.tasks: + rc = yield from process + self.log.debug("Process {} returned rc: {}".format(process, rc)) + result |= rc + + if result == 0: + self.job.job_status = "success" + else: + self.job.job_status = "failure" + + registration_handle.update_element(self.job.xpath, self.job.job) + + + @asyncio.coroutine + def publish_action_status(self): + """ + Starts publishing the status for jobs/primitives + """ + registration_handle = yield from self.dts.register( + xpath=self.job.xpath, + handler=rift.tasklets.DTS.RegistrationHandler(), + flags=(rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ), + ) + + self.log.debug('preparing to publish job status for {}'.format(self.job.xpath)) + + try: + registration_handle.create_element(self.job.xpath, self.job.job) + + # If the config is done via a user defined script + if self.job.tasks is not None: + yield from self._monitor_processes(registration_handle) + return + + prev = time.time() + # Run until pending moves to either failure/success + while self.job.job_status == "pending": + curr = time.time() + + if curr - prev < self.polling_period: + pause = self.polling_period - (curr - prev) + yield from asyncio.sleep(pause, loop=self.loop) + + prev = time.time() + + tasks = [] + for vnfr in self.job.job.vnfr: + task = self.loop.create_task(self.get_vnfr_status(vnfr)) + tasks.append(task) + + # Exit, if no tasks are found + if not tasks: + break + + yield from asyncio.wait(tasks, loop=self.loop) + + job_status = [task.result() for task in tasks] + + if "failure" in job_status: + self.job.job_status = "failure" + elif "pending" in job_status: + self.job.job_status = "pending" + else: + self.job.job_status = "success" + + # self.log.debug("Publishing job status: {} at {} for nsr id: {}".format( + # self.job.job_status, + # self.job.xpath, + # self.job.nsr_id)) + + registration_handle.update_element(self.job.xpath, self.job.job) + + + except Exception as e: + self.log.exception(e) + raise + + + @asyncio.coroutine + def get_vnfr_status(self, vnfr): + """Schedules tasks for all containing primitives and updates it's own + status. + + Args: + vnfr : Vnfr job record containing primitives. + + Returns: + (str): "success|failure|pending" + """ + tasks = [] + job_status = [] + + for primitive in vnfr.primitive: + if primitive.execution_id == "": + # TODO: For some config data, the id will be empty, check if + # mapping is needed. + job_status.append(primitive.execution_status) + continue + + task = self.loop.create_task(self.get_primitive_status(primitive)) + tasks.append(task) + + if tasks: + yield from asyncio.wait(tasks, loop=self.loop) + + job_status.extend([task.result() for task in tasks]) + if "failure" in job_status: + vnfr.vnf_job_status = "failure" + return "failure" + + elif "pending" in job_status: + vnfr.vnf_job_status = "pending" + return "pending" + + else: + vnfr.vnf_job_status = "success" + return "success" + + @asyncio.coroutine + def get_primitive_status(self, primitive): + """ + Queries the juju api and gets the status of the execution id. + + Args: + primitive : Primitive containing the execution ID. 
+ """ + + try: + status = yield from self.loop.run_in_executor( + self.executor, + self.config_plugin.get_action_status, + primitive.execution_id + ) + # self.log.debug("Got {} for execution id: {}".format( + # status, + # primitive.execution_id)) + except Exception as e: + self.log.exception(e) + status = "failed" + + # Handle case status is None + if status: + primitive.execution_status = ConfigAgentJob.STATUS_MAP[status] + else: + primitive.execution_status = "failure" + + return primitive.execution_status + + +class CfgAgentJobDtsHandler(object): + """Dts Handler for CfgAgent""" + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr/nsr:config-agent-job" + + def __init__(self, dts, log, loop, nsm, cfgm): + """ + Args: + dts : Dts Handle. + log : Log handle. + loop : Event loop. + nsm : NsmManager. + cfgm : ConfigManager. + """ + self._dts = dts + self._log = log + self._loop = loop + self._cfgm = cfgm + self._nsm = nsm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NSManager manager instance """ + return self._nsm + + @property + def cfgm(self): + """ Return the ConfigManager manager instance """ + return self._cfgm + + @staticmethod + def cfg_job_xpath(nsr_id, job_id): + return ("D,/nsr:ns-instance-opdata" + + "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" + + "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id) + + @asyncio.coroutine + def register(self): + """ Register for NS monitoring read from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + if action == rwdts.QueryAction.READ: + schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + try: + nsr_id = path_entry.key00.ns_instance_config_ref + + nsr_ids = [] + if nsr_id is None or nsr_id == "": + nsrs = list(self.nsm.nsrs.values()) + nsr_ids = [nsr.id for nsr in nsrs] + else: + nsr_ids = [nsr_id] + + for nsr_id in nsr_ids: + job = self.cfgm.get_job(nsr_id) + + # If no jobs are queued for the NSR + if job is None: + continue + + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + CfgAgentJobDtsHandler.cfg_job_xpath(nsr_id, job.job_id), + job) + + except Exception as e: + self._log.exception("Caught exception:", str(e)) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + else: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=CfgAgentJobDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER, + ) + + +class ConfigAgentJobManager(object): + """A central class that manager all the Config Agent related data, + Including updating the status + + TODO: Needs to support multiple config agents. 
+ """ + def __init__(self, dts, log, loop, nsm): + """ + Args: + dts : Dts handle + log : Log handler + loop : Event loop + nsm : NsmTasklet instance + """ + self.jobs = {} + self.dts = dts + self.log = log + self.loop = loop + self.nsm = nsm + self.handler = CfgAgentJobDtsHandler(dts, log, loop, nsm, self) + self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + + def add_job(self, rpc_output, tasks=None): + """Once an RPC is trigger add a now job + + Args: + rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): Rpc output + tasks(list) A list of asyncio.Tasks + + """ + nsr_id = rpc_output.nsr_id_ref + self.jobs[nsr_id] = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output, tasks) + + self.log.debug("Creating a job monitor for Job id: {}".format( + rpc_output.job_id)) + + # For every Job we will schedule a new monitoring process. + job_monitor = ConfigAgentJobMonitor( + self.dts, + self.log, + self.jobs[nsr_id], + self.executor, + self.loop, + self.nsm.config_agent_plugins[0] # Hack + ) + task = self.loop.create_task(job_monitor.publish_action_status()) + + def get_job(self, nsr_id): + """Get the job associated with the NSR Id, if present.""" + try: + return self.jobs[nsr_id].job + except KeyError: + return None + + @asyncio.coroutine + def register(self): + yield from self.handler.register() \ No newline at end of file diff --git a/modules/core/mano/common/rw_gen_package.py b/modules/core/mano/common/rw_gen_package.py new file mode 100755 index 0000000..b9a0d8c --- /dev/null +++ b/modules/core/mano/common/rw_gen_package.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import os +import subprocess +import argparse +import shutil +import xml.etree.ElementTree as etree + +from gi.repository import ( + RwYang, + NsdYang, + RwNsdYang, + VnfdYang, + RwVnfdYang, + VldYang, + RwVldYang +) + +def read_from_file(module_list, infile, input_format, descr_type): + model = RwYang.Model.create_libncx() + for module in module_list: + model.load_module(module) + + descr = None + if descr_type == "nsd": + descr = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd() + else: + descr = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd() + + if input_format == 'json': + json_str = open(infile).read() + descr.from_json(model, json_str) + + elif input_format.strip() == 'xml': + tree = etree.parse(infile) + root = tree.getroot() + xmlstr = etree.tostring(root, encoding="unicode") + descr.from_xml_v2(model, xmlstr) + else: + raise("Invalid input format for the descriptor") + + return descr + +def write_to_file(name, outdir, infile, descr_type): + dirpath = os.path.join(outdir, name, descr_type) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + shutil.copy(infile, dirpath) + +def main(argv=sys.argv[1:]): + global outdir, output_format + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--infile', required=True, + type=lambda x: os.path.isfile(x) and x or parser.error("INFILE does not exist")) + parser.add_argument('-o', '--outdir', default=".", + type=lambda x: os.path.isdir(x) and x or parser.error("OUTDIR does not exist")) + parser.add_argument('-f', '--format', choices=['json', 'xml'], required=True) + parser.add_argument('-t', '--descriptor-type', choices=['nsd', 'vnfd'], required=True ) + + args = parser.parse_args() + infile = args.infile + input_format = args.format + outdir = args.outdir + dtype = args.descriptor_type + + print('Reading file {} in format {}'.format(infile, input_format)) + module_list = ['vld', 'rw-vld'] + if dtype == 'nsd': + module_list.extend(['nsd', 'rw-nsd']) + else: + module_list.extend(['vnfd', 'rw-vnfd']) + + descr = read_from_file(module_list, args.infile, args.format, dtype) + + print("Creating %s descriptor for {}".format(dtype.upper(), descr.name)) + write_to_file(descr.name, outdir, infile, dtype) + status = subprocess.call(os.path.join(os.environ["RIFT_ROOT"], + "bin/generate_descriptor_pkg.sh %s %s" % (outdir, descr.name)), shell=True) + print("Status of %s descriptor package creation is: %s" % (dtype.upper(), status)) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/confd_client/CMakeLists.txt b/modules/core/mano/confd_client/CMakeLists.txt new file mode 100644 index 0000000..5de8f0a --- /dev/null +++ b/modules/core/mano/confd_client/CMakeLists.txt @@ -0,0 +1,16 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 2014/04/30 +# + +cmake_minimum_required(VERSION 2.8) + +# confd_client executable +add_executable(confd_client confd_client.c) + +target_link_libraries(confd_client + ${CMAKE_INSTALL_PREFIX}/usr/local/confd/lib/libconfd.so + pthread + ) diff --git a/modules/core/mano/confd_client/Makefile b/modules/core/mano/confd_client/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/confd_client/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., 
../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/mano/confd_client/README b/modules/core/mano/confd_client/README
new file mode 100644
index 0000000..aa711c0
--- /dev/null
+++ b/modules/core/mano/confd_client/README
@@ -0,0 +1,8 @@
+This is a barebones confd client test program. It is useful for confd module testing. To use this program, follow these steps:
+
+1. Reserve and log in to a VM as root
+2. cd ${RIFT_ROOT}
+3. ./rift-shell -e
+4. cd modules/core/mc/confd_client
+5. ./confd_client_opdata.sh (will measure the rate for fetching operational data)
+6. ./confd_client_config.sh (will measure the rate of config writes and reads)
diff --git a/modules/core/mano/confd_client/confd_client.c b/modules/core/mano/confd_client/confd_client.c
new file mode 100644
index 0000000..9c0613e
--- /dev/null
+++ b/modules/core/mano/confd_client/confd_client.c
@@ -0,0 +1,436 @@
+/* * Copyright 2016 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + + +/* + * + * + * + */ + +#include +#include +#include +#include +#include + +#include +#include "confd_cdb.h" +#include "confd_dp.h" + +static struct confd_daemon_ctx *dctx; +static int ctlsock; +static int workersock; + +typedef struct _foodata { + char *name; + struct _foodata *next; +} foodata_t; + +typedef struct _opdata { + foodata_t *foo; +} opdata_t; + +opdata_t *g_opdata = NULL; + +int process_confd_subscription(int subsock) +{ + int confd_result, flags, length, *subscription_points, i, j, nvalues; + enum cdb_sub_notification type; + confd_tag_value_t *values; + + confd_result = cdb_read_subscription_socket2(subsock, + &type, + &flags, + &subscription_points, + &length); + + if (confd_result != CONFD_OK) { + confd_fatal("Failed to read subscription data \n"); + } + + switch (type) { + case CDB_SUB_PREPARE: + for (i = 0; i < length; i++) { + printf("i = %d, point = %d\n", i, subscription_points[i]); + if (cdb_get_modifications(subsock, subscription_points[i], flags, &values, &nvalues, + "/") == CONFD_OK) { + for (j = 0; j < nvalues; j++) { + printf("j = %d\n", j); + confd_free_value(CONFD_GET_TAG_VALUE(&values[j])); + } + } + } + cdb_sync_subscription_socket(subsock, CDB_DONE_PRIORITY); + fprintf(stdout, "CBD_SUB_PREPARE\n"); + break; + + case CDB_SUB_COMMIT: + cdb_sync_subscription_socket(subsock, CDB_DONE_PRIORITY); + fprintf(stdout, "CDB_SUB_COMMIT\n"); + break; + + case CDB_SUB_ABORT: + fprintf(stdout, "CDB_SUB_ABORT\n"); + break; + + default: + confd_fatal("Invalid type %d in cdb_read_subscription_socket2\n", type); + } + + return 0; +} + +static int do_init_action(struct confd_user_info *uinfo) +{ + int ret = CONFD_OK; + // fprintf(stdout, "init_action called\n"); + confd_action_set_fd(uinfo, workersock); + return ret; +} + +static int do_rw_action(struct confd_user_info *uinfo, + struct xml_tag *name, + confd_hkeypath_t *kp, + confd_tag_value_t *params, + int nparams) +{ + // confd_tag_value_t reply[2]; + // int status; + // char *ret_status; + int i; + char buf[BUFSIZ]; + + /* Just print the parameters and return */ + + // + for (i = 0; i < nparams; i++) { + confd_pp_value(buf, sizeof(buf), CONFD_GET_TAG_VALUE(¶ms[i])); + printf("param %2d: %9u:%-9u, %s\n", i, CONFD_GET_TAG_NS(¶ms[i]), + CONFD_GET_TAG_TAG(¶ms[i]), buf); + } + + i = 0; + // CONFD_SET_TAG_INT32(&reply[i], NULL, 0); i++; + // CONFD_SET_TAG_STR(&reply[i], NULL, "success"); i++; + confd_action_reply_values(uinfo, NULL, i); + + return CONFD_OK; + +} + +static int get_next(struct confd_trans_ctx *tctx, + confd_hkeypath_t *keypath, + long next) +{ + opdata_t *opdata = tctx->t_opaque; + foodata_t *curr; + confd_value_t v[2]; + + if (next == -1) { /* first call */ + curr = opdata->foo; + } else { + curr = (foodata_t *)next; + } + + if (curr == NULL) { + confd_data_reply_next_key(tctx, NULL, -1, -1); + return CONFD_OK; + } + + CONFD_SET_STR(&v[0], curr->name); + confd_data_reply_next_key(tctx, &v[0], 1, (long)curr->next); + return CONFD_OK; +} + +static foodata_t *find_foo(confd_hkeypath_t *keypath, opdata_t *dp) +{ + char *name = (char*)CONFD_GET_BUFPTR(&keypath->v[1][0]); + foodata_t *foo = dp->foo; + while (foo != NULL) { + if (strcmp(foo->name, name) == 0) { + return foo; + } + foo = foo->next; + } + return NULL; +} + +/* Keypath example */ +/* /arpentries/arpe{192.168.1.1 eth0}/hwaddr */ +/* 3 2 1 0 */ +static int get_elem(struct confd_trans_ctx *tctx, + confd_hkeypath_t *keypath) +{ + confd_value_t v; + foodata_t *foo = find_foo(keypath, tctx->t_opaque); + if (foo == NULL) { + 
confd_data_reply_not_found(tctx); + return CONFD_OK; + } + + CONFD_SET_STR(&v, foo->name); + confd_data_reply_value(tctx, &v); + + return CONFD_OK; +} + +static foodata_t *create_dummy_foodata_list(int count) +{ + foodata_t *head, *curr, *prev; + int i; + char buf[64]; + + head = prev = curr = NULL; + for (i = 0; i < count; ++i) { + curr = malloc(sizeof(foodata_t)); + memset(curr, 0, sizeof(foodata_t)); + snprintf(buf, 64, "foo%d", i); + curr->name = strdup(buf); + if (prev) { + prev->next = curr; + } else { + head = curr; + } + prev = curr; + } + + return head; +} + +static void free_foodata_list(foodata_t *foo) +{ + foodata_t *curr, *next; + curr = foo; + while (curr) { + next = curr->next; + if (curr->name) { + free(curr->name); + } + free(curr); + curr = next; + } +} + +static void print_foodata_list(foodata_t *foo) +{ + foodata_t *curr = foo; + while (curr) { + // fprintf(stdout, "%s\n", curr->name); + curr = curr->next; + } +} + +static int s_init(struct confd_trans_ctx *tctx) +{ + opdata_t *opdata; + if ((opdata = malloc(sizeof(opdata_t))) == NULL) { + return CONFD_ERR; + } + + memset(opdata, 0, sizeof(opdata_t)); + opdata->foo = create_dummy_foodata_list(10); + print_foodata_list(opdata->foo); + tctx->t_opaque = opdata; + confd_trans_set_fd(tctx, workersock); + return CONFD_OK; +} + +static int s_finish(struct confd_trans_ctx *tctx) +{ + opdata_t *opdata = tctx->t_opaque; + if (opdata != NULL) { + free_foodata_list(opdata->foo); + free(opdata); + } + + return CONFD_OK; +} + +int main(int argc, char **argv) +{ + struct sockaddr_in addr; + int debuglevel = CONFD_TRACE; + struct confd_trans_cbs trans; + struct confd_data_cbs data; + struct confd_action_cbs action; + int i; + + int subsock, datasock; + int status; + int spoint; + + addr.sin_addr.s_addr = inet_addr("127.0.0.1"); + addr.sin_family = AF_INET; + addr.sin_port = htons(CONFD_PORT); + + /** + * Setup CDB subscription socket + */ + confd_init(argv[0], stderr, CONFD_DEBUG); + if ((subsock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { + confd_fatal("Failed to open subscription socket\n"); + } + + printf("Subscription socket: %d\n", subsock); + + for (i = 1; i < 10; ++i) { + if (cdb_connect(subsock, CDB_SUBSCRIPTION_SOCKET, + (struct sockaddr*)&addr, + sizeof (struct sockaddr_in)) < 0) { + sleep(2); + fprintf(stdout, "Failed in confd_connect() {attempt: %d}\n", i); + } else { + fprintf(stdout, "confd_connect succeeded\n"); + break; + } + } + + if ((status = cdb_subscribe2(subsock, CDB_SUB_RUNNING_TWOPHASE, 0, 0, &spoint, 0, "/")) + != CONFD_OK) { + fprintf(stderr, "Terminate: subscribe %d\n", status); + exit(1); + } + + if (cdb_subscribe_done(subsock) != CONFD_OK) { + confd_fatal("cdb_subscribe_done() failed"); + } + + /** + * Setup CBD data socket + */ + + if ((datasock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { + confd_fatal("Failed to open data socket\n"); + } + + if (cdb_connect(datasock, CDB_DATA_SOCKET, + (struct sockaddr*)&addr, + sizeof (struct sockaddr_in)) < 0) { + confd_fatal("Failed to confd_connect() to confd \n"); + } + + memset(&trans, 0, sizeof (struct confd_trans_cbs)); + trans.init = s_init; + trans.finish = s_finish; + + memset(&data, 0, sizeof (struct confd_data_cbs)); + data.get_elem = get_elem; + data.get_next = get_next; + strcpy(data.callpoint, "base_show"); + + memset(&action, 0, sizeof (action)); + strcpy(action.actionpoint, "rw_action"); + action.init = do_init_action; + action.action = do_rw_action; + + + /* initialize confd library */ + confd_init("confd_client_op_data_daemon", stderr, debuglevel); + + + 
for (i = 1; i < 10; ++i) { + if (confd_load_schemas((struct sockaddr*)&addr, + sizeof(struct sockaddr_in)) != CONFD_OK) { + fprintf(stdout, "Failed to load schemas from confd {attempt: %d}\n", i); + sleep(2); + } else { + fprintf(stdout, "confd_load_schemas succeeded\n"); + break; + } + } + + if ((dctx = confd_init_daemon("confd_client_op_data_daemon")) == NULL) { + confd_fatal("Failed to initialize confdlib\n"); + } + + /* Create the first control socket, all requests to */ + /* create new transactions arrive here */ + if ((ctlsock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { + confd_fatal("Failed to open ctlsocket\n"); + } + + if (confd_connect(dctx, ctlsock, CONTROL_SOCKET, (struct sockaddr*)&addr, + sizeof (struct sockaddr_in)) < 0) { + confd_fatal("Failed to confd_connect() to confd \n"); + } + + /* Also establish a workersocket, this is the most simple */ + /* case where we have just one ctlsock and one workersock */ + if ((workersock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { + confd_fatal("Failed to open workersocket\n"); + } + + if (confd_connect(dctx, workersock, WORKER_SOCKET,(struct sockaddr*)&addr, + sizeof (struct sockaddr_in)) < 0) { + confd_fatal("Failed to confd_connect() to confd \n"); + } + + if (confd_register_trans_cb(dctx, &trans) == CONFD_ERR) { + confd_fatal("Failed to register trans cb \n"); + } + + if (confd_register_data_cb(dctx, &data) == CONFD_ERR) { + confd_fatal("Failed to register data cb \n"); + } + + if (confd_register_action_cbs(dctx, &action) == CONFD_ERR) { + confd_fatal("Failed to register action cb \n"); + } + + if (confd_register_done(dctx) != CONFD_OK) { + confd_fatal("Failed to complete registration \n"); + } + + while(1) { + struct pollfd set[3]; + int ret; + set[0].fd = ctlsock; + set[0].events = POLLIN; + set[0].revents = 0; + set[1].fd = workersock; + set[1].events = POLLIN; + set[1].revents = 0; + set[2].fd = subsock; + set[2].events = POLLIN; + set[2].revents = 0; + if (poll(set, sizeof(set)/sizeof(*set), -1) < 0) { + perror("Poll failed:"); + continue; + } + /* Check for I/O */ + if (set[0].revents & POLLIN) { + if ((ret = confd_fd_ready(dctx, ctlsock)) == CONFD_EOF) { + confd_fatal("Control socket closed\n"); + } else if (ret == CONFD_ERR && confd_errno != CONFD_ERR_EXTERNAL) { + confd_fatal("Error on control socket request: %s (%d): %s\n", + confd_strerror(confd_errno), confd_errno, confd_lasterr()); + } + } + if (set[1].revents & POLLIN) { + if ((ret = confd_fd_ready(dctx, workersock)) == CONFD_EOF) { + confd_fatal("Worker socket closed\n"); + } else if (ret == CONFD_ERR && confd_errno != CONFD_ERR_EXTERNAL) { + confd_fatal("Error on worker socket request: %s (%d): %s\n", + confd_strerror(confd_errno), confd_errno, confd_lasterr()); + } + } + if (set[2].revents & POLLIN) { + process_confd_subscription(set[2].fd); + } + } + + return 0; +} diff --git a/modules/core/mano/confd_client/confd_client.py b/modules/core/mano/confd_client/confd_client.py new file mode 100755 index 0000000..7aa52e5 --- /dev/null +++ b/modules/core/mano/confd_client/confd_client.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python2 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import subprocess +import contextlib +import rift.auto.proxy +import sys +import os +import time +import rw_peas +import requests + +import gi +gi.require_version('RwMcYang', '1.0') +gi.require_version('YangModelPlugin', '1.0') +from gi.repository import RwMcYang + +# NOTE: This cript measures the single threaded performance +# This also gives an idea about latency +# To get throughput numbers may need multiple parallel clients + + +yang = rw_peas.PeasPlugin('yangmodel_plugin-c', 'YangModelPlugin-1.0') +yang_model_api = yang.get_interface('Model') +yang_model = yang_model_api.alloc() +mc_module = yang_model_api.load_module(yang_model, 'rw-mc') + +@contextlib.contextmanager +def start_confd(): + print("Starting confd") + proc = subprocess.Popen(["./usr/bin/rw_confd"]) + try: + yield + finally: + print("Killing confd") + proc.kill() + +@contextlib.contextmanager +def start_confd_client(): + print("Starting confd_client") + proc = subprocess.Popen(["{}/.build/modules/core/mc/src/core_mc-build/confd_client/confd_client".format( + os.environ["RIFT_ROOT"]) + ]) + try: + yield + finally: + proc.kill() + print("Starting confd_client") + +def run_rpc_perf_test(proxy, num_rpcs=1): + start_time = time.time() + + for i in range(1, num_rpcs + 1): + start = RwMcYang.StartLaunchpadInput() + start.federation_name = "lp_%s" % i + print(proxy.rpc(start.to_xml(yang_model))) + + stop_time = time.time() + + print("Retrieved %s rpc in %s seconds" % (num_rpcs, stop_time - start_time)) + return (stop_time - start_time) + + +def run_federation_config_http_perf_test(num_federations=1): + session = requests.Session() + + start_time = time.time() + for i in range(1, num_federations + 1): + req = session.post( + url="http://localhost:8008/api/config", + json={"federation": {"name": "foo_%s" % i}}, + headers={'Content-Type': 'application/vnd.yang.data+json'}, + auth=('admin', 'admin') + ) + req.raise_for_status() + stop_time = time.time() + + print("Configured %s federations using restconf in %s seconds" % (num_federations, stop_time - start_time)) + return (stop_time - start_time) + +def run_opdata_get_opdata_perf_test(proxy, num_gets=1): + start_time = time.time() + + for i in range(1, num_gets + 1): + print(proxy.get_from_xpath(filter_xpath="/opdata")) + pass + + stop_time = time.time() + print("Retrieved %s opdata in %s seconds" % (num_gets, stop_time - start_time)) + return (stop_time - start_time) + +def run_federation_config_perf_test(proxy, num_federations=1): + start_time = time.time() + + for i in range(1, num_federations + 1): + fed = RwMcYang.FederationConfig() + fed.name = "foobar_%s" % i + proxy.merge_config(fed.to_xml(yang_model)) + + stop_time = time.time() + + print("Configured %s federations using netconf in %s seconds" % (num_federations, stop_time - start_time)) + return (stop_time - start_time) + +def run_federation_get_config_perf_test(proxy, num_gets=1): + start_time = time.time() + + for i in range(1, num_gets + 1): + proxy.get_config(filter_xpath="/federation") + + stop_time = time.time() + + print("Retrieved %s federations in %s seconds" % (num_gets, stop_time - 
start_time)) + return (stop_time - start_time) + +def main(): + with start_confd(): + with start_confd_client(): + nc_proxy = rift.auto.proxy.NetconfProxy() + nc_proxy.connect() + n_fed = 10; + n_fed_get = 100 + n_opdata_get = 100 + n_rpc = 100 + config_time = run_federation_config_perf_test(nc_proxy, num_federations=n_fed) + config_get_time = run_federation_get_config_perf_test(nc_proxy, num_gets=n_fed_get) + opdata_get_time = run_opdata_get_opdata_perf_test(nc_proxy, num_gets=n_opdata_get) + rpc_time = run_rpc_perf_test(nc_proxy, num_rpcs=n_rpc) + + print("") + print("..............................................") + print("CONFD Performance Results Using Netconf Client") + print("..............................................") + print("Rate of config writes: %d" % (n_fed/config_time)) + print("Rate of config reads : %d" % (n_fed_get/config_get_time)) + print("Rate of opdata reads : %d" % (n_opdata_get/opdata_get_time)) + print("Rate of rpc calls : %d" % (n_rpc/rpc_time)) + print("* Config read is reading a list with %d entries" % n_fed) + print("* Opdata read is reading a list with 5 entries") + print("..............................................") + +if __name__ == "__main__": + if "RIFT_ROOT" not in os.environ: + print("Must be in rift shell to run.") + sys.exit(1) + + os.chdir(os.environ["RIFT_INSTALL"]) + main() \ No newline at end of file diff --git a/modules/core/mano/confd_client/confd_client.sh b/modules/core/mano/confd_client/confd_client.sh new file mode 100755 index 0000000..aaa1638 --- /dev/null +++ b/modules/core/mano/confd_client/confd_client.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# + +echo "Starting confd" +cd $RIFT_ROOT/.install +pwd +./usr/bin/rw_confd& + + +echo "Starting the confd_client" +cd $RIFT_ROOT/.build/modules/core/mc/src/core_mc-build/confd_client/ +./confd_client& + +echo "sleeping for 20 secs for confd to complete initialization" +sleep 20 + +cd $RIFT_ROOT/modules/core/mc/confd_client +time ./test.sh + + + +# echo "Testing confd config write performance" +# echo "Sending 20 create fedaration requests..." + +# time for i in `seq 1 20`; do curl -d '{"federation": {"name": "foobar'$i'"}}' -H 'Content-Type: application/vnd.yang.data+json' http://localhost:8008/api/running -uadmin:admin -v; done + +# echo "Testing confd config read performance" +# echo "Sending 200 read fedaration requests..." +# time for i in `seq 1 50`; do curl -s -H 'Content-Type: application/vnd.yang.data+json' http://localhost:8008/api/running/federation -uadmin:admin -v -X GET; done + +# echo "Testing confd operational data get performance" +# echo "Sending 20 create fedaration requests..." 
+ +# time for i in `seq 1 200`; do curl -s -H "Content-Type: application/vnd.yang.data+json" http://localhost:8008/api/operational/opdata -uadmin:admin -v -X GET; done + +killall confd +trap 'kill $(jobs -pr)' SIGINT SIGTERM EXIT \ No newline at end of file diff --git a/modules/core/mano/confd_client/test.sh b/modules/core/mano/confd_client/test.sh new file mode 100755 index 0000000..938328c --- /dev/null +++ b/modules/core/mano/confd_client/test.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# + +# This script tests the throughput of get operations. +# change iter and loop variables + +NETCONF_CONSOLE_DIR=${RIFT_ROOT}/.install/usr/local/confd/bin + +iter=100 +loop=30 + +for i in `seq 1 $loop`; +do + echo "Background script $i" + ${NETCONF_CONSOLE_DIR}/netconf-console-tcp -s all --iter=$iter --get -x /opdata& +done + +wait + +total=$(($iter * $loop)) +echo "Total number of netconf operations=$total" \ No newline at end of file diff --git a/modules/core/mano/examples/CMakeLists.txt b/modules/core/mano/examples/CMakeLists.txt new file mode 100644 index 0000000..ce7be69 --- /dev/null +++ b/modules/core/mano/examples/CMakeLists.txt @@ -0,0 +1,23 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 03/26/2014 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME rwmano_examples) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + + +## +# Include the subdirs +## +set(subdirs + ping_pong_ns + ) + +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/examples/Makefile b/modules/core/mano/examples/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/examples/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/examples/ping_pong_ns/CMakeLists.txt b/modules/core/mano/examples/ping_pong_ns/CMakeLists.txt new file mode 100644 index 0000000..5a2d6f9 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/CMakeLists.txt @@ -0,0 +1,49 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 03/26/2014 +# + +cmake_minimum_required(VERSION 2.8) + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/generate_packages.sh.in + ${CMAKE_CURRENT_BINARY_DIR}/generate_packages.sh + ESCAPE_QUOTES @ONLY + ) + +set(PACKAGE_OUTPUT + ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_image.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_image.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd_with_epa.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_epa.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_epa.tar.gz) + +add_custom_command( + OUTPUT ${PACKAGE_OUTPUT} + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/generate_packages.sh + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/ping_pong_nsd.py + ) + +add_custom_target(ping_pong_pkg_gen ALL + DEPENDS ${PACKAGE_OUTPUT} + mano_yang + ) + +install( + FILES ${PACKAGE_OUTPUT} + DESTINATION + usr/rift/mano/examples/ping_pong_ns + COMPONENT ${PKG_LONG_NAME} + ) + +rift_python_install_tree( + COMPONENT ${PKG_LONG_NAME} + FILES + rift/mano/examples/ping_pong_nsd.py + ) + diff --git a/modules/core/mano/examples/ping_pong_ns/Makefile b/modules/core/mano/examples/ping_pong_ns/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/examples/ping_pong_ns/config_desc.py b/modules/core/mano/examples/ping_pong_ns/config_desc.py new file mode 100755 index 0000000..2afc170 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/config_desc.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
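The find_upward helper used by these Makefiles walks from the current directory toward the filesystem root until it locates Makefile.top (or gives up at /). For readers less familiar with that GNU make idiom, a minimal Python sketch of the same upward search follows; the function and its pathlib-based implementation are illustrative only and are not part of the patch.

#!/usr/bin/env python3
# Illustrative only: mirrors the Makefiles' find_upward helper, which walks
# from the current directory toward the filesystem root until it finds the
# requested file (e.g. Makefile.top) or reaches '/'.
from pathlib import Path


def find_upward(filename, start="."):
    """Return the first <filename> found in start, start/.., ..., or None."""
    directory = Path(start).resolve()
    while True:
        candidate = directory / filename
        if candidate.exists():
            return candidate
        if directory == directory.parent:   # reached the filesystem root
            return None
        directory = directory.parent


if __name__ == "__main__":
    print(find_upward("Makefile.top"))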
+ + +# +# + +import argparse +import logging +import rift.auto.proxy +import rift.vcs +import sys + +import gi +gi.require_version('RwYang', '1.0') + +from gi.repository import NsdYang, VldYang, VnfdYang, RwYang + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +model = RwYang.Model.create_libncx() +model.load_schema_ypbc(VldYang.get_schema()) +model.load_schema_ypbc(NsdYang.get_schema()) +model.load_schema_ypbc(VnfdYang.get_schema()) + + +def configure_vld(proxy, vld_xml_hdl): + vld_xml = vld_xml_hdl.read() + logger.debug("Attempting to deserialize XML into VLD protobuf: %s", vld_xml) + vld = VldYang.YangData_Vld_VldCatalog_Vld() + vld.from_xml_v2(model, vld_xml) + + logger.debug("Sending VLD to netconf: %s", vld) + proxy.merge_config(vld.to_xml_v2(model)) + + +def configure_vnfd(proxy, vnfd_xml_hdl): + vnfd_xml = vnfd_xml_hdl.read() + logger.debug("Attempting to deserialize XML into VNFD protobuf: %s", vnfd_xml) + vnfd = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd() + vnfd.from_xml_v2(model, vnfd_xml) + + logger.debug("Sending VNFD to netconf: %s", vnfd) + proxy.merge_config(vnfd.to_xml_v2(model)) + + +def configure_nsd(proxy, nsd_xml_hdl): + nsd_xml = nsd_xml_hdl.read() + logger.debug("Attempting to deserialize XML into NSD protobuf: %s", nsd_xml) + nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() + nsd.from_xml_v2(model, nsd_xml) + + logger.debug("Sending NSD to netconf: %s", nsd) + proxy.merge_config(nsd.to_xml_v2(model)) + + +def parse_args(argv=sys.argv[1:]): + """Create a parser which includes all generic demo arguments and parse args + + Arguments: + argv - arguments to be parsed + + Returns: List of parsed arguments + """ + + parser = argparse.ArgumentParser() + parser.add_argument( + '--confd-host', + default="127.0.0.1", + help="Hostname or IP where the confd netconf server is running.") + + parser.add_argument( + '--vld-xml-file', + action="append", + default=[], + type=argparse.FileType(), + help="VLD XML File Path", + ) + + parser.add_argument( + '--vnfd-xml-file', + action="append", + default=[], + type=argparse.FileType(), + help="VNFD XML File Path", + ) + + parser.add_argument( + '--nsd-xml-file', + action="append", + default=[], + type=argparse.FileType(), + help="VNFD XML File Path", + ) + + parser.add_argument( + '-v', '--verbose', + action='store_true', + help="Logging is normally set to an INFO level. When this flag " + "is used logging is set to DEBUG. ") + + args = parser.parse_args(argv) + + return args + + +def connect(args): + # Initialize Netconf Management Proxy + mgmt_proxy = rift.auto.proxy.NetconfProxy(args.confd_host) + mgmt_proxy.connect() + + # Ensure system started + vcs_component_info = rift.vcs.mgmt.VcsComponentInfo(mgmt_proxy) + vcs_component_info.wait_until_system_started() + + return mgmt_proxy + + +def main(): + args = parse_args() + proxy = connect(args) + for xml_file in args.vnfd_xml_file: + configure_vnfd(proxy, xml_file) + + for xml_file in args.vld_xml_file: + configure_vld(proxy, xml_file) + + for xml_file in args.nsd_xml_file: + configure_nsd(proxy, xml_file) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/generate_packages.sh.in b/modules/core/mano/examples/ping_pong_ns/generate_packages.sh.in new file mode 100755 index 0000000..d214b69 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/generate_packages.sh.in @@ -0,0 +1,78 @@ +#! 
/usr/bin/bash + +set -e +set -x + +SOURCE_DIR=@CMAKE_CURRENT_SOURCE_DIR@ +BINARY_DIR=@CMAKE_CURRENT_BINARY_DIR@ +PROJECT_TOP_DIR=@PROJECT_TOP_DIR@ +QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda.qcow2 +RIFT_QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda.qcow2 +PONG_QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2 +PING_QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2 + +# These paths are needed for finding the overrides and so files +PYTHONPATH=${PYTHONPATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang +PYTHON3PATH=${PYTHON3PATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang +LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang + +# Remove any old directories +rm -rf ${BINARY_DIR}/ping_vnfd +rm -rf ${BINARY_DIR}/pong_vnfd +rm -rf ${BINARY_DIR}/ping_pong_nsd + +rm -rf ${BINARY_DIR}/ping_vnfd_with_image +rm -rf ${BINARY_DIR}/pong_vnfd_with_image + +rm -rf ${BINARY_DIR}/ping_vnfd_with_epa +rm -rf ${BINARY_DIR}/pong_vnfd_with_epa +rm -rf ${BINARY_DIR}/ping_pong_nsd_with_epa + + +# Generate image md5sum +ping_md5sum="$(md5sum ${PING_QCOW_IMAGE} | cut -f1 -d" ")" +pong_md5sum="$(md5sum ${PONG_QCOW_IMAGE} | cut -f1 -d" ")" + +# Generate the descriptors +${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR} --format=json --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum} + +# create directories for packages with images +cp -r ${BINARY_DIR}/ping_vnfd ${BINARY_DIR}/ping_vnfd_with_image +cp -r ${BINARY_DIR}/pong_vnfd ${BINARY_DIR}/pong_vnfd_with_image +mkdir -p ${BINARY_DIR}/ping_vnfd_with_image/images +mkdir -p ${BINARY_DIR}/pong_vnfd_with_image/images + +### Generate descriptors with EPA +${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/with_epa --format=json --epa --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum} + +### Move the generated artifacts to appropriate directories +mv ${BINARY_DIR}/with_epa/ping_vnfd_with_epa ${BINARY_DIR}/ping_vnfd_with_epa +mv ${BINARY_DIR}/with_epa/pong_vnfd_with_epa ${BINARY_DIR}/pong_vnfd_with_epa +mv ${BINARY_DIR}/with_epa/ping_pong_nsd_with_epa ${BINARY_DIR}/ping_pong_nsd_with_epa + +### ReMove the original directories +rm -rf ${BINARY_DIR}/with_epa + +# copy a dummy image for now +if [ -e ${PING_QCOW_IMAGE} ]; then + cp ${PING_QCOW_IMAGE} ${BINARY_DIR}/ping_vnfd_with_image/images/ + ${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_image +else + echo >&2 "Warn: Skipped creating ping_vnfd_with_image due to missing image: ${PING_QCOW_IMAGE}" +fi + +if [ -e ${PONG_QCOW_IMAGE} ]; then + cp ${PONG_QCOW_IMAGE} ${BINARY_DIR}/pong_vnfd_with_image/images/ + ${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_image +else + echo >&2 "Warn: Skipped creating pong_vnfd_with_image due to missing image: ${PONG_QCOW_IMAGE}" +fi + +# Generate the tar files +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd + +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_epa +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_epa +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_with_epa diff --git 
a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/__init__.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/__init__.py new file mode 100644 index 0000000..e57e943 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.py new file mode 100644 index 0000000..9892643 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.py @@ -0,0 +1,314 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +from datetime import date +import logging +import json +import socket +import threading +import time + +import tornado.web + +from util.util import get_url_target + +class Ping(object): + def __init__(self): + self._log = logging.getLogger("ping") + self._log.setLevel(logging.DEBUG) + + self._ping_count = 0; + self._request_count = 0; + self._response_count = 0; + + self._pong_ip = None + self._pong_port = None + + self._send_rate = 1 # per second + + self._close_lock = threading.Lock() + + self._enabled = False + self._socket = None + + @property + def rate(self): + return self._send_rate + + @rate.setter + def rate(self, value): + self._log.debug("new rate: %s" % value) + self._send_rate = value + + @property + def pong_port(self): + return self._pong_port + + @pong_port.setter + def pong_port(self, value): + self._log.debug("new pong port: %s" % value) + self._pong_port = value + + @property + def pong_ip(self): + return self._pong_ip + + @pong_ip.setter + def pong_ip(self, value): + + self._log.debug("new pong ip: %s" % value) + self._pong_ip = value + + @property + def enabled(self): + return self._enabled + + @property + def request_count(self): + return self._request_count + + @property + def response_count(self): + return self._response_count + + def start(self): + self._log.debug("starting") + self._enabled = True + # self.open_socket() + self.send_thread = threading.Thread(target=self.send_ping) + self.recv_thread = threading.Thread(target=self.recv_resp) + self.send_thread.start() + self.recv_thread.start() + + def stop(self): + self._log.debug("stopping") + self._enabled = False + self.close_socket("stopping") + + def close_socket(self, msg): + self._close_lock.acquire() + if self._socket != None: + self._socket.close() + self._socket = None + 
self._log.info("Closed socket with msg={}".format(msg)) + self._close_lock.release() + + def open_socket(self): + try: + self._log.debug("construct socket") + self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._socket.settimeout(1) + except socket.error as msg: + self._log.error("error constructing socket %s" % msg) + self._socket = None + + while self._enabled: + try: + self._log.info("Trying to connect....") + self._socket.connect((self.pong_ip, self.pong_port)) + self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + self._log.info("Socket connected") + break + except socket.error as msg: + time.sleep(1) + + + def send_ping(self): + self.open_socket() + + while self._enabled: + if self._socket != None: + req = "rwpingpong-{}".format(self._ping_count) + try: + self._log.info("sending: %s" %req) + self._socket.sendall(req) + self._ping_count += 1 + self._request_count += 1 + except socket.error as msg: + self._log.error("Error({}) sending data".format(msg)) + self.close_socket(msg) + return + + time.sleep(1.0/self._send_rate) + + self._log.info("Stopping send_ping") + + def recv_resp(self): + while self._enabled: + respb = None + if self._socket != None: + try: + respb = self._socket.recv(1024) + except socket.timeout: + continue + except socket.error as msg: + self._log.error("Error({}) receiving data".format(msg)) + time.sleep(1) + continue + # self.close_socket(msg) + # return + + if not respb: + continue + + resp = respb.decode('UTF-8') + self._response_count += 1 + self._log.info("receive: %s" % resp) + + self._log.info("Stopping recv_resp") + +class PingServerHandler(tornado.web.RequestHandler): + def initialize(self, ping_instance): + self._ping_instance = ping_instance + + def get(self, args): + response = {'ip': self._ping_instance.pong_ip, + 'port': self._ping_instance.pong_port} + + self.write(response) + + def post(self, args): + target = get_url_target(self.request.uri) + body = self.request.body.decode("utf-8") + body_header = self.request.headers.get("Content-Type") + + if "json" not in body_header: + self.write("Content-Type must be some kind of json 2") + self.set_status(405) + return + + try: + json_dicts = json.loads(body) + except: + self.write("Content-Type must be some kind of json 1") + self.set_status(405) + return + + if target == "server": + if type(json_dicts['port']) is not int: + self.set_status(405) + return + + if type(json_dicts['ip']) not in (str, unicode): + self.set_status(405) + return + + self._ping_instance.pong_ip = json_dicts['ip'] + self._ping_instance.pong_port = json_dicts['port'] + + else: + self.set_status(404) + return + + self.set_status(200) + +class PingAdminStatusHandler(tornado.web.RequestHandler): + def initialize(self, ping_instance): + self._ping_instance = ping_instance + + def get(self, args): + target = get_url_target(self.request.uri) + if target == "state": + value = "enabled" if self._ping_instance.enabled else "disabled" + + response = { 'adminstatus': value } + else: + self.set_status(404) + return + + self.write(response) + + def post(self, args): + target = get_url_target(self.request.uri) + body = self.request.body.decode("utf-8") + body_header = self.request.headers.get("Content-Type") + + if "json" not in body_header: + self.write("Content-Type must be some kind of json 2") + self.set_status(405) + return + + try: + json_dicts = json.loads(body) + except: + self.write("Content-Type must be some kind of json 1") + self.set_status(405) + return + + if target == "state": + if 
type(json_dicts['enable']) is not bool: + self.set_status(405) + return + + if json_dicts['enable']: + if not self._ping_instance.enabled: + self._ping_instance.start() + else: + self._ping_instance.stop() + + else: + self.set_status(404) + return + + self.set_status(200) + +class PingStatsHandler(tornado.web.RequestHandler): + def initialize(self, ping_instance): + self._ping_instance = ping_instance + + def get(self): + response = {'ping-request-tx-count': self._ping_instance.request_count, + 'ping-response-rx-count': self._ping_instance.response_count} + + self.write(response) + +class PingRateHandler(tornado.web.RequestHandler): + def initialize(self, ping_instance): + self._ping_instance = ping_instance + + def get(self, args): + response = { 'rate': self._ping_instance.rate } + + self.write(response) + + def post(self, args): + target = get_url_target(self.request.uri) + body = self.request.body.decode("utf-8") + body_header = self.request.headers.get("Content-Type") + + if "json" not in body_header: + self.set_status(405) + return + + try: + json_dicts = json.loads(body) + except: + self.set_status(405) + return + + if target == "rate": + if type(json_dicts['rate']) is not int: + self.set_status(405) + return + + self._ping_instance.rate = json_dicts['rate'] + else: + self.set_status(404) + return + + self.set_status(200) \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.service b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.service new file mode 100644 index 0000000..cd0ac65 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.service @@ -0,0 +1,12 @@ +[Unit] +Description=Ping Client +After=syslog.target network.target + +[Service] +Type=simple +ExecStart=/opt/rift/ping_pong_ns/start_ping + +[Install] +WantedBy=multi-user.target + + diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.py new file mode 100644 index 0000000..ea8f552 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.py @@ -0,0 +1,334 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
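The tornado handlers above make up the ping VNF's management plane: JSON POSTs to /api/v1/ping/server, /api/v1/ping/rate and /api/v1/ping/adminstatus/state configure and enable the traffic threads, and a GET on /api/v1/ping/stats reports the counters. The patch drives this API with curl from test/test.sh; the sketch below does the same from Python with the requests library. The localhost address, the placeholder pong address and the use of requests are assumptions for illustration only.

#!/usr/bin/env python3
# Minimal sketch of a client for the ping VNF management API defined by the
# handlers above. Assumes the manager listens on localhost:18888 (the default
# ping manager port) and that a pong instance is reachable at PONG_IP:PONG_PORT.
import requests

PING_MGMT = "http://localhost:18888"
PONG_IP = "10.0.0.2"   # placeholder: address where the pong VNF listens
PONG_PORT = 5555       # placeholder: matches server_port in the config scripts
HEADERS = {"Content-Type": "application/json"}   # handlers only check for "json"

# Point ping at the pong server (PingServerHandler POST).
requests.post(PING_MGMT + "/api/v1/ping/server",
              json={"ip": PONG_IP, "port": PONG_PORT},
              headers=HEADERS).raise_for_status()

# Set the request rate in packets per second (PingRateHandler POST).
requests.post(PING_MGMT + "/api/v1/ping/rate",
              json={"rate": 5}, headers=HEADERS).raise_for_status()

# Enable the traffic threads (PingAdminStatusHandler POST).
requests.post(PING_MGMT + "/api/v1/ping/adminstatus/state",
              json={"enable": True}, headers=HEADERS).raise_for_status()

# Read back the counters (PingStatsHandler GET).
print(requests.get(PING_MGMT + "/api/v1/ping/stats").json())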
+ +# +# + +from datetime import date +from Queue import Queue +import logging +import json +import socket +import threading +import time + +import tornado.web + +from util.util import get_url_target + +class Stats(object): + def __init__(self): + self._request_count = 0 + self._response_count = 0 + + self._lock = threading.Lock() + + @property + def request_count(self): + with self._lock: + return self._request_count + + @request_count.setter + def request_count(self, value): + with self._lock: + self._request_count = value + + @property + def response_count(self): + with self._lock: + return self._response_count + + @response_count.setter + def response_count(self, value): + with self._lock: + self._response_count = value + +class Worker(threading.Thread): + def __init__(self, log, connections, stats): + super(Worker, self).__init__() + self._log = log + self._connections = connections + self._stats = stats + + self._running = True + + self._lock = threading.Lock() + + @property + def running(self): + return self._running + + @running.setter + def running(self, value): + self._running = value + + def run(self): + while self.running: + try: + connection = self._connections.get_nowait() + except: + continue + + try: + req = connection.recv(1024) + except socket.error as msg: + self._log.error("error with connection read: " % msg) + self._connections.put(connection) + continue + + if not req: + self._connections.put(connection) + continue + + resp = req.decode('UTF-8') + self._log.debug("got: %s", resp) + + self._stats.request_count += 1 + + try: + connection.sendall(resp) + self._stats.response_count += 1 + except socket.error as msg: + self._log.error("error with connection read: " % msg) + self._connections.put(connection) + continue + + self._connections.put(connection) + +class Pong(object): + def __init__(self, worker_count=5): + self._log = logging.getLogger("pong") + self._log.setLevel(logging.DEBUG) + + self.listen_ip = None + self.listen_port = None + + self._lock = threading.Lock() + + self._connections = Queue() + + self._stats = Stats() + + self._workers = list() + + self._enabled = False + + for _ in range(worker_count): + self._workers.append(Worker(self._log, self._connections, self._stats)) + + @property + def listen_port(self): + return self._listen_port + + @listen_port.setter + def listen_port(self, value): + self._log.debug("new listen port: %s" % value) + self._listen_port = value + + @property + def listen_ip(self): + return self._listen_ip + + @listen_ip.setter + def listen_ip(self, value): + self._log.debug("listen pong ip: %s" % value) + self._listen_ip = value + + + @property + def enabled(self): + with self._lock: + return self._enabled + + @property + def request_count(self): + return self._stats.request_count + + @property + def response_count(self): + return self._stats.response_count + + def start(self): + self._log.debug("starting") + self._enabled = True + self.listener_thread = threading.Thread(target=self._listen) + self.listener_thread.start() + for worker in self._workers: + worker.start() + + def stop(self): + with self._lock: + self._enabled = False + + self._log.debug("stopping workers") + for worker in self._workers: + worker.running = False + + self._log.debug("joining on workers") + for worker in self._workers: + if worker.is_alive(): + worker.join() + + while self._connections.full(): + try: + connection = self._connections.get_nowait() + connection.close() + except: + pass + + def close_socket(self, msg): + with self._lock: + if self._socket != 
None: + self._socket.shutdown(socket.SHUT_RD) + self._socket.close() + self._socket = None + self._log.info("Closed socket with msg={}".format(msg)) + + def _listen(self): + if self._listen_ip is None or self.listen_port is None: + self._log.error("address not properly configured to listen") + return + + self._log.info("listen for incomming connections") + try: + self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # self._socket.bind((self.listen_ip, self.listen_port)) + self._socket.bind(("0.0.0.0", self.listen_port)) + self._socket.settimeout(1) + + while self.enabled: + + try: + self._socket.listen(1) + connection, address = self._socket.accept() + except socket.timeout: + continue + self._log.info("Accepted connection from {}".format(address)) + + self._connections.put(connection) + else: + self.stop() + except socket.error as msg: + self.close_socket(msg) + +class PongStatsHandler(tornado.web.RequestHandler): + def initialize(self, pong_instance): + self._pong_instance = pong_instance + + def get(self): + response = {'ping-request-rx-count': self._pong_instance.request_count, + 'ping-response-tx-count': self._pong_instance.response_count} + + self.write(response) + + +class PongServerHandler(tornado.web.RequestHandler): + def initialize(self, pong_instance): + self._pong_instance = pong_instance + + def get(self, args): + response = {'ip': self._pong_instance.listen_ip, + 'port': self._pong_instance.listen_port} + + self.write(response) + + def post(self, args): + target = get_url_target(self.request.uri) + body = self.request.body.decode("utf-8") + body_header = self.request.headers.get("Content-Type") + + if "json" not in body_header: + self.write("Content-Type must be some kind of json") + self.set_status(405) + return + + try: + json_dicts = json.loads(body) + except: + self.write("Content-Type must be some kind of json") + self.set_status(405) + return + + if target == "server": + + if type(json_dicts['port']) is not int: + self.set_status(405) + return + + if type(json_dicts['ip']) not in (str, unicode): + self.set_status(405) + return + + self._pong_instance.listen_ip = json_dicts['ip'] + self._pong_instance.listen_port = json_dicts['port'] + + else: + self.set_status(404) + return + + self.set_status(200) + +class PongAdminStatusHandler(tornado.web.RequestHandler): + def initialize(self, pong_instance): + self._pong_instance = pong_instance + + def get(self, args): + target = get_url_target(self.request.uri) + + if target == "state": + value = "enabled" if self._pong_instance.enabled else "disabled" + + response = { 'adminstatus': value } + else: + self.set_status(404) + return + + self.write(response) + + def post(self, args): + target = get_url_target(self.request.uri) + body = self.request.body.decode("utf-8") + body_header = self.request.headers.get("Content-Type") + + if "json" not in body_header: + self.write("Content-Type must be some kind of json") + self.set_status(405) + return + + try: + json_dicts = json.loads(body) + except: + self.write("Content-Type must be some kind of json") + self.set_status(405) + return + + if target == "state": + if type(json_dicts['enable']) is not bool: + self.set_status(405) + return + + if json_dicts['enable']: + if not self._pong_instance.enabled: + self._pong_instance.start() + else: + self._pong_instance.stop() + + else: + self.set_status(404) + return + + self.set_status(200) \ No 
newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.service b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.service new file mode 100644 index 0000000..7d94836 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.service @@ -0,0 +1,12 @@ +[Unit] +Description=Ping Client +After=syslog.target network.target + +[Service] +Type=simple +ExecStart=/opt/rift/ping_pong_ns/start_pong + +[Install] +WantedBy=multi-user.target + + diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh new file mode 100755 index 0000000..fc34710 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh @@ -0,0 +1,139 @@ +#! /bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# +# Author(s): Anil Gunturu +# Creation Date: 07/24/2014 +# + +## +# This script is used to copy the riftware software into the qcow image +# This script must be run on the grunt machine as root +## + +set -x +set -e + +if ! [ $# -eq 1 ]; then + echo "Usage: $0 " + echo " Example:" + echo " $0 /net/boson/home1/agunturu/lepton/atg/modules/core/mc/examples/ping_pong_ns" + exit 1 +fi + +# Currently returning 0 on error as this script fails in Bangalore +# systems and causes the jenkins spot_debug to fail +function cleanup { + if [ "$(ls -A $MOUNT_PT)" ]; then + guestunmount $MOUNT_PT + fi + exit 0 +} +trap cleanup EXIT + +MOUNT_PT=ping_pong/mnt$$ + +if [ -d $MOUNT_PT ]; then + echo "ping_pong_mnt directory exists - deleting..!!" + guestunmount $MOUNT_PT || true + rm -rf ping_pong +fi + +mkdir -p $MOUNT_PT +FC20QCOW=Fedora-x86_64-20-20131211.1-sda.qcow2 +PINGQCOW=Fedora-x86_64-20-20131211.1-sda-ping.qcow2 +PONGQCOW=Fedora-x86_64-20-20131211.1-sda-pong.qcow2 + +if [ ! 
-e ${RIFT_ROOT}/images/${FC20QCOW} ]; then + echo >&2 "Warn: Cannot prepare ping_pong qcow due to missing FC20 image: ${RIFT_ROOT}/images/${FC20QCOW}" + exit 0 +fi + +echo "Copying $FC20QCOW" +cp ${RIFT_ROOT}/images/${FC20QCOW} ping_pong/${PINGQCOW} +chmod +w ping_pong/${PINGQCOW} +cp ${RIFT_ROOT}/images/${FC20QCOW} ping_pong/${PONGQCOW} +chmod +w ping_pong/${PONGQCOW} + +CURRENT_DIR=$PWD +echo "Mounting guestfs for $PINGQCOW" +guestmount -a ping_pong/$PINGQCOW -m /dev/sda1 $MOUNT_PT + +echo "Setting up resolv.conf" +# removed RIFT.io lab-centric setup in RIFT-11991 +#echo "search lab.riftio.com eng.riftio.com riftio.com" > $MOUNT_PT/etc/resolv.conf +#echo "nameserver 10.64.1.3" >> $MOUNT_PT/etc/resolv.conf +#echo "PEERDNS=no" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0 + +# add a valid DNS server just in case +echo "nameserver 8.8.8.8" > $MOUNT_PT/etc/resolv.conf +echo "DEFROUTE=yes" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0 + +for i in 1 2 +do + cat <> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth$i +DEVICE="eth$i" +BOOTPROTO="dhcp" +ONBOOT="no" +TYPE="Ethernet" +DEFROUTE=no +PEERDNS=no +EOF +done + + +echo "Copying ping/pong ns..." +cd $MOUNT_PT/opt +mkdir rift +cd rift +cp -r $1 . +cd $CURRENT_DIR +mv $MOUNT_PT/opt/rift/ping_pong_ns/ping.service $MOUNT_PT/etc/systemd/system +cp -ar /usr/lib/python2.7/site-packages/tornado $MOUNT_PT/usr/lib/python2.7/site-packages/ +guestunmount $MOUNT_PT + +echo "Mounting guestfs for $PINGQCOW" +guestmount -a ping_pong/$PONGQCOW -m /dev/sda1 $MOUNT_PT + +echo "Setting up resolv.conf" +echo "search lab.riftio.com eng.riftio.com riftio.com" > $MOUNT_PT/etc/resolv.conf +echo "nameserver 10.64.1.3" >> $MOUNT_PT/etc/resolv.conf +echo "PEERDNS=no" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0 +echo "DEFROUTE=yes" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0 + +for i in 1 2 +do + cat <> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth$i +DEVICE="eth$i" +BOOTPROTO="dhcp" +ONBOOT="no" +DEFROUTE=no +TYPE="Ethernet" +PEERDNS=no +EOF +done + +echo "Copying ping/pong ns..." +cd $MOUNT_PT/opt +mkdir rift +cd rift +cp -r $1 . +cd $CURRENT_DIR +cp -ar /usr/lib/python2.7/site-packages/tornado $MOUNT_PT/usr/lib/python2.7/site-packages/ +mv $MOUNT_PT/opt/rift/ping_pong_ns/pong.service $MOUNT_PT/etc/systemd/system +guestunmount $MOUNT_PT \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping new file mode 100755 index 0000000..fb29422 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping @@ -0,0 +1,5 @@ +#!/bin/bash +ulimit -c 0 +#yum install -y python-tornado +python /opt/rift/ping_pong_ns/start_ping.py 2>&1 | logger + diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping.py new file mode 100644 index 0000000..90400dd --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import argparse +import signal +import logging + +import tornado +import tornado.httpserver + +from ping import ( + Ping, + PingAdminStatusHandler, + PingServerHandler, + PingRateHandler, + PingStatsHandler, +) +from util.util import ( + VersionHandler, +) + +logging.basicConfig(level=logging.DEBUG, + format='(%(threadName)-10s) %(name)-8s :: %(message)s', +) + +def main(): + log = logging.getLogger("main") + + # parse arguments + parser = argparse.ArgumentParser() + parser.add_argument( + "--ping-manager-port", + required=False, + default="18888", + help="port number for ping") + + arguments = parser.parse_args() + + # setup application + log.debug("setup application") + ping_instance = Ping() + ping_application_arguments = {'ping_instance': ping_instance} + ping_application = tornado.web.Application([ + (r"/api/v1/ping/stats", PingStatsHandler, ping_application_arguments), + (r"/api/v1/ping/adminstatus/([a-z]+)", PingAdminStatusHandler, ping_application_arguments), + (r"/api/v1/ping/server/?([0-9a-z\.]*)", PingServerHandler, ping_application_arguments), + (r"/api/v1/ping/rate/?([0-9]*)", PingRateHandler, ping_application_arguments), + (r"/version", VersionHandler, ping_application_arguments) + ]) + ping_server = tornado.httpserver.HTTPServer( + ping_application) + + # setup SIGINT handler + log.debug("setup SIGINT handler") + def signal_handler(signal, frame): + print("") # print newline to clear user input + log.info("Exiting") + ping_instance.stop() + ping_server.stop() + log.info("Sayonara!") + quit() + + signal.signal(signal.SIGINT, signal_handler) + + # start + log.debug("start") + try: + ping_server.listen(arguments.ping_manager_port) + except OSError: + print("port %s is already is use, exiting" % arguments.ping_manager_port) + return + + tornado.ioloop.IOLoop.instance().start() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong new file mode 100755 index 0000000..af46646 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong @@ -0,0 +1,5 @@ +#!/bin/bash +ulimit -c 0 +#yum install -y python-tornado +python /opt/rift/ping_pong_ns/start_pong.py 2>&1 | logger + diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong.py new file mode 100644 index 0000000..ba0c9b7 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import argparse +import signal +import logging + +import tornado +import tornado.httpserver + +from pong import ( + Pong, + PongAdminStatusHandler, + PongServerHandler, + PongStatsHandler, +) +from util.util import ( + VersionHandler, +) + +logging.basicConfig(level=logging.DEBUG, + format='(%(threadName)-10s) %(name)-8s :: %(message)s', +) + +def main(): + log = logging.getLogger("main") + + # parse arguments + parser = argparse.ArgumentParser() + parser.add_argument( + "--pong-manager-port", + required=False, + default="18889", + help="port number for pong") + parser.add_argument( + "--worker-count", + required=False, + default=5, + help="ip address of pong") + + arguments = parser.parse_args() + + # setup application + log.debug("setup application") + pong_instance = Pong(arguments.worker_count) + pong_application_arguments = {'pong_instance': pong_instance} + pong_application = tornado.web.Application([ + (r"/version", VersionHandler, pong_application_arguments), + (r"/api/v1/pong/stats", PongStatsHandler, pong_application_arguments), + (r"/api/v1/pong/server/?([0-9a-z\.]*)", PongServerHandler, pong_application_arguments), + (r"/api/v1/pong/adminstatus/([a-z]+)", PongAdminStatusHandler, pong_application_arguments) + ]) + pong_server = tornado.httpserver.HTTPServer( + pong_application) + + # setup SIGINT handler + log.debug("setup SIGINT handler") + def signal_handler(signal, frame): + print("") # print newline to clear user input + log.info("Exiting") + pong_instance.stop() + pong_server.stop() + log.info("Sayonara!") + quit() + + signal.signal(signal.SIGINT, signal_handler) + + # start + log.debug("pong application listening on %s" % arguments.pong_manager_port) + try: + pong_server.listen(arguments.pong_manager_port) + except OSError: + print("port %s is already is use, exiting" % arguments.ping_manager_port) + return + tornado.ioloop.IOLoop.instance().start() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/test/test.sh b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/test/test.sh new file mode 100644 index 0000000..8bd480f --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/test/test.sh @@ -0,0 +1,151 @@ +#!/usr/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
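start_pong.py above wires up only the management plane; the data path is the plain TCP echo implemented by the Pong worker threads, which read whatever ping sends ("rwpingpong-<n>" strings) and write the same bytes back. The sketch below talks to that echo service directly. It assumes pong has already been enabled through its management API and is listening on the placeholder HOST:PORT shown (5555 matches the server_port used by the NSD config scripts); it is illustrative only.

#!/usr/bin/env python3
# Minimal sketch of a data-plane client for the pong echo service.
# Assumes pong was configured and enabled via its management API and is
# listening on HOST:PORT; pong echoes back whatever it reads.
import socket

HOST = "127.0.0.1"   # placeholder: pong's listen address
PORT = 5555          # placeholder: matches server_port in the config scripts

with socket.create_connection((HOST, PORT), timeout=5) as sock:
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    for i in range(3):
        request = "rwpingpong-{}".format(i).encode("utf-8")
        sock.sendall(request)
        reply = sock.recv(1024)
        print("sent {!r}, received {!r}".format(request, reply))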
+# + +# +# + + +pong_ip='localhost' +pong_port=18889 + +ping_ip='localhost' +ping_port=18888 + +if [ "$1" == "pong" ]; +then + if [ "$2" == "enable" ]; + then + echo "enable pong" + + curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"enable\":true}" \ + http://${pong_ip}:${pong_port}/api/v1/pong/adminstatus/state + fi + if [ "$2" == "disable" ]; + then + echo "disable pong" + + curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"enable\":false}" \ + http://${pong_ip}:${pong_port}/api/v1/pong/adminstatus/state + fi + + if [ "$2" == "server" ]; + then + echo "set server" + + curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"ip\":\"$3\", \"port\":$4}" \ + http://${pong_ip}:${pong_port}/api/v1/pong/server + fi + + echo "" +fi + +if [ "$1" == "ping" ]; +then + if [ "$2" == "enable" ]; + then + echo "enable ping" + + curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"enable\":true}" \ + http://${ping_ip}:${ping_port}/api/v1/ping/adminstatus/state + fi + if [ "$2" == "disable" ]; + then + echo "disable ping" + + curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"enable\":false}" \ + http://${ping_ip}:${ping_port}/api/v1/ping/adminstatus/state + fi + echo "" + + if [ "$2" == "rate" ]; + then + echo "disable ping" + + curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"rate\":$3}" \ + http://${ping_ip}:${ping_port}/api/v1/ping/rate + fi + echo "" + + if [ "$2" == "server" ]; + then + echo "set server" + + curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"ip\":\"$3\", \"port\":$4}" \ + http://${ping_ip}:${ping_port}/api/v1/ping/server + fi + echo "" + + +fi + +if [ "$1" == "stats" ]; +then + echo "ping stats:" + curl http://${ping_ip}:${ping_port}/api/v1/ping/stats + echo "" + + echo "pong stats:" + curl http://${pong_ip}:${pong_port}/api/v1/pong/stats + echo "" +fi + +if [ "$1" == "config" ]; +then + echo "ping server:" + curl http://${ping_ip}:${ping_port}/api/v1/ping/server + echo "" + echo "ping rate:" + curl http://${ping_ip}:${ping_port}/api/v1/ping/rate + echo "" + echo "ping admin status:" + curl http://${ping_ip}:${ping_port}/api/v1/ping/adminstatus/state + echo "" + echo "pong server:" + curl http://${pong_ip}:${pong_port}/api/v1/pong/server + echo "" + echo "pong admin status:" + curl http://${pong_ip}:${pong_port}/api/v1/pong/adminstatus/state + echo "" +fi \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/user-data b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/user-data new file mode 100644 index 0000000..9bf1d5b --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/user-data @@ -0,0 +1,8 @@ +#cloud-config +password: fedora +chpasswd: { expire: False } +ssh_pwauth: True +runcmd: + - [ systemctl, daemon-reload ] + - [ systemctl, enable, ping.service ] + - [ systemctl, start, --no-block, ping.service ] diff --git 
a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/__init__.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/__init__.py new file mode 100644 index 0000000..e57e943 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/util.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/util.py new file mode 100644 index 0000000..1d35ae5 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/util.py @@ -0,0 +1,40 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +from datetime import date +import urlparse + +import tornado.web + +class VersionHandler(tornado.web.RequestHandler): + def initialize(self, instance): + self._instance = instance + + def get(self): + response = { 'version': '3.5.1', + 'last_build': date.today().isoformat() } + self.write(response) + +def get_url_target(url): + is_operation = False + url_parts = urlparse.urlsplit(url) + whole_url = url_parts[2] + + url_pieces = whole_url.split("/") + + return url_pieces[-1] \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_nsd.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_nsd.py new file mode 120000 index 0000000..3147ac8 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_nsd.py @@ -0,0 +1 @@ +rift/mano/examples/ping_pong_nsd.py \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/rift/mano/__init__.py b/modules/core/mano/examples/ping_pong_ns/rift/mano/__init__.py new file mode 100644 index 0000000..00f74ea --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/rift/mano/__init__.py @@ -0,0 +1,15 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
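get_url_target() in util/util.py simply returns the last path component of the request URI; that is how the ping and pong handlers distinguish the "server", "rate" and "state" targets. The module imports the Python 2 urlparse module, consistent with the Python 2 tornado runtime copied into the qcow images. The sketch below is illustrative only: it shows the Python 3 equivalent (urllib.parse) together with the target values the handlers rely on.

#!/usr/bin/env python3
# Illustration of what get_url_target() returns for the management URLs used
# by the ping/pong handlers. util/util.py uses the Python 2 'urlparse' module;
# urllib.parse is the Python 3 equivalent used here.
from urllib.parse import urlsplit


def get_url_target(url):
    """Return the last path component of a URL, e.g. 'state' or 'server'."""
    path = urlsplit(url).path
    return path.split("/")[-1]


assert get_url_target("/api/v1/ping/adminstatus/state") == "state"
assert get_url_target("/api/v1/pong/server") == "server"
assert get_url_target("/api/v1/ping/rate") == "rate"
print("all targets resolve as expected")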
+ diff --git a/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/__init__.py b/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/__init__.py new file mode 100644 index 0000000..00f74ea --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/__init__.py @@ -0,0 +1,15 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py b/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py new file mode 100755 index 0000000..e7bd172 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py @@ -0,0 +1,588 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
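generate_packages.sh.in computes an md5 for each qcow image with md5sum piped through cut and hands the digests to this generator, which stores them in each VDU's image_checksum field. A minimal Python equivalent of that checksum step is sketched below for illustration; the chunked-read helper and its name are not part of the patch.

#!/usr/bin/env python3
# Illustrative equivalent of the md5sum | cut pipeline in generate_packages.sh.in:
# compute the md5 of a (possibly large) qcow2 image in chunks. The resulting
# hex digest is what ends up in the VDU's image_checksum field.
import hashlib
import sys


def image_md5(path, chunk_size=1024 * 1024):
    digest = hashlib.md5()
    with open(path, "rb") as image:
        for chunk in iter(lambda: image.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


if __name__ == "__main__":
    print(image_md5(sys.argv[1]))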
+ + +# +# + + +import sys +import os +import argparse +import uuid +import rift.vcs.component as vcs + +import gi +gi.require_version('RwYang', '1.0') +gi.require_version('VnfdYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwNsdYang', '1.0') + + + +from gi.repository import ( + NsdYang, + VldYang, + VnfdYang, + RwNsdYang, + RwVnfdYang, + RwYang, + ) + +NUM_PING_INSTANCES = 1 +MAX_VNF_INSTANCES_PER_NS = 10 +use_epa = False +pingcount = NUM_PING_INSTANCES + +PING_USERDATA_FILE = '''#cloud-config +password: fedora +chpasswd: { expire: False } +ssh_pwauth: True +runcmd: + - [ systemctl, daemon-reload ] + - [ systemctl, enable, ping.service ] + - [ systemctl, start, --no-block, ping.service ] + - [ ifup, eth1 ] +''' + +PONG_USERDATA_FILE = '''#cloud-config +password: fedora +chpasswd: { expire: False } +ssh_pwauth: True +runcmd: + - [ systemctl, daemon-reload ] + - [ systemctl, enable, pong.service ] + - [ systemctl, start, --no-block, pong.service ] + - [ ifup, eth1 ] +''' + + +class UnknownVNFError(Exception): + pass + +class ManoDescriptor(object): + def __init__(self, name): + self.name = name + self.descriptor = None + + def write_to_file(self, module_list, outdir, output_format): + model = RwYang.Model.create_libncx() + for module in module_list: + model.load_module(module) + + if output_format == 'json': + with open('%s/%s.json' % (outdir, self.name), "w") as fh: + fh.write(self.descriptor.to_json(model)) + elif output_format.strip() == 'xml': + with open('%s/%s.xml' % (outdir, self.name), "w") as fh: + fh.write(self.descriptor.to_xml_v2(model, pretty_print=True)) + else: + raise("Invalid output format for the descriptor") + +class VirtualNetworkFunction(ManoDescriptor): + def __init__(self, name, instance_count=1): + self.vnfd_catalog = None + self.vnfd = None + self.instance_count = instance_count + super(VirtualNetworkFunction, self).__init__(name) + + def compose(self, image_name, cloud_init="", endpoint=None, mon_params=[], + mon_port=8888, mgmt_port=8888, num_vlr_count=1, num_ivlr_count=1, + num_vms=1, image_md5sum=None): + self.descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog() + self.id = str(uuid.uuid1()) + vnfd = self.descriptor.vnfd.add() + vnfd.id = self.id + vnfd.name = self.name + vnfd.short_name = self.name + vnfd.vendor = 'RIFT.io' + vnfd.description = 'This is an example RIFT.ware VNF' + vnfd.version = '1.0' + + self.vnfd = vnfd + + internal_vlds = [] + for i in range(num_ivlr_count): + internal_vld = vnfd.internal_vld.add() + internal_vld.id = str(uuid.uuid1()) + internal_vld.name = 'fabric%s' % i + internal_vld.short_name = 'fabric%s' % i + internal_vld.description = 'Virtual link for internal fabric%s' % i + internal_vld.type_yang = 'ELAN' + internal_vlds.append(internal_vld) + + for i in range(num_vlr_count): + cp = vnfd.connection_point.add() + cp.type_yang = 'VPORT' + cp.name = '%s/cp%d' % (self.name, i) + + if endpoint is not None: + endp = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_HttpEndpoint( + path=endpoint, port=mon_port, polling_interval_secs=2 + ) + vnfd.http_endpoint.append(endp) + + # Monitoring params + for monp_dict in mon_params: + monp = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_MonitoringParam.from_dict(monp_dict) + monp.http_endpoint_ref = endpoint + vnfd.monitoring_param.append(monp) + + for i in range(num_vms): + # VDU Specification + vdu = vnfd.vdu.add() + vdu.id = str(uuid.uuid1()) + vdu.name = 'iovdu_%s' % i + vdu.count = 1 + #vdu.mgmt_vpci = '0000:00:20.0' + + # specify the VM flavor + if use_epa: + 
vdu.vm_flavor.vcpu_count = 4 + vdu.vm_flavor.memory_mb = 1024 + vdu.vm_flavor.storage_gb = 4 + else: + vdu.vm_flavor.vcpu_count = 1 + vdu.vm_flavor.memory_mb = 512 + vdu.vm_flavor.storage_gb = 4 + + # Management interface + mgmt_intf = vnfd.mgmt_interface + mgmt_intf.vdu_id = vdu.id + mgmt_intf.port = mgmt_port + mgmt_intf.dashboard_params.path = "/api/v1/pong/stats" + + vdu.cloud_init = cloud_init + + # sepcify the guest EPA + if use_epa: + vdu.guest_epa.trusted_execution = False + vdu.guest_epa.mempage_size = 'LARGE' + vdu.guest_epa.cpu_pinning_policy = 'DEDICATED' + vdu.guest_epa.cpu_thread_pinning_policy = 'PREFER' + vdu.guest_epa.numa_node_policy.node_cnt = 2 + vdu.guest_epa.numa_node_policy.mem_policy = 'STRICT' + + node = vdu.guest_epa.numa_node_policy.node.add() + node.id = 0 + node.memory_mb = 512 + node.vcpu = [0, 1] + + node = vdu.guest_epa.numa_node_policy.node.add() + node.id = 1 + node.memory_mb = 512 + node.vcpu = [2, 3] + + # specify the vswitch EPA + vdu.vswitch_epa.ovs_acceleration = 'DISABLED' + vdu.vswitch_epa.ovs_offload = 'DISABLED' + + # Specify the hypervisor EPA + vdu.hypervisor_epa.type_yang = 'PREFER_KVM' + + # Specify the host EPA + vdu.host_epa.cpu_model = 'PREFER_SANDYBRIDGE' + vdu.host_epa.cpu_arch = 'PREFER_X86_64' + vdu.host_epa.cpu_vendor = 'PREFER_INTEL' + vdu.host_epa.cpu_socket_count = 'PREFER_TWO' + vdu.host_epa.cpu_feature = ['PREFER_AES', 'PREFER_CAT'] + + vdu.image = image_name + if image_md5sum is not None: + vdu.image_checksum = image_md5sum + + for i in range(num_ivlr_count): + internal_cp = vdu.internal_connection_point.add() + internal_cp.id = str(uuid.uuid1()) + internal_cp.type_yang = 'VPORT' + internal_vlds[i].internal_connection_point_ref.append(internal_cp.id) + + internal_interface = vdu.internal_interface.add() + internal_interface.name = 'fab%d' % i + internal_interface.vdu_internal_connection_point_ref = internal_cp.id + internal_interface.virtual_interface.type_yang = 'VIRTIO' + + #internal_interface.virtual_interface.vpci = '0000:00:1%d.0'%i + + for i in range(num_vlr_count): + external_interface = vdu.external_interface.add() + external_interface.name = 'eth%d' % i + external_interface.vnfd_connection_point_ref = '%s/cp%d' % (self.name, i) + if use_epa: + external_interface.virtual_interface.type_yang = 'VIRTIO' + else: + external_interface.virtual_interface.type_yang = 'VIRTIO' + #external_interface.virtual_interface.vpci = '0000:00:2%d.0'%i + + def write_to_file(self, outdir, output_format): + dirpath = "%s/%s/vnfd" % (outdir, self.name) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + super(VirtualNetworkFunction, self).write_to_file(['vnfd', 'rw-vnfd'], + "%s/%s/vnfd" % (outdir, self.name), + output_format) + +class NetworkService(ManoDescriptor): + def __init__(self, name): + super(NetworkService, self).__init__(name) + + def ping_config(self): + suffix = '' + if use_epa: + suffix = '_with_epa' + ping_cfg = r''' +#!/usr/bin/bash + +# Rest API config +ping_mgmt_ip='' +ping_mgmt_port=18888 + +# VNF specific configuration +pong_server_ip='' +ping_rate=5 +server_port=5555 + +# Make rest API calls to configure VNF +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \ + http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to set server info for ping!" 
+ exit $rc +fi + +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"rate\":$ping_rate}" \ + http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to set ping rate!" + exit $rc +fi + +output=$(curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"enable\":true}" \ + http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state) +if [[ $output == *"Internal Server Error"* ]] +then + echo $output + exit 3 +else + echo $output +fi + + +exit 0 + ''' % suffix + return ping_cfg + + def pong_config(self): + suffix = '' + if use_epa: + suffix = '_with_epa' + pong_cfg = r''' +#!/usr/bin/bash + +# Rest API configuration +pong_mgmt_ip='' +pong_mgmt_port=18889 +# username= +# password= + +# VNF specific configuration +pong_server_ip='' +server_port=5555 + +# Make Rest API calls to configure VNF +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \ + http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to set server(own) info for pong!" + exit $rc +fi + +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"enable\":true}" \ + http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/adminstatus/state +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to enable pong service!" + exit $rc +fi + +exit 0 + ''' % suffix + return pong_cfg + + def default_config(self, const_vnfd, vnfd): + vnf_config = const_vnfd.vnf_configuration + + vnf_config.input_params.config_priority = 0 + vnf_config.input_params.config_delay = 0 + + # Select "script" configuration + vnf_config.config_type = 'script' + vnf_config.script.script_type = 'bash' + + if vnfd.name == 'pong_vnfd' or vnfd.name == 'pong_vnfd_with_epa': + vnf_config.input_params.config_priority = 1 + # First priority config delay will delay the entire NS config delay + vnf_config.input_params.config_delay = 60 + vnf_config.config_template = self.pong_config() + if vnfd.name == 'ping_vnfd' or vnfd.name == 'ping_vnfd_with_epa': + vnf_config.input_params.config_priority = 2 + vnf_config.config_template = self.ping_config() + ## Remove this - test only + ## vnf_config.config_access.mgmt_ip_address = '1.1.1.1' + + print("### TBR ###", vnfd.name, "vng_config = ", vnf_config) + + def compose(self, vnfd_list, cpgroup_list): + self.descriptor = RwNsdYang.YangData_Nsd_NsdCatalog() + self.id = str(uuid.uuid1()) + nsd = self.descriptor.nsd.add() + nsd.id = self.id + nsd.name = self.name + nsd.short_name = self.name + nsd.vendor = 'RIFT.io' + nsd.description = 'Toy NS' + nsd.version = '1.0' + nsd.input_parameter_xpath.append( + NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:vendor", + ) + ) + + for cpgroup in cpgroup_list: + vld = nsd.vld.add() + vld.id = str(uuid.uuid1()) + vld.name = 'ping_pong_vld' #hard coded + vld.short_name = vld.name + vld.vendor = 'RIFT.io' + vld.description = 'Toy VL' + vld.version = '1.0' + vld.type_yang = 'ELAN' + + for cp in cpgroup: + cpref = vld.vnfd_connection_point_ref.add() + cpref.member_vnf_index_ref = cp[0] + cpref.vnfd_id_ref = cp[1] + cpref.vnfd_connection_point_ref = cp[2] + + 
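+        # Each entry of cpgroup_list is a list of (member_vnf_index, vnfd_id,
+        # connection_point_name) tuples, built in generate_ping_pong_descriptors()
+        # below; cp[0] above therefore has to agree with the member-vnf-index values
+        # assigned to the constituent VNFDs in the loop that follows.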
member_vnf_index = 1 + for vnfd in vnfd_list: + for i in range(vnfd.instance_count): + constituent_vnfd = nsd.constituent_vnfd.add() + constituent_vnfd.member_vnf_index = member_vnf_index + + constituent_vnfd.vnfd_id_ref = vnfd.descriptor.vnfd[0].id + self.default_config(constituent_vnfd, vnfd) + member_vnf_index += 1 + + def write_to_file(self, outdir, output_format): + dirpath = "%s/%s/nsd" % (outdir, self.name) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + super(NetworkService, self).write_to_file(["nsd", "rw-nsd"], + "%s/%s/nsd" % (outdir, self.name), + output_format) + + +def get_ping_mon_params(path): + return [ + { + 'id': '1', + 'name': 'ping-request-tx-count', + 'json_query_method': "NAMEKEY", + 'value_type': "INT", + 'description': 'no of ping requests', + 'group_tag': 'Group1', + 'widget_type': 'COUNTER', + 'units': 'packets' + }, + + { + 'id': '2', + 'name': 'ping-response-rx-count', + 'json_query_method': "NAMEKEY", + 'value_type': "INT", + 'description': 'no of ping responses', + 'group_tag': 'Group1', + 'widget_type': 'COUNTER', + 'units': 'packets' + }, + ] + + +def get_pong_mon_params(path): + return [ + { + 'id': '1', + 'name': 'ping-request-rx-count', + 'json_query_method': "NAMEKEY", + 'value_type': "INT", + 'description': 'no of ping requests', + 'group_tag': 'Group1', + 'widget_type': 'COUNTER', + 'units': 'packets' + }, + + { + 'id': '2', + 'name': 'ping-response-tx-count', + 'json_query_method': "NAMEKEY", + 'value_type': "INT", + 'description': 'no of ping responses', + 'group_tag': 'Group1', + 'widget_type': 'COUNTER', + 'units': 'packets' + }, + ] + +def generate_ping_pong_descriptors(fmt="json", + write_to_file=False, + out_dir="./", + pingcount=NUM_PING_INSTANCES, + external_vlr_count=1, + internal_vlr_count=0, + num_vnf_vms=1, + ping_md5sum=None, + pong_md5sum=None, + ): + # List of connection point groups + # Each connection point group refers to a virtual link + # the CP group consists of tuples of connection points + cpgroup_list = [] + for i in range(external_vlr_count): + cpgroup_list.append([]) + + if use_epa: + suffix = '_with_epa' + else: + suffix = '' + + ping = VirtualNetworkFunction("ping_vnfd%s" % (suffix), pingcount) + #ping = VirtualNetworkFunction("ping_vnfd", pingcount) + ping.compose( + "Fedora-x86_64-20-20131211.1-sda-ping.qcow2", + PING_USERDATA_FILE, + "api/v1/ping/stats", + get_ping_mon_params("api/v1/ping/stats"), + mon_port=18888, + mgmt_port=18888, + num_vlr_count=external_vlr_count, + num_ivlr_count=internal_vlr_count, + num_vms=num_vnf_vms, + image_md5sum=ping_md5sum, + ) + + pong = VirtualNetworkFunction("pong_vnfd%s" % (suffix)) + #pong = VirtualNetworkFunction("pong_vnfd") + pong.compose( + "Fedora-x86_64-20-20131211.1-sda-pong.qcow2", + PONG_USERDATA_FILE, + "api/v1/pong/stats", + get_pong_mon_params("api/v1/pong/stats"), + mon_port=18889, + mgmt_port=18889, + num_vlr_count=external_vlr_count, + num_ivlr_count=internal_vlr_count, + num_vms=num_vnf_vms, + image_md5sum=pong_md5sum, + ) + + # Initialize the member VNF index + member_vnf_index = 1 + + # define the connection point groups + for index, cp_group in enumerate(cpgroup_list): + desc_id = ping.descriptor.vnfd[0].id + filename = 'ping_vnfd{}/cp{}'.format(suffix, index) + + for idx in range(pingcount): + cp_group.append(( + member_vnf_index, + desc_id, + filename, + )) + + member_vnf_index += 1 + + desc_id = pong.descriptor.vnfd[0].id + filename = 'pong_vnfd{}/cp{}'.format(suffix, index) + + cp_group.append(( + member_vnf_index, + desc_id, + filename, + )) + + 
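+        # With the defaults (pingcount=1, one external VLR, no EPA) this yields a
+        # single connection point group that looks roughly like
+        #   [(1, <ping vnfd id>, 'ping_vnfd/cp0'), (2, <pong vnfd id>, 'pong_vnfd/cp0')]
+        # i.e. ping instances take member indices 1..pingcount and pong takes the
+        # next index.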
member_vnf_index += 1 + + vnfd_list = [ping, pong] + nsd_catalog = NetworkService("ping_pong_nsd%s" % (suffix)) + #nsd_catalog = NetworkService("ping_pong_nsd") + nsd_catalog.compose(vnfd_list, cpgroup_list) + + if write_to_file: + ping.write_to_file(out_dir, fmt) + pong.write_to_file(out_dir, fmt) + nsd_catalog.write_to_file(out_dir, fmt) + + return (ping, pong, nsd_catalog) + +def main(argv=sys.argv[1:]): + global outdir, output_format, use_epa + parser = argparse.ArgumentParser() + parser.add_argument('-o', '--outdir', default='.') + parser.add_argument('-f', '--format', default='json') + parser.add_argument('-e', '--epa', action="store_true", default=False) + parser.add_argument('-n', '--pingcount', default=NUM_PING_INSTANCES) + parser.add_argument('--ping-image-md5') + parser.add_argument('--pong-image-md5') + args = parser.parse_args() + outdir = args.outdir + output_format = args.format + use_epa = args.epa + pingcount = args.pingcount + + generate_ping_pong_descriptors(args.format, True, args.outdir, pingcount, + ping_md5sum=args.ping_image_md5, pong_md5sum=args.pong_image_md5) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/foss.txt b/modules/core/mano/foss.txt new file mode 100644 index 0000000..6d1ef78 --- /dev/null +++ b/modules/core/mano/foss.txt @@ -0,0 +1 @@ +RIFT.core, rwmc/mock/node_modules/autobahn, AutobahnJS, MIT, https://github.com/crossbario/autobahn-js diff --git a/modules/core/mano/manifest/LICENSE b/modules/core/mano/manifest/LICENSE new file mode 100644 index 0000000..e69de29 diff --git a/modules/core/mano/models/CMakeLists.txt b/modules/core/mano/models/CMakeLists.txt new file mode 100644 index 0000000..cc9bed6 --- /dev/null +++ b/modules/core/mano/models/CMakeLists.txt @@ -0,0 +1,19 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 2014/12/11 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME models) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + +set(subdirs + plugins + openmano + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/models/openmano/CMakeLists.txt b/modules/core/mano/models/openmano/CMakeLists.txt new file mode 100644 index 0000000..296dc6d --- /dev/null +++ b/modules/core/mano/models/openmano/CMakeLists.txt @@ -0,0 +1,15 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 2014/12/11 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + bin + src + python + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/models/openmano/bin/CMakeLists.txt b/modules/core/mano/models/openmano/bin/CMakeLists.txt new file mode 100644 index 0000000..07472ff --- /dev/null +++ b/modules/core/mano/models/openmano/bin/CMakeLists.txt @@ -0,0 +1,15 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 1/11/2015 +# + +install( + PROGRAMS + openmano + openmano_cleanup.sh + add_corporation.py + DESTINATION usr/bin + COMPONENT ${PKG_LONG_NAME} +) diff --git a/modules/core/mano/models/openmano/bin/add_corporation.py b/modules/core/mano/models/openmano/bin/add_corporation.py new file mode 100755 index 0000000..511369a --- /dev/null +++ b/modules/core/mano/models/openmano/bin/add_corporation.py @@ -0,0 +1,528 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import ipaddress +import itertools +import jujuclient +import logging +import sys +import time +import yaml +import hashlib + + +logging.basicConfig(filename="/tmp/rift_ns_add_corp.log", level=logging.DEBUG) +logger = logging.getLogger() + +ch = logging.StreamHandler() +ch.setLevel(logging.INFO) + +# create formatter and add it to the handlers +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +ch.setFormatter(formatter) +logger.addHandler(ch) + + +dry_run = False + +class JujuActionError(Exception): + pass + + +class JujuClient(object): + """Class for executing Juju actions """ + def __init__(self, ip, port, user, passwd): + self._ip = ip + self._port = port + self._user = user + self._passwd = passwd + + endpoint = 'wss://%s:%d' % (ip, port) + logger.debug("Using endpoint=%s", endpoint) + if dry_run: + return + self.env = jujuclient.Environment(endpoint) + self.env.login(passwd, user) + + def get_service(self, name): + return self.env.get_service(name) + + def _get_units(self, name): + """ + Get the units associated with service + """ + units = self.env.status(name)['Services'][name]['Units'] + units = list(units.keys()) + + # convert to a friendly format for juju-python-client + units[:] = [('unit-%s' % u).replace('/', '-') for u in units] + return units + + def exec_action(self, name, action_name, params, block=False): + logger.debug("execute actiion %s using params %s", action_name, params) + if dry_run: + return + + actions = jujuclient.Actions(self.env) + results = actions.enqueue_units(self._get_units(name), + action_name, + params) + if not block: + return results + + if 'error' in results['results'][0].keys(): + raise JujuActionError("Juju action error: %s" % results['results'][0]) + + action = results['results'][0]['action'] + info = actions.info([action]) + i = 0 + logging.debug("Initial action results: %s", results['results'][0]) + while info['results'][0]['status'] not in ['completed', 'failed']: + time.sleep(1) + info = actions.info([action]) + + # break out if the action doesn't complete in 10 secs + i += 1 + if i == 10: + raise JujuActionError("Juju action timed out after 30 seconds") + + if info['results'][0]['status'] != 'completed': + raise JujuActionError("Action %s failure: %s" % (action_name, info['results'][0])) + + return info + + +class CharmAction(object): + def __init__(self, deployed_name, action_name, action_params=None): + self._deployed_name = deployed_name + self._action_name = action_name + self._params = action_params if action_params is not None else [] + + def execute(self, juju_client): + logger.info("Executing charm (%s) action (%s) with params (%s)", + self._deployed_name, self._action_name, self._params) + try: + info = juju_client.exec_action( + name=self._deployed_name, + action_name=self._action_name, + params=self._params, + block=True + ) + + except JujuActionError as e: + logger.error("Juju charm (%s) action (%s) failed: %s", + self._deployed_name, self._action_name, str(e)) + raise + + logger.debug("Juju charm (%s) action (%s) 
success.", + self._deployed_name, self._action_name) + + +class DeployedProxyCharm(object): + def __init__(self, juju_client, service_name, mgmt_ip=None, charm_name=None): + self._juju_client = juju_client + self.service_name = service_name + self.mgmt_ip = mgmt_ip + self.charm_name = charm_name + + def do_action(self, action_name, action_params={}): + action = CharmAction(self.service_name, action_name, action_params) + action.execute(self._juju_client) + + +class SixWindPEProxyCharm(DeployedProxyCharm): + USER = "root" + PASSWD = "6windos" + + def configure_interface(self, iface_name, ipv4_interface_str=None): + action = "configure-interface" + params = {'iface-name', iface_name} + + if ipv4_interface_str is None: + # Use ipaddress module to validate ipv4 interface string + ip_intf = ipaddress.IPv4Interface(ipv4_interface_str) + params["cidr"] = ip_intf.with_prefixlen + + self.do_action(action, params) + else: + self.do_action(action, params) + + + def add_corporation(self, domain_name, user_iface_name, vlan_id, corp_gw, + corp_net, local_net="10.255.255.0/24", local_net_area="0"): + logger.debug("Add corporation called with params: %s", locals()) + + action = "add-corporation" + params = { + "domain-name": domain_name, + "iface-name": user_iface_name, + "vlan-id": int(vlan_id), + "cidr": corp_net, + "area": corp_gw, + "subnet-cidr":local_net, + "subnet-area":local_net_area, + } + + self.do_action(action, params) + + def connect_domains(self, domain_name, core_iface_name, local_ip, remote_ip, + internal_local_ip, internal_remote_ip, tunnel_name, + tunnel_key, tunnel_type="gre"): + + logger.debug("Connect domains called with params: %s", locals()) + + action = "connect-domains" + params = { + "domain-name": domain_name, + "iface-name": core_iface_name, + "tunnel-name": tunnel_name, + "local-ip": local_ip, + "remote-ip": remote_ip, + "tunnel-key": tunnel_key, + "internal-local-ip": internal_local_ip, + "internal-remote-ip": internal_remote_ip, + "tunnel-type":tunnel_type, + } + + self.do_action(action, params) + + +class PEGroupConfig(object): + def __init__(self, pe_group_cfg): + self._pe_group_cfg = pe_group_cfg + + def _get_param_value(self, param_name): + for param in self._pe_group_cfg["parameter"]: + if param["name"] == param_name: + return param["value"] + + raise ValueError("PE param not found: %s" % param_name) + + @property + def vlan_id(self): + return self._get_param_value("Vlan ID") + + @property + def interface_name(self): + return self._get_param_value("Interface Name") + + @property + def corp_network(self): + return self._get_param_value("Corp. Network") + + @property + def corp_gateway(self): + return self._get_param_value("Corp. 
Gateway") + + +class AddCorporationRequest(object): + def __init__(self, add_corporation_rpc): + self._add_corporation_rpc = add_corporation_rpc + + @property + def name(self): + return self._add_corporation_rpc["name"] + + @property + def param_groups(self): + return self._add_corporation_rpc["parameter_group"] + + @property + def params(self): + return self._add_corporation_rpc["parameter"] + + @property + def corporation_name(self): + for param in self.params: + if param["name"] == "Corporation Name": + return param["value"] + + raise ValueError("Could not find 'Corporation Name' field") + + @property + def tunnel_key(self): + for param in self.params: + if param["name"] == "Tunnel Key": + return param["value"] + + raise ValueError("Could not find 'Tunnel Key' field") + + def get_pe_parameter_group_map(self): + group_name_map = {} + for group in self.param_groups: + group_name_map[group["name"]] = group + + return group_name_map + + def get_parameter_name_map(self): + name_param_map = {} + for param in self.params: + name_param_map[param["name"]] = param + + return name_param_map + + @classmethod + def from_yaml_cfg(cls, yaml_hdl): + config = yaml.load(yaml_hdl) + return cls( + config["rpc_ip"], + ) + + +class JujuVNFConfig(object): + def __init__(self, vnfr_index_map, vnf_name_map, vnf_init_config_map): + self._vnfr_index_map = vnfr_index_map + self._vnf_name_map = vnf_name_map + self._vnf_init_config_map = vnf_name_map + + def get_service_name(self, vnf_index): + for vnfr_id, index in self._vnfr_index_map.items(): + if index != vnf_index: + continue + + return self._vnf_name_map[vnfr_id] + + raise ValueError("VNF Index not found: %s" % vnf_index) + + def get_vnfr_id(self, vnf_index): + for vnfr_id, index in self._vnfr_index_map.items(): + if index != vnf_index: + continue + + return vnfr_id + + raise ValueError("VNF Index not found: %s" % vnf_index) + + @classmethod + def from_yaml_cfg(cls, yaml_hdl): + config = yaml.load(yaml_hdl) + return cls( + config["vnfr_index_map"], + config["unit_names"], + config["init_config"], + ) + + +class JujuClientConfig(object): + def __init__(self, juju_ctrl_cfg): + self._juju_ctrl_cfg = juju_ctrl_cfg + + @property + def name(self): + return self._juju_ctrl_cfg["name"] + + @property + def host(self): + return self._juju_ctrl_cfg["host"] + + @property + def port(self): + return self._juju_ctrl_cfg["port"] + + @property + def user(self): + return self._juju_ctrl_cfg["user"] + + @property + def secret(self): + return self._juju_ctrl_cfg["secret"] + + @classmethod + def from_yaml_cfg(cls, yaml_hdl): + config = yaml.load(yaml_hdl) + return cls( + config["config_agent"], + ) + + +class OSM_MWC_Demo(object): + VNF_INDEX_NAME_MAP = { + "PE1": 1, + "PE2": 2, + "PE3": 3, + } + + CORE_PE_CONN_MAP = { + "PE1": { + "PE2": { + "ifacename": "eth1", + "ip": "10.10.10.9", + "mask": "30", + "internal_local_ip": "10.255.255.1" + }, + "PE3": { + "ifacename": "eth2", + "ip": "10.10.10.1", + "mask": "30", + "internal_local_ip": "10.255.255.1" + }, + }, + "PE2": { + "PE1": { + "ifacename": "eth1", + "ip": "10.10.10.10", + "mask": "30", + "internal_local_ip": "10.255.255.2" + }, + "PE3": { + "ifacename": "eth2", + "ip": "10.10.10.6", + "mask": "30", + "internal_local_ip": "10.255.255.2" + } + }, + "PE3": { + "PE1": { + "ifacename": "eth1", + "ip": "10.10.10.2", + "mask": "30", + "internal_local_ip": "10.255.255.3" + }, + "PE2": { + "ifacename": "eth2", + "ip": "10.10.10.5", + "mask": "30", + "internal_local_ip": "10.255.255.3" + } + } + } + + @staticmethod + def 
get_pe_vnf_index(pe_name):
+        if pe_name not in OSM_MWC_Demo.VNF_INDEX_NAME_MAP:
+            raise ValueError("Could not find PE name: %s" % pe_name)
+
+        return OSM_MWC_Demo.VNF_INDEX_NAME_MAP[pe_name]
+
+    @staticmethod
+    def get_src_core_iface(src_pe_name, dest_pe_name):
+        return OSM_MWC_Demo.CORE_PE_CONN_MAP[src_pe_name][dest_pe_name]["ifacename"]
+
+    @staticmethod
+    def get_local_ip(src_pe_name, dest_pe_name):
+        return OSM_MWC_Demo.CORE_PE_CONN_MAP[src_pe_name][dest_pe_name]["ip"]
+
+    @staticmethod
+    def get_remote_ip(src_pe_name, dest_pe_name):
+        return OSM_MWC_Demo.CORE_PE_CONN_MAP[dest_pe_name][src_pe_name]["ip"]
+
+    @staticmethod
+    def get_internal_local_ip(src_pe_name, dest_pe_name):
+        return OSM_MWC_Demo.CORE_PE_CONN_MAP[src_pe_name][dest_pe_name]["internal_local_ip"]
+
+    @staticmethod
+    def get_internal_remote_ip(src_pe_name, dest_pe_name):
+        return OSM_MWC_Demo.CORE_PE_CONN_MAP[dest_pe_name][src_pe_name]["internal_local_ip"]
+
+
+def add_pe_corporation(src_pe_name, src_pe_charm, src_pe_group_cfg, corporation_name):
+    domain_name = corporation_name
+    vlan_id = src_pe_group_cfg.vlan_id
+    corp_gw = src_pe_group_cfg.corp_gateway
+    corp_net = src_pe_group_cfg.corp_network
+
+    user_iface = src_pe_group_cfg.interface_name
+
+    src_pe_charm.add_corporation(domain_name, user_iface, vlan_id, corp_gw, corp_net)
+
+
+def connect_pe_domains(src_pe_name, src_pe_charm, dest_pe_name, corporation_name, tunnel_key):
+    domain_name = corporation_name
+    core_iface_name = OSM_MWC_Demo.get_src_core_iface(src_pe_name, dest_pe_name)
+    local_ip = OSM_MWC_Demo.get_local_ip(src_pe_name, dest_pe_name)
+    remote_ip = OSM_MWC_Demo.get_remote_ip(src_pe_name, dest_pe_name)
+    internal_local_ip = OSM_MWC_Demo.get_internal_local_ip(src_pe_name, dest_pe_name)
+    internal_remote_ip = OSM_MWC_Demo.get_internal_remote_ip(src_pe_name, dest_pe_name)
+
+    src_pe_idx = OSM_MWC_Demo.get_pe_vnf_index(src_pe_name)
+    dest_pe_idx = OSM_MWC_Demo.get_pe_vnf_index(dest_pe_name)
+
+    # Create a 4 digit hash of the corporation name
+    hash_object = hashlib.md5(corporation_name.encode())
+    corp_hash = hash_object.hexdigest()[-4:]
+
+    # The tunnel name is the 4 digit corporation name hash followed by the
+    # src index and dest index. When there are fewer than 10 PEs this
+    # creates an 8 character tunnel name, which is the limit.
+    tunnel_name = "".join([corp_hash, "_", str(src_pe_idx), str(dest_pe_idx)])
+
+    src_pe_charm.connect_domains(domain_name, core_iface_name, local_ip, remote_ip,
+                                 internal_local_ip, internal_remote_ip, tunnel_name,
+                                 tunnel_key)
+
+
+def main(argv=sys.argv[1:]):
+    parser = argparse.ArgumentParser()
+    parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+    parser.add_argument("--dry-run", action="store_true")
+    parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
+    args = parser.parse_args()
+    if args.verbose:
+        ch.setLevel(logging.DEBUG)
+
+    global dry_run
+    dry_run = args.dry_run
+
+    yaml_str = args.yaml_cfg_file.read()
+
+    juju_cfg = JujuClientConfig.from_yaml_cfg(yaml_str)
+    juju_client = JujuClient(juju_cfg.host, juju_cfg.port, juju_cfg.user, juju_cfg.secret)
+
+    juju_vnf_config = JujuVNFConfig.from_yaml_cfg(yaml_str)
+
+    rpc_request = AddCorporationRequest.from_yaml_cfg(yaml_str)
+    pe_param_group_map = rpc_request.get_pe_parameter_group_map()
+
+    pe_name_charm_map = {}
+    for pe_name, pe_group_cfg in pe_param_group_map.items():
+        # The PE name (e.g. PE1) must be in the parameter group name so we can correlate
+        # to an actual VNF in the descriptor. 
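+        # For reference, the YAML passed on the command line is expected to look
+        # roughly like this (keys taken from the from_yaml_cfg() helpers above;
+        # the values are illustrative placeholders only):
+        #
+        #   config_agent:
+        #     name: <agent name>
+        #     host: <juju controller ip>
+        #     port: <api port>
+        #     user: <user>
+        #     secret: <password>
+        #   vnfr_index_map: {<vnfr id>: 1, ...}
+        #   unit_names: {<vnfr id>: <deployed charm service name>, ...}
+        #   init_config: {...}
+        #   rpc_ip:
+        #     name: <rpc name>
+        #     parameter:
+        #       - {name: Corporation Name, value: <corp>}
+        #       - {name: Tunnel Key, value: <key>}
+        #     parameter_group:
+        #       - name: PE1
+        #         parameter:
+        #           - {name: Vlan ID, value: <vlan>}
+        #           - {name: Interface Name, value: <iface>}
+        #           - {name: Corp. Network, value: <cidr>}
+        #           - {name: Corp. Gateway, value: <gw>}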
+ pe_vnf_index = OSM_MWC_Demo.get_pe_vnf_index(pe_name) + + # Get the deployed VNFR charm service name + pe_charm_service_name = juju_vnf_config.get_service_name(pe_vnf_index) + + pe_name_charm_map[pe_name] = SixWindPEProxyCharm(juju_client, pe_charm_service_name) + + # At this point we have SixWindPEProxyCharm() instances for each PE and each + # PE param group configuration. + for src_pe_name in pe_param_group_map: + add_pe_corporation( + src_pe_name=src_pe_name, + src_pe_charm=pe_name_charm_map[src_pe_name], + src_pe_group_cfg=PEGroupConfig(pe_param_group_map[src_pe_name]), + corporation_name=rpc_request.corporation_name + ) + + # Create a permutation of all PE's involved in this topology and connect + # them together by creating tunnels with matching keys + for src_pe_name, dest_pe_name in itertools.permutations(pe_name_charm_map, 2): + connect_pe_domains( + src_pe_name=src_pe_name, + src_pe_charm=pe_name_charm_map[src_pe_name], + dest_pe_name=dest_pe_name, + corporation_name=rpc_request.corporation_name, + tunnel_key=rpc_request.tunnel_key, + ) + +if __name__ == "__main__": + try: + main() + except Exception as e: + logger.exception("Caught exception when executing add_corporation ns") + raise \ No newline at end of file diff --git a/modules/core/mano/models/openmano/bin/openmano b/modules/core/mano/models/openmano/bin/openmano new file mode 100755 index 0000000..a453b10 --- /dev/null +++ b/modules/core/mano/models/openmano/bin/openmano @@ -0,0 +1,1069 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# PYTHON_ARGCOMPLETE_OK + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +openmano client used to interact with openmano-server (openmanod) +''' +__author__="Alfonso Tierno, Gerardo Garcia" +__date__ ="$09-oct-2014 09:09:48$" +__version__="0.4.1-r449" +version_date="Dec 2015" + +import os +import argparse +import requests +import json +import yaml +import logging + +class ArgumentParserError(Exception): pass + +class ThrowingArgumentParser(argparse.ArgumentParser): + def error(self, message): + print "Error: %s" %message + print + self.print_usage() + #self.print_help() + print + print "Type 'openmano -h' for help" + raise ArgumentParserError + + +def config(args): + print "OPENMANO_HOST: %s" %mano_host + print "OPENMANO_PORT: %s" %mano_port + print "OPENMANO_TENANT: %s" %mano_tenant + print "OPENMANO_DATACENTER: %s" %str (mano_datacenter) + + +def _print_verbose(mano_response, verbose_level=0): + content = mano_response.json() + result = 0 if mano_response.status_code==200 else mano_response.status_code + if type(content)!=dict or len(content)!=1: + #print "Non expected format output" + print str(content) + return result + + val=content.values()[0] + if type(val)==str: + print val + return result + elif type(val) == list: + content_list = val + elif type(val)==dict: + content_list = [val] + else: + #print "Non expected dict/list format output" + print str(content) + return result + + #print content_list + if verbose_level==None: + verbose_level=0 + if verbose_level >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + + if mano_response.status_code == 200: + for content in content_list: + myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20)) + if verbose_level >=1: + myoutput += " " + content['created_at'].ljust(20) + if verbose_level >=2: + new_line='\n' + if 'type' in content and content['type']!=None: + myoutput += new_line + " Type: " + content['type'].ljust(29) + new_line='' + if 'description' in content and content['description']!=None: + myoutput += new_line + " Description: " + content['description'].ljust(20) + print myoutput + else: + print content['error']['description'] + return result + +def parser_json_yaml(file_name): + try: + f = file(file_name, "r") + text = f.read() + f.close() + except Exception as e: + return (False, str(e)) + + #Read and parse file + if file_name[-5:]=='.yaml' or file_name[-4:]=='.yml' or (file_name[-5:]!='.json' and '\t' not in text): + try: + config = yaml.load(text) + except yaml.YAMLError as exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at line:%s column:%s" % (mark.line+1, mark.column+1) + return (False, "Error loading file '"+file_name+"' yaml format error" + error_pos) + else: #json + try: + config = json.loads(text) + except Exception as e: + return (False, "Error loading file '"+file_name+"' json format error " + str(e) ) + + return True, config + +def _load_file_or_yaml(content): + ''' + 'content' can be or a yaml/json file or a text containing a yaml/json text format + This function autodetect, trying to load and parse the file, + if fails trying to parse the 'content' text + Returns the dictionary once parsed, or print an error and finish the program + ''' + #Check config file exists + if os.path.isfile(content): + r,payload = parser_json_yaml(content) + if not r: + print payload + exit(-1) + elif "{" in content or ":" in content: + try: + payload = yaml.load(content) + except 
yaml.YAMLError as exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1) + print "Error loading yaml/json text"+error_pos + exit (-1) + else: + print "'%s' is neither a valid file nor a yaml/json content" % content + exit(-1) + return payload + +def vnf_create(args): + #print "vnf-create",args + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + myvnf = _load_file_or_yaml(args.file) + + if args.name or args.description or args.image_path: + #print args.name + try: + if args.name: + myvnf['vnf']['name'] = args.name + if args.description: + myvnf['vnf']['description'] = args.description + if args.image_path: + index=0 + for image_path_ in args.image_path.split(","): + #print "image-path", image_path_ + myvnf['vnf']['VNFC'][index]['VNFC image']=image_path_ + index=index+1 + except (KeyError, TypeError), e: + if str(e)=='vnf': error_pos= "missing field 'vnf'" + elif str(e)=='name': error_pos= "missing field 'vnf':'name'" + elif str(e)=='description': error_pos= "missing field 'vnf':'description'" + elif str(e)=='VNFC': error_pos= "missing field 'vnf':'VNFC'" + elif str(e)==str(index): error_pos= "field 'vnf':'VNFC' must be an array" + elif str(e)=='VNFC image': error_pos= "missing field 'vnf':'VNFC'['VNFC image']" + else: error_pos="wrong format" + print "Wrong VNF descriptor: " + error_pos + return -1 + payload_req = json.dumps(myvnf) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/vnfs" %(mano_host, mano_port, mano_tenant) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + + return _print_verbose(mano_response, args.verbose) + +def vnf_list(args): + #print "vnf-list",args + if args.name: + URLrequest = "http://%s:%s/openmano/%s/vnfs/%s" %(mano_host, mano_port, mano_tenant, args.name) + else: + URLrequest = "http://%s:%s/openmano/%s/vnfs" %(mano_host, mano_port, mano_tenant) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose==None: + args.verbose=0 + result = 0 if mano_response.status_code==200 else mano_response.status_code + if mano_response.status_code == 200: + if not args.name: + if args.verbose >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + if len(content['vnfs']) == 0: + print "No VNFs were found." 
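+                # Like the other commands in this client, vnf-list returns 0 when
+                # openmano-server answers 200 and otherwise the HTTP status code
+                # (404 here when the catalogue is empty).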
+ return 404 #HTTP_Not_Found + for vnf in content['vnfs']: + myoutput = "%s %s" %(vnf['uuid'].ljust(38),vnf['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, vnf['created_at'].ljust(20)) + print myoutput + if args.verbose >=2: + print " Description: %s" %vnf['description'] + print " VNF descriptor file: %s" %vnf['path'] + else: + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + vnf = content['vnf'] + print "%s %s %s" %(vnf['uuid'].ljust(38),vnf['name'].ljust(20), vnf['created_at'].ljust(20)) + print " Description: %s" %vnf['description'] + print " VNF descriptor file: %s" %vnf['path'] + print " VMs:" + for vm in vnf['VNFC']: + #print " %s %s %s" %(vm['name'].ljust(20), vm['uuid'].ljust(38), vm['description'].ljust(30)) + print " %s %s" %(vm['name'].ljust(20), vm['description']) + if len(vnf['nets'])>0: + print " Internal nets:" + for net in vnf['nets']: + print " %s %s" %(net['name'].ljust(20), net['description']) + if len(vnf['external-connections'])>0: + print " External interfaces:" + for interface in vnf['external-connections']: + print " %s %s %s %s" %(interface['external_name'].ljust(20), interface['vm_name'].ljust(20), interface['internal_name'].ljust(20), \ + interface['vpci'].ljust(14)) + else: + print content['error']['description'] + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + +def vnf_delete(args): + #print "vnf-delete",args + if not args.force: + r = raw_input("Delete VNF %s (y/N)? " %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + URLrequest = "http://%s:%s/openmano/%s/vnfs/%s" %(mano_host, mano_port, mano_tenant, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def scenario_create(args): + #print "scenario-create",args + headers_req = {'content-type': 'application/yaml'} + myscenario = _load_file_or_yaml(args.file) + + if args.name: + myscenario['name'] = args.name + if args.description: + myscenario['description'] = args.description + payload_req = yaml.safe_dump(myscenario, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/scenarios" %(mano_host, mano_port, mano_tenant) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + return _print_verbose(mano_response, args.verbose) + +def scenario_list(args): + #print "scenario-list",args + if args.name: + URLrequest = "http://%s:%s/openmano/%s/scenarios/%s" %(mano_host, mano_port, mano_tenant, args.name) + else: + URLrequest = "http://%s:%s/openmano/%s/scenarios" %(mano_host, mano_port, mano_tenant) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose==None: + args.verbose=0 + + result = 0 if mano_response.status_code==200 else mano_response.status_code + if mano_response.status_code == 200: + if not args.name: + if args.verbose 
>= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + if len(content['scenarios']) == 0: + print "No scenarios were found." + return 404 #HTTP_Not_Found + for scenario in content['scenarios']: + myoutput = "%s %s" %(scenario['uuid'].ljust(38),scenario['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, scenario['created_at'].ljust(20)) + print myoutput + if args.verbose >=2: + print " Description: %s" %scenario['description'] + else: + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + scenario = content['scenario'] + myoutput = "%s %s %s" %(scenario['uuid'].ljust(38),scenario['name'].ljust(20), scenario['created_at'].ljust(20)) + print myoutput + print " Description: %s" %scenario['description'] + print " VNFs:" + for vnf in scenario['vnfs']: + print " %s %s %s" %(vnf['name'].ljust(20), vnf['vnf_id'].ljust(38), vnf['description']) + if len(scenario['nets'])>0: + print " Internal nets:" + for net in scenario['nets']: + if net['description'] is None: #if description does not exist, description is "-". Valid for external and internal nets. + net['description'] = '-' + if not net['external']: + print " %s %s %s" %(net['name'].ljust(20), net['uuid'].ljust(38), net['description'].ljust(30)) + print " External nets:" + for net in scenario['nets']: + if net['external']: + print " %s %s %s vim-id:%s" %(net['name'].ljust(20), net['uuid'].ljust(38), net['description'].ljust(30), net['vim_id']) + else: + print content['error']['description'] + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + +def scenario_delete(args): + #print "scenario-delete",args + if not args.force: + r = raw_input("Delete scenario %s (y/N)? 
" %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + URLrequest = "http://%s:%s/openmano/%s/scenarios/%s" %(mano_host, mano_port, mano_tenant, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def scenario_deploy(args): + #print "scenario-deploy",args + headers_req = {'content-type': 'application/json'} + action = {} + actionCmd="start" + if args.nostart: + actionCmd="reserve" + action[actionCmd] = {} + action[actionCmd]["instance_name"] = args.name + if args.datacenter != None: + action[actionCmd]["datacenter"] = args.datacenter + elif mano_datacenter != None: + action[actionCmd]["datacenter"] = mano_datacenter + + if args.description: + action[actionCmd]["description"] = args.description + payload_req = json.dumps(action, indent=4) + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/scenarios/%s/action" %(mano_host, mano_port, mano_tenant, args.scenario) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + + if mano_response.status_code == 200: + myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, content['created_at'].ljust(20)) + if args.verbose >=2: + myoutput = "%s %s %s" %(myoutput, content['description'].ljust(30)) + print myoutput + print "" + print "To check the status, run the following command:" + print "openmano instance-scenario-list " + else: + print content['error']['description'] + return result + +def scenario_verify(args): + #print "scenario-verify",args + headers_req = {'content-type': 'application/json'} + action = {} + action["verify"] = {} + action["verify"]["instance_name"] = "scen-verify-return5" + payload_req = json.dumps(action, indent=4) + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/scenarios/%s/action" %(mano_host, mano_port, mano_tenant, args.scenario) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def instance_create(args): + headers_req = {'content-type': 'application/yaml'} + myInstance={"instance": {}, "schema_version": "0.1"} + if args.file: + instance_dict = _load_file_or_yaml(args.file) + if "instance" not in instance_dict: + myInstance = {"instance": instance_dict, "schema_version": "0.1"} + else: + myInstance = instance_dict + if args.name: + myInstance["instance"]['name'] = args.name + if args.description: + 
myInstance["instance"]['description'] = args.description + if args.nostart: + myInstance["instance"]['action'] = "reserve" + if args.datacenter != None: + myInstance["instance"]["datacenter"] = args.datacenter + elif "datacenter" not in myInstance and mano_datacenter != None: + myInstance["instance"]["datacenter"] = mano_datacenter + if args.scenario != None: + myInstance["instance"]["scenario"] = args.scenario + elif "scenario" not in myInstance["instance"]: + print "you must provide an scenario in the file descriptor or with --scenario" + return -1 + if "name" not in myInstance["instance"]: + print "you must provide a name in the file descriptor or with --name" + return 1 + if args.net_map: + if "networks" not in myInstance["instance"]: + myInstance["instance"]["networks"] = {} + for net in args.net_map: + net_comma_list = net.split(",") + for net_comma in net_comma_list: + net_tuple = net_comma.split("=") + if len(net_tuple) != 2: + print "error at net-map. Expected net-scenario=net-datacenter. (%s)?" % net_comma + return + net_scenario = net_comma[0].strip() + net_datacenter = net_comma[1].strip() + if net_scenario not in myInstance["instance"]["networks"]: + myInstance["instance"]["networks"][net_scenario] = {} + myInstance["instance"]["networks"][net_scenario]["source"] = net_datacenter + + payload_req = yaml.safe_dump(myInstance, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) + logger.debug("openmano request: %s", payload_req) + URLrequest = "http://%s:%s/openmano/%s/instances" %(mano_host, mano_port, mano_tenant) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + + if mano_response.status_code == 200: + myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, content['created_at'].ljust(20)) + if args.verbose >=2: + myoutput = "%s %s %s" %(myoutput, content['description'].ljust(30)) + print myoutput + else: + print content['error']['description'] + return result + + +def instance_scenario_list(args): + #print "instance-scenario-list",args + if args.name: + URLrequest = "http://%s:%s/openmano/%s/instances/%s" %(mano_host, mano_port, mano_tenant, args.name) + else: + URLrequest = "http://%s:%s/openmano/%s/instances" %(mano_host, mano_port, mano_tenant) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose==None: + args.verbose=0 + + result = 0 if mano_response.status_code==200 else mano_response.status_code + if mano_response.status_code == 200: + if not args.name: + if args.verbose >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + if len(content['instances']) == 0: + print "No scenario instances were found." 
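+                # Typical usage (illustrative): the server to query is taken from the
+                # OPENMANO_HOST / OPENMANO_PORT / OPENMANO_TENANT environment variables
+                # read further below, e.g.
+                #   export OPENMANO_TENANT=<tenant uuid>
+                #   openmano instance-scenario-list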
+ return result + for instance in content['instances']: + myoutput = "%s %s" %(instance['uuid'].ljust(38),instance['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, instance['created_at'].ljust(20)) + print myoutput + if args.verbose >=2: + print "Description: %s" %instance['description'] + else: + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + instance = content + print "%s %s %s" %(instance['uuid'].ljust(38),instance['name'].ljust(20),instance['created_at'].ljust(20)) + print "Description: %s" %instance['description'] + print "Template scenario id: %s" %instance['scenario_id'] + print "Template scenario name: %s" %instance['scenario_name'] + print "---------------------------------------" + print "VNF instances: %d" %len(instance['vnfs']) + for vnf in instance['vnfs']: + #print " %s %s Template vnf name: %s Template vnf id: %s" %(vnf['uuid'].ljust(38), vnf['name'].ljust(20), vnf['vnf_name'].ljust(20), vnf['vnf_id'].ljust(38)) + print " %s %s Template vnf id: %s" %(vnf['uuid'].ljust(38), vnf['vnf_name'].ljust(20), vnf['vnf_id'].ljust(38)) + if len(instance['nets'])>0: + print "---------------------------------------" + print "Internal nets:" + for net in instance['nets']: + if not net['external']: + print " %s %s VIM ID: %s" %(net['uuid'].ljust(38), net['status'].ljust(12), net['vim_net_id']) + print "---------------------------------------" + print "External nets:" + for net in instance['nets']: + if net['external']: + print " %s %s VIM ID: %s" %(net['uuid'].ljust(38), net['status'].ljust(12), net['vim_net_id']) + print "---------------------------------------" + print "VM instances:" + for vnf in instance['vnfs']: + for vm in vnf['vms']: + print " %s %s %s %s VIM ID: %s" %(vm['uuid'].ljust(38), vnf['vnf_name'].ljust(20), vm['name'].ljust(20), vm['status'].ljust(12), vm['vim_vm_id']) + else: + print content['error']['description'] + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + +def instance_scenario_status(args): + print "instance-scenario-status" + return 0 + +def instance_scenario_delete(args): + #print "instance-scenario-delete",args + if not args.force: + r = raw_input("Delete scenario instance %s (y/N)? 
" %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return + URLrequest = "http://%s:%s/openmano/%s/instances/%s" %(mano_host, mano_port, mano_tenant, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def instance_scenario_action(args): + #print "instance-scenario-action", args + action={} + action[ args.action ] = args.param + if args.vnf: + action["vnfs"] = args.vnf + if args.vm: + action["vms"] = args.vm + + headers_req = {'content-type': 'application/json'} + payload_req = json.dumps(action, indent=4) + URLrequest = "http://%s:%s/openmano/%s/instances/%s/action" %(mano_host, mano_port, mano_tenant, args.name) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + for uuid,c in content.iteritems(): + print "%s %s %s" %(uuid.ljust(38), c['name'].ljust(20),c['description'].ljust(20)) + else: + print content['error']['description'] + return result + + +def instance_vnf_list(args): + print "instance-vnf-list" + return 0 + +def instance_vnf_status(args): + print "instance-vnf-status" + return 0 + +def tenant_create(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + tenant_dict={"name": args.name} + if args.description!=None: + tenant_dict["description"] = args.description + payload_req = json.dumps( {"tenant": tenant_dict }) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/tenants" %(mano_host, mano_port) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + return _print_verbose(mano_response, args.verbose) + +def tenant_list(args): + #print "tenant-list",args + if args.name: + URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, args.name) + else: + URLrequest = "http://%s:%s/openmano/tenants" %(mano_host, mano_port) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + if args.name!=None: + args.verbose += 1 + return _print_verbose(mano_response, args.verbose) + +def tenant_delete(args): + #print "tenant-delete",args + if not args.force: + r = raw_input("Delete tenant %s (y/N)? 
" %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def datacenter_attach(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + + datacenter_dict={} + if args.vim_tenant_id != None: + datacenter_dict['vim_tenant'] = args.vim_tenant_id + if args.vim_tenant_name != None: + datacenter_dict['vim_tenant_name'] = args.vim_tenant_name + if args.user != None: + datacenter_dict['vim_username'] = args.user + if args.password != None: + datacenter_dict['vim_password'] = args.password + payload_req = json.dumps( {"datacenter": datacenter_dict }) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, mano_tenant, args.name) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + result = _print_verbose(mano_response, args.verbose) + #provide addional information if error + if mano_response.status_code != 200: + content = mano_response.json() + if "already in use for 'name'" in content['error']['description'] and \ + "to database vim_tenants table" in content['error']['description']: + print "Try to specify a different name with --vim-tenant-name" + return result + +def datacenter_detach(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, mano_tenant, args.name) + mano_response = requests.delete(URLrequest, headers=headers_req) + logger.debug("openmano response: %s", mano_response.text ) + content = mano_response.json() + #print json.dumps(content, indent=4) + result = 0 if mano_response.status_code==200 else mano_response.status_code + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def datacenter_create(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + datacenter_dict={"name": args.name, "vim_url": args.url} + if args.description!=None: + datacenter_dict["description"] = args.description + if args.type!=None: + datacenter_dict["type"] = args.type + if args.url!=None: + datacenter_dict["vim_url_admin"] = args.url_admin + if args.config!=None: + datacenter_dict["config"] = _load_file_or_yaml(args.config) + payload_req = json.dumps( {"datacenter": datacenter_dict }) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/datacenters" %(mano_host, mano_port) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + return _print_verbose(mano_response, args.verbose) + +def datacenter_delete(args): + #print "datacenter-delete",args + if not args.force: + r = raw_input("Delete datacenter %s (y/N)? 
" %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + URLrequest = "http://%s:%s/openmano/datacenters/%s" %(mano_host, mano_port, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def datacenter_list(args): + #print "datacenter-list",args + tenant='any' if args.all else mano_tenant + if args.name: + URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, tenant, args.name) + else: + URLrequest = "http://%s:%s/openmano/%s/datacenters" %(mano_host, mano_port, tenant) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + if args.name!=None: + args.verbose += 1 + return _print_verbose(mano_response, args.verbose) + + +def datacenter_net_action(args): + #print "datacenter-net-action",args + if args.verbose==None: + args.verbose=0 + if args.action=="net-list": + URLrequest = "http://%s:%s/openmano/datacenters/%s/networks" %(mano_host, mano_port, args.datacenter) + mano_response = requests.get(URLrequest) + if args.datacenter!=None: + args.verbose += 1 + else: + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/action" %(mano_host, mano_port, mano_tenant, args.datacenter) + if not args.force: + if args.action=="net-update": + r = raw_input(" Edit datacenter " + args.datacenter + " (y/N)? ") + elif args.action=="net-delete": + r = raw_input(" Delete datacenter " + args.datacenter + " net " + args.net +" (y/N)? ") + else: + r = raw_input(" Edit datacenter " + args.datacenter + " net " + args.net +" (y/N)? ") + if len(r)>0 and r[0].lower()=="y": + pass + else: + return 0 + if args.action=="net-update": + payload={args.action : None} + else: + payload = {args.action: {'net': args.net} } + if args.action=="net-edit": + payload[args.action].update(_load_file_or_yaml(args.file)) + + payload_req = json.dumps(payload) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + return _print_verbose(mano_response, args.verbose) + +def element_edit(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + URLrequest = "http://%s:%s/openmano/%s/%s" %(mano_host, mano_port, args.element, args.name) + payload=_load_file_or_yaml(args.file) + if args.element[:-1] not in payload: + payload = {args.element[:-1]: payload } + payload_req = json.dumps(payload) + + #print payload_req + if not args.force or (args.name==None and args.filer==None): + r = raw_input(" Edit " + args.element[:-1] + " " + args.name + " (y/N)? 
") + if len(r)>0 and r[0].lower()=="y": + pass + else: + return 0 + logger.debug("openmano request: %s", payload_req) + mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + if args.name!=None: + args.verbose += 1 + return _print_verbose(mano_response, args.verbose) + + +global mano_host +global mano_port +global mano_tenant + +if __name__=="__main__": + + mano_tenant = os.getenv('OPENMANO_TENANT',"bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb") + mano_host = os.getenv('OPENMANO_HOST',"localhost") + mano_port = os.getenv('OPENMANO_PORT',"9090") + mano_datacenter = os.getenv('OPENMANO_DATACENTER',None) + + main_parser = ThrowingArgumentParser(description='User program to interact with OPENMANO-SERVER (openmanod)') + main_parser.add_argument('--version', action='version', version='%(prog)s ' + __version__ ) + + subparsers = main_parser.add_subparsers(help='commands') + + config_parser = subparsers.add_parser('config', help="prints configuration values") + config_parser.set_defaults(func=config) + + vnf_create_parser = subparsers.add_parser('vnf-create', help="adds a vnf into the catalogue") + vnf_create_parser.add_argument("file", action="store", help="location of the JSON file describing the VNF") + vnf_create_parser.add_argument("--name", action="store", help="name of the VNF (if it exists in the VNF descriptor, it is overwritten)") + vnf_create_parser.add_argument("--description", action="store", help="description of the VNF (if it exists in the VNF descriptor, it is overwritten)") + vnf_create_parser.add_argument("--image-path", action="store", help="change image path locations (overwritten)") + vnf_create_parser.add_argument('--verbose', '-v', action='count') + vnf_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + vnf_create_parser.set_defaults(func=vnf_create) + + vnf_list_parser = subparsers.add_parser('vnf-list', help="lists information about a vnf") + vnf_list_parser.add_argument("name", nargs='?', help="name of the VNF") + vnf_list_parser.add_argument('--verbose', '-v', action='count') + vnf_list_parser.add_argument('--details', action="store_true", help="prints details of the VNF (internal structure)") + #vnf_list_parser.add_argument('--descriptor', help="prints the VNF descriptor", action="store_true") + vnf_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + vnf_list_parser.set_defaults(func=vnf_list) + + vnf_delete_parser = subparsers.add_parser('vnf-delete', help="deletes a vnf from the catalogue") + vnf_delete_parser.add_argument("name", action="store", help="name or uuid of the VNF to be deleted") + vnf_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking") + vnf_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + vnf_delete_parser.set_defaults(func=vnf_delete) + + scenario_create_parser = subparsers.add_parser('scenario-create', help="adds a scenario into the OPENMANO DB") + scenario_create_parser.add_argument("file", action="store", help="location of the YAML file describing the scenario") + scenario_create_parser.add_argument("--name", action="store", help="name of the scenario (if it exists in the YAML scenario, it is overwritten)") + scenario_create_parser.add_argument("--description", action="store", help="description of the scenario (if it exists in the YAML 
scenario, it is overwritten)") + scenario_create_parser.add_argument('--verbose', '-v', action='count') + scenario_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_create_parser.set_defaults(func=scenario_create) + + scenario_list_parser = subparsers.add_parser('scenario-list', help="lists information about a scenario") + scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario") + scenario_list_parser.add_argument('--verbose', '-v', action='count') + scenario_list_parser.add_argument('--details', action="store_true", help="prints details of the scenario (internal structure)") + #scenario_list_parser.add_argument('--descriptor', help="prints the scenario descriptor", action="store_true") + scenario_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_list_parser.set_defaults(func=scenario_list) + + scenario_delete_parser = subparsers.add_parser('scenario-delete', help="deletes a scenario from the OPENMANO DB") + scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario to be deleted") + scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking") + scenario_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_delete_parser.set_defaults(func=scenario_delete) + + scenario_deploy_parser = subparsers.add_parser('scenario-deploy', help="deploys a scenario") + scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be deployed") + scenario_deploy_parser.add_argument("name", action="store", help="name of the instance") + scenario_deploy_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources") + scenario_deploy_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available") + scenario_deploy_parser.add_argument("--description", action="store", help="description of the instance") + scenario_deploy_parser.add_argument('--verbose', '-v', action='count') + scenario_deploy_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_deploy_parser.set_defaults(func=scenario_deploy) + + scenario_deploy_parser = subparsers.add_parser('scenario-verify', help="verifies if a scenario can be deployed (deploys it and deletes it)") + scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be verified") + scenario_deploy_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_deploy_parser.set_defaults(func=scenario_verify) + + instance_scenario_create_parser = subparsers.add_parser('instance-scenario-create', help="deploys a scenario") + instance_scenario_create_parser.add_argument("file", nargs='?', help="descriptor of the instance. Must be a file or yaml/json text") + instance_scenario_create_parser.add_argument("--scenario", action="store", help="name or uuid of the scenario to be deployed") + instance_scenario_create_parser.add_argument("--name", action="store", help="name of the instance") + instance_scenario_create_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources") + instance_scenario_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. 
Needed if several datacenters are available") + instance_scenario_create_parser.add_argument("--net-map", action="append", type=str, dest="net_map", help="indicates maps between 'scenario-network=datacenter-network'. Can be used several times") + instance_scenario_create_parser.add_argument("--description", action="store", help="description of the instance") + instance_scenario_create_parser.add_argument('--verbose', '-v', action='count') + instance_scenario_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + instance_scenario_create_parser.set_defaults(func=instance_create) + + instance_scenario_list_parser = subparsers.add_parser('instance-scenario-list', help="lists information about a scenario instance") + instance_scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario instance") + instance_scenario_list_parser.add_argument('--verbose', '-v', action='count') + instance_scenario_list_parser.add_argument('--details', action="store_true", help="prints details of the scenario instance (internal structure)") + instance_scenario_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + instance_scenario_list_parser.set_defaults(func=instance_scenario_list) + + instance_scenario_delete_parser = subparsers.add_parser('instance-scenario-delete', help="deletes a scenario instance (and deletes all VM and net instances in VIM)") + instance_scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario instance to be deleted") + instance_scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking") + instance_scenario_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + instance_scenario_delete_parser.set_defaults(func=instance_scenario_delete) + + instance_scenario_action_parser = subparsers.add_parser('instance-scenario-action', help="invoke an action over part or the whole scenario instance") + instance_scenario_action_parser.add_argument("name", action="store", help="name or uuid of the scenario instance") + instance_scenario_action_parser.add_argument("action", action="store", type=str, \ + choices=["start","pause","resume","shutoff","shutdown","forceOff","rebuild","reboot", "console"],\ + help="action to send") + instance_scenario_action_parser.add_argument("param", nargs='?', help="addional param of the action. e.g. 
console type (novnc, ...), reboot type (TODO)") + instance_scenario_action_parser.add_argument("--vnf", action="append", help="VNF to act on (can use several entries)") + instance_scenario_action_parser.add_argument("--vm", action="append", help="VM to act on (can use several entries)") + instance_scenario_action_parser.add_argument('--verbose', '-v', action='count') + instance_scenario_action_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + instance_scenario_action_parser.set_defaults(func=instance_scenario_action) + + #instance_scenario_status_parser = subparsers.add_parser('instance-scenario-status', help="show the status of a scenario instance") + #instance_scenario_status_parser.add_argument("name", action="store", help="name or uuid of the scenario instance") + #instance_scenario_status_parser.set_defaults(func=instance_scenario_status) + + tenant_create_parser = subparsers.add_parser('tenant-create', help="creates a new tenant") + tenant_create_parser.add_argument("name", action="store", help="name for the tenant") + tenant_create_parser.add_argument("--description", action="store", help="description of the tenant") + tenant_create_parser.add_argument('--verbose', '-v', action='count') + tenant_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + tenant_create_parser.set_defaults(func=tenant_create) + + tenant_delete_parser = subparsers.add_parser('tenant-delete', help="deletes a tenant from the catalogue") + tenant_delete_parser.add_argument("name", action="store", help="name or uuid of the tenant to be deleted") + tenant_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking") + tenant_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + tenant_delete_parser.set_defaults(func=tenant_delete) + + tenant_list_parser = subparsers.add_parser('tenant-list', help="lists information about a tenant") + tenant_list_parser.add_argument("name", nargs='?', help="name or uuid of the tenant") + tenant_list_parser.add_argument('--verbose', '-v', action='count') + tenant_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + tenant_list_parser.set_defaults(func=tenant_list) + + item_list=('tenant','datacenter') #put tenant before so that help appear in order + for item in item_list: + element_edit_parser = subparsers.add_parser(item+'-edit', help="edits one "+item) + element_edit_parser.add_argument("name", help="name or uuid of the "+item) + element_edit_parser.add_argument("file", help="json/yaml text or file with the changes") + element_edit_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + element_edit_parser.add_argument('--verbose', '-v', action='count') + element_edit_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + element_edit_parser.set_defaults(func=element_edit, element=item + 's') + + datacenter_create_parser = subparsers.add_parser('datacenter-create', help="creates a new datacenter") + datacenter_create_parser.add_argument("name", action="store", help="name for the datacenter") + datacenter_create_parser.add_argument("url", action="store", help="url for the datacenter") + datacenter_create_parser.add_argument("--url_admin", action="store", help="url for administration for the datacenter") + datacenter_create_parser.add_argument("--type", action="store", help="datacenter type: 
openstack or openvim (default)") + datacenter_create_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format") + datacenter_create_parser.add_argument("--description", action="store", help="description of the datacenter") + datacenter_create_parser.add_argument('--verbose', '-v', action='count') + datacenter_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + datacenter_create_parser.set_defaults(func=datacenter_create) + + datacenter_delete_parser = subparsers.add_parser('datacenter-delete', help="deletes a datacenter from the catalogue") + datacenter_delete_parser.add_argument("name", action="store", help="name or uuid of the datacenter to be deleted") + datacenter_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking") + datacenter_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + datacenter_delete_parser.set_defaults(func=datacenter_delete) + + datacenter_list_parser = subparsers.add_parser('datacenter-list', help="lists information about a datacenter") + datacenter_list_parser.add_argument("name", nargs='?', help="name or uuid of the datacenter") + datacenter_list_parser.add_argument('--verbose', '-v', action='count') + datacenter_list_parser.add_argument("-a", "--all", action="store_true", help="shows all datacenters, not only datacenters attached to tenant") + datacenter_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + datacenter_list_parser.set_defaults(func=datacenter_list) + + datacenter_attach_parser = subparsers.add_parser('datacenter-attach', help="associates a datacenter to the operating tenant") + datacenter_attach_parser.add_argument("name", help="name or uuid of the datacenter") + datacenter_attach_parser.add_argument('--vim-tenant-id', action='store', help="specify a datacenter tenant to use. 
A new one is created by default") + datacenter_attach_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.") + datacenter_attach_parser.add_argument("--user", action="store", help="user credentials for the datacenter") + datacenter_attach_parser.add_argument("--password", action="store", help="password credentials for the datacenter") + datacenter_attach_parser.add_argument('--verbose', '-v', action='count') + datacenter_attach_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + datacenter_attach_parser.set_defaults(func=datacenter_attach) + + datacenter_detach_parser = subparsers.add_parser('datacenter-detach', help="removes the association between a datacenter and the operating tenant") + datacenter_detach_parser.add_argument("name", help="name or uuid of the datacenter") + datacenter_detach_parser.add_argument('--verbose', '-v', action='count') + datacenter_detach_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + datacenter_detach_parser.set_defaults(func=datacenter_detach) + + action_dict={'net-update': 'retrieves external networks from datacenter', + 'net-edit': 'edits an external network', + 'net-delete': 'deletes an external network', + 'net-list': 'lists external networks from a datacenter' + } + for item in action_dict: + datacenter_action_parser = subparsers.add_parser('datacenter-'+item, help=action_dict[item]) + datacenter_action_parser.add_argument("datacenter", help="name or uuid of the datacenter") + if item=='net-edit' or item=='net-delete': + datacenter_action_parser.add_argument("net", help="name or uuid of the datacenter net") + if item=='net-edit': + datacenter_action_parser.add_argument("file", help="json/yaml text or file with the changes") + if item!='net-list': + datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + datacenter_action_parser.add_argument('--verbose', '-v', action='count') + datacenter_action_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + datacenter_action_parser.set_defaults(func=datacenter_net_action, action=item) + + try: + args = main_parser.parse_args() + #logging info + level = logging.CRITICAL + streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s" + if "debug" in args and args.debug: + level = logging.DEBUG + logging.basicConfig(format=streamformat, level= level) + logger = logging.getLogger('mano') + logger.setLevel(level) + result = args.func(args) + if result == None: + result = 0 + #for some reason it fails if call exit inside try instance. Need to call exit at the end !? + except (requests.exceptions.ConnectionError): + print "Connection error: not possible to contact OPENMANO-SERVER (openmanod)" + result = -2 + except (KeyboardInterrupt): + print 'Exiting openmano' + result = -3 + except (SystemExit, ArgumentParserError): + result = -4 + + #print result + exit(result) + diff --git a/modules/core/mano/models/openmano/bin/openmano_cleanup.sh b/modules/core/mano/models/openmano/bin/openmano_cleanup.sh new file mode 100755 index 0000000..c306e68 --- /dev/null +++ b/modules/core/mano/models/openmano/bin/openmano_cleanup.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Run this on openmano VM to clean up all instances, scenarios and vnfs. + +./openmano instance-scenario-list | cut -d " " -f1 | while read line; do +./openmano instance-scenario-delete $line -f +done + +./openmano scenario-list | cut -d " " -f1 | while read line; do +./openmano scenario-delete $line -f +done + +./openmano vnf-list | cut -d " " -f1 | while read line; do +./openmano vnf-delete $line -f +done \ No newline at end of file diff --git a/modules/core/mano/models/openmano/python/CMakeLists.txt b/modules/core/mano/models/openmano/python/CMakeLists.txt new file mode 100644 index 0000000..abbf139 --- /dev/null +++ b/modules/core/mano/models/openmano/python/CMakeLists.txt @@ -0,0 +1,13 @@ +# Creation Date: 2016/1/12 +# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END) + +cmake_minimum_required(VERSION 2.8) + + +rift_python_install_tree( + FILES + rift/openmano/__init__.py + rift/openmano/rift2openmano.py + rift/openmano/openmano_client.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/models/openmano/python/rift/openmano/__init__.py b/modules/core/mano/models/openmano/python/rift/openmano/__init__.py new file mode 100644 index 0000000..00f74ea --- /dev/null +++ b/modules/core/mano/models/openmano/python/rift/openmano/__init__.py @@ -0,0 +1,15 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/modules/core/mano/models/openmano/python/rift/openmano/openmano_client.py b/modules/core/mano/models/openmano/python/rift/openmano/openmano_client.py new file mode 100755 index 0000000..a5ddb37 --- /dev/null +++ b/modules/core/mano/models/openmano/python/rift/openmano/openmano_client.py @@ -0,0 +1,479 @@ +#!/usr/bin/python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
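A rough usage sketch for the CLI wrapper defined in this file (not part of the patch): it assumes a rift-shell environment where RIFT_INSTALL points at an installed openmano binary, an openmanod reachable on localhost:9090, and an existing tenant; the tenant uuid and descriptor file name below are placeholders.

    # Usage sketch only -- rift.openmano is installed by
    # models/openmano/python/CMakeLists.txt; the tenant uuid and the
    # descriptor file name are hypothetical.
    import logging
    from rift.openmano.openmano_client import OpenmanoCliAPI

    log = logging.getLogger("openmano-example")
    cli = OpenmanoCliAPI(log, "localhost", 9090, "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa")

    with open("ping_vnfd.yaml") as hdl:      # OpenMANO VNF YAML descriptor
        vnf_id, vnf_name = cli.vnf_create(hdl.read())

    print(cli.vnf_list())                    # {vnf_name: vnf_uuid, ...}
    cli.vnf_delete(vnf_id)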
+ + +# +# + +import argparse +import logging +import os +import re +import subprocess +import sys +import tempfile +import requests + + +class OpenmanoCommandFailed(Exception): + pass + + +class OpenmanoUnexpectedOutput(Exception): + pass + + +class VNFExistsError(Exception): + pass + + +class InstanceStatusError(Exception): + pass + + +class OpenmanoHttpAPI(object): + def __init__(self, log, host, port, tenant): + self._log = log + self._host = host + self._port = port + self._tenant = tenant + + self._session = requests.Session() + + def get_instance(self, instance_uuid): + url = "http://{host}:{port}/openmano/{tenant}/instances/{instance}".format( + host=self._host, + port=self._port, + tenant=self._tenant, + instance=instance_uuid, + ) + + resp = self._session.get(url) + try: + resp.raise_for_status() + except requests.exceptions.HTTPError as e: + raise InstanceStatusError(e) + + return resp.json() + + +class OpenmanoCliAPI(object): + """ This class implements the necessary funtionality to interact with """ + + CMD_TIMEOUT = 15 + + def __init__(self, log, host, port, tenant): + self._log = log + self._host = host + self._port = port + self._tenant = tenant + + @staticmethod + def openmano_cmd_path(): + return os.path.join( + os.environ["RIFT_INSTALL"], + "usr/bin/openmano" + ) + + def _openmano_cmd(self, arg_list, expected_lines=None): + cmd_args = list(arg_list) + cmd_args.insert(0, self.openmano_cmd_path()) + + env = { + "OPENMANO_HOST": self._host, + "OPENMANO_PORT": str(self._port), + "OPENMANO_TENANT": self._tenant, + } + + self._log.debug( + "Running openmano command (%s) using env (%s)", + subprocess.list2cmdline(cmd_args), + env, + ) + + proc = subprocess.Popen( + cmd_args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + env=env + ) + try: + stdout, stderr = proc.communicate(timeout=self.CMD_TIMEOUT) + except subprocess.TimeoutExpired: + self._log.error("Openmano command timed out") + proc.terminate() + stdout, stderr = proc.communicate(timeout=self.CMD_TIMEOUT) + + if proc.returncode != 0: + self._log.error( + "Openmano command failed (rc=%s) with stdout: %s", + proc.returncode, stdout + ) + raise OpenmanoCommandFailed(stdout) + + self._log.debug("Openmano command completed with stdout: %s", stdout) + + output_lines = stdout.splitlines() + if expected_lines is not None: + if len(output_lines) != expected_lines: + msg = "Expected %s lines from openmano command. 
Got %s" % (expected_lines, len(output_lines)) + self._log.error(msg) + raise OpenmanoUnexpectedOutput(msg) + + return output_lines + + + def vnf_create(self, vnf_yaml_str): + """ Create a Openmano VNF from a Openmano VNF YAML string """ + + self._log.debug("Creating VNF: %s", vnf_yaml_str) + + with tempfile.NamedTemporaryFile() as vnf_file_hdl: + vnf_file_hdl.write(vnf_yaml_str.encode()) + vnf_file_hdl.flush() + + try: + output_lines = self._openmano_cmd( + ["vnf-create", vnf_file_hdl.name], + expected_lines=1 + ) + except OpenmanoCommandFailed as e: + if "already in use" in str(e): + raise VNFExistsError("VNF was already added") + raise + + vnf_info_line = output_lines[0] + vnf_id, vnf_name = vnf_info_line.split() + + self._log.info("VNF %s Created: %s", vnf_name, vnf_id) + + return vnf_id, vnf_name + + def vnf_delete(self, vnf_uuid): + self._openmano_cmd( + ["vnf-delete", vnf_uuid, "-f"], + ) + + self._log.info("VNF Deleted: %s", vnf_uuid) + + def vnf_list(self): + try: + output_lines = self._openmano_cmd( + ["vnf-list"], + ) + except OpenmanoCommandFailed as e: + self._log.warning("Vnf listing returned an error: %s", str(e)) + return {} + + name_uuid_map = {} + for line in output_lines: + line = line.strip() + uuid, name = line.split() + name_uuid_map[name] = uuid + + return name_uuid_map + + def ns_create(self, ns_yaml_str, name=None): + self._log.info("Creating NS: %s", ns_yaml_str) + + with tempfile.NamedTemporaryFile() as ns_file_hdl: + ns_file_hdl.write(ns_yaml_str.encode()) + ns_file_hdl.flush() + + cmd_args = ["scenario-create", ns_file_hdl.name] + if name is not None: + cmd_args.extend(["--name", name]) + + output_lines = self._openmano_cmd( + cmd_args, + expected_lines=1 + ) + + ns_info_line = output_lines[0] + ns_id, ns_name = ns_info_line.split() + + self._log.info("NS %s Created: %s", ns_name, ns_id) + + return ns_id, ns_name + + def ns_list(self): + self._log.debug("Getting NS list") + + try: + output_lines = self._openmano_cmd( + ["scenario-list"], + ) + + except OpenmanoCommandFailed as e: + self._log.warning("NS listing returned an error: %s", str(e)) + return {} + + name_uuid_map = {} + for line in output_lines: + line = line.strip() + uuid, name = line.split() + name_uuid_map[name] = uuid + + return name_uuid_map + + def ns_delete(self, ns_uuid): + self._log.info("Deleting NS: %s", ns_uuid) + + self._openmano_cmd( + ["scenario-delete", ns_uuid, "-f"], + ) + + self._log.info("NS Deleted: %s", ns_uuid) + + def ns_instance_list(self): + self._log.debug("Getting NS instance list") + + try: + output_lines = self._openmano_cmd( + ["instance-scenario-list"], + ) + + except OpenmanoCommandFailed as e: + self._log.warning("Instance scenario listing returned an error: %s", str(e)) + return {} + + if "No scenario instances were found" in output_lines[0]: + self._log.debug("No openmano instances were found") + return {} + + name_uuid_map = {} + for line in output_lines: + line = line.strip() + uuid, name = line.split() + name_uuid_map[name] = uuid + + return name_uuid_map + + + def ns_instantiate(self, scenario_name, instance_name, datacenter_name=None): + self._log.info( + "Instantiating NS %s using instance name %s", + scenario_name, + instance_name, + ) + + cmd_args = ["scenario-deploy", scenario_name, instance_name] + if datacenter_name is not None: + cmd_args.extend(["--datacenter", datacenter_name]) + + output_lines = self._openmano_cmd( + cmd_args, + expected_lines=4 + ) + + uuid, _ = output_lines[0].split() + + self._log.info("NS Instance Created: %s", uuid) + + return 
uuid + + def ns_terminate(self, ns_instance_name): + self._log.info("Terminating NS: %s", ns_instance_name) + + self._openmano_cmd( + ["instance-scenario-delete", ns_instance_name, "-f"], + ) + + self._log.info("NS Instance Deleted: %s", ns_instance_name) + + def datacenter_list(self): + lines = self._openmano_cmd(["datacenter-list",]) + + # The results returned from openmano are formatted with whitespace and + # datacenter names may contain whitespace as well, so we use a regular + # expression to parse each line of the results return from openmano to + # extract the uuid and name of a datacenter. + hex = '[0-9a-fA-F]' + uuid_pattern = '(xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)'.replace('x', hex) + name_pattern = '(.+?)' + datacenter_regex = re.compile(r'{uuid}\s+\b{name}\s*$'.format( + uuid=uuid_pattern, + name=name_pattern, + )) + + # Parse the results for the datacenter uuids and names + datacenters = list() + for line in lines: + result = datacenter_regex.match(line) + if result is not None: + uuid, name = result.groups() + datacenters.append((uuid, name)) + + return datacenters + + +def valid_uuid(uuid_str): + uuid_re = re.compile( + "^xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx$".replace('x', '[0-9a-fA-F]') + ) + + if not uuid_re.match(uuid_str): + raise argparse.ArgumentTypeError("Got a valid uuid: %s" % uuid_str) + + return uuid_str + + +def parse_args(argv=sys.argv[1:]): + """ Parse the command line arguments + + Arguments: + argv - The list of arguments to parse + + Returns: + Argparse Namespace instance + """ + parser = argparse.ArgumentParser() + parser.add_argument( + '-d', '--host', + default='localhost', + help="Openmano host/ip", + ) + + parser.add_argument( + '-p', '--port', + default='9090', + help="Openmano port", + ) + + parser.add_argument( + '-t', '--tenant', + required=True, + type=valid_uuid, + help="Openmano tenant uuid to use", + ) + + subparsers = parser.add_subparsers(dest='command', help='openmano commands') + + vnf_create_parser = subparsers.add_parser( + 'vnf-create', + help="Adds a openmano vnf into the catalog" + ) + vnf_create_parser.add_argument( + "file", + help="location of the JSON file describing the VNF", + type=argparse.FileType('rb'), + ) + + vnf_delete_parser = subparsers.add_parser( + 'vnf-delete', + help="Deletes a openmano vnf into the catalog" + ) + vnf_delete_parser.add_argument( + "uuid", + help="The vnf to delete", + type=valid_uuid, + ) + + + ns_create_parser = subparsers.add_parser( + 'scenario-create', + help="Adds a openmano ns scenario into the catalog" + ) + ns_create_parser.add_argument( + "file", + help="location of the JSON file describing the NS", + type=argparse.FileType('rb'), + ) + + ns_delete_parser = subparsers.add_parser( + 'scenario-delete', + help="Deletes a openmano ns into the catalog" + ) + ns_delete_parser.add_argument( + "uuid", + help="The ns to delete", + type=valid_uuid, + ) + + + ns_instance_create_parser = subparsers.add_parser( + 'scenario-deploy', + help="Deploys a openmano ns scenario into the catalog" + ) + ns_instance_create_parser.add_argument( + "scenario_name", + help="The ns scenario name to deploy", + ) + ns_instance_create_parser.add_argument( + "instance_name", + help="The ns instance name to deploy", + ) + + + ns_instance_delete_parser = subparsers.add_parser( + 'instance-scenario-delete', + help="Deploys a openmano ns scenario into the catalog" + ) + ns_instance_delete_parser.add_argument( + "instance_name", + help="The ns instance name to delete", + ) + + + _ = subparsers.add_parser( + 'datacenter-list', + 
) + + args = parser.parse_args(argv) + + return args + + +def main(): + logging.basicConfig(level=logging.DEBUG) + logger = logging.getLogger("openmano_client.py") + + if "RIFT_INSTALL" not in os.environ: + logger.error("Must be in rift-shell to run.") + sys.exit(1) + + args = parse_args() + openmano_cli = OpenmanoCliAPI(logger, args.host, args.port, args.tenant) + + if args.command == "vnf-create": + openmano_cli.vnf_create(args.file.read()) + + elif args.command == "vnf-delete": + openmano_cli.vnf_delete(args.uuid) + + elif args.command == "scenario-create": + openmano_cli.ns_create(args.file.read()) + + elif args.command == "scenario-delete": + openmano_cli.ns_delete(args.uuid) + + elif args.command == "scenario-deploy": + openmano_cli.ns_instantiate(args.scenario_name, args.instance_name) + + elif args.command == "instance-scenario-delete": + openmano_cli.ns_terminate(args.instance_name) + + elif args.command == "datacenter-list": + for uuid, name in openmano_cli.datacenter_list(): + print("{} {}".format(uuid, name)) + + else: + logger.error("Unknown command: %s", args.command) + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/models/openmano/python/rift/openmano/rift2openmano.py b/modules/core/mano/models/openmano/python/rift/openmano/rift2openmano.py new file mode 100755 index 0000000..6a4d796 --- /dev/null +++ b/modules/core/mano/models/openmano/python/rift/openmano/rift2openmano.py @@ -0,0 +1,515 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
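rift2openmano.py converts RIFT YANG-modeled NSD/VNFD XML into OpenMANO scenario/VNF YAML. A minimal driver sketch using the helpers defined further down in this file (assuming the rift-shell GI bindings RwYang/RwVnfdYang/RwNsdYang are importable; the XML file names and output directory are placeholders):

    # Sketch only -- roughly equivalent to running
    #   rift2openmano.py -o /tmp/openmano -n ping_pong_nsd.xml -v ping_vnfd.xml
    from rift.openmano import rift2openmano

    with open("ping_vnfd.xml") as v_hdl, open("ping_pong_nsd.xml") as n_hdl:
        vnf_dict = rift2openmano.create_vnfd_from_xml_files([v_hdl])
        rift_nsd = rift2openmano.create_nsd_from_xml_file(n_hdl)

    # Convert and write the NSD, then each VNFD
    openmano_nsd = rift2openmano.rift2openmano_nsd(rift_nsd, vnf_dict)
    rift2openmano.write_yaml_to_file(openmano_nsd["name"], "/tmp/openmano", openmano_nsd)

    for vnf in vnf_dict.values():
        openmano_vnf = rift2openmano.rift2openmano_vnfd(vnf)
        rift2openmano.write_yaml_to_file(openmano_vnf["vnf"]["name"], "/tmp/openmano", openmano_vnf)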
+ + +# +# + +import argparse +import collections +import logging +import math +import os +import sys +import tempfile +import yaml + +from gi.repository import ( + RwYang, + RwVnfdYang, + RwNsdYang, + ) + +logging.basicConfig(level=logging.WARNING) +logger = logging.getLogger("rift2openmano.py") + + +class VNFNotFoundError(Exception): + pass + + +class RiftNSD(object): + model = RwYang.Model.create_libncx() + model.load_module('nsd') + model.load_module('rw-nsd') + + def __init__(self, descriptor): + self._nsd = descriptor + + def __str__(self): + return str(self._nsd) + + @property + def name(self): + return self._nsd.name + + @property + def id(self): + return self._nsd.id + + @property + def vnfd_ids(self): + return [c.vnfd_id_ref for c in self._nsd.constituent_vnfd] + + @property + def constituent_vnfds(self): + return self._nsd.constituent_vnfd + + @property + def vlds(self): + return self._nsd.vld + + @property + def cps(self): + return self._nsd.connection_point + + @property + def description(self): + return self._nsd.description + + @classmethod + def from_xml_file_hdl(cls, hdl): + hdl.seek(0) + descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd() + descriptor.from_xml_v2(RiftNSD.model, hdl.read()) + return cls(descriptor) + + @classmethod + def from_dict(cls, nsd_dict): + descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict(nsd_dict) + return cls(descriptor) + + +class RiftVNFD(object): + model = RwYang.Model.create_libncx() + model.load_module('vnfd') + model.load_module('rw-vnfd') + + def __init__(self, descriptor): + self._vnfd = descriptor + + def __str__(self): + return str(self._vnfd) + + @property + def id(self): + return self._vnfd.id + + @property + def name(self): + return self._vnfd.name + + @property + def description(self): + return self._vnfd.description + + @property + def cps(self): + return self._vnfd.connection_point + + @property + def vdus(self): + return self._vnfd.vdu + + @property + def internal_vlds(self): + return self._vnfd.internal_vld + + @classmethod + def from_xml_file_hdl(cls, hdl): + hdl.seek(0) + descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd() + descriptor.from_xml_v2(RiftVNFD.model, hdl.read()) + return cls(descriptor) + + @classmethod + def from_dict(cls, vnfd_dict): + descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict(vnfd_dict) + return cls(descriptor) + + +def is_writable_directory(dir_path): + """ Returns True if dir_path is writable, False otherwise + + Arguments: + dir_path - A directory path + """ + if not os.path.exists(dir_path): + raise ValueError("Directory does not exist: %s", dir_path) + + try: + testfile = tempfile.TemporaryFile(dir=dir_path) + testfile.close() + except OSError: + return False + + return True + + +def create_vnfd_from_xml_files(vnfd_file_hdls): + """ Create a list of RiftVNFD instances from xml file handles + + Arguments: + vnfd_file_hdls - Rift VNFD XML file handles + + Returns: + A list of RiftVNFD instances + """ + vnfd_dict = {} + for vnfd_file_hdl in vnfd_file_hdls: + vnfd = RiftVNFD.from_xml_file_hdl(vnfd_file_hdl) + vnfd_dict[vnfd.id] = vnfd + + return vnfd_dict + + +def create_nsd_from_xml_file(nsd_file_hdl): + """ Create a list of RiftNSD instances from xml file handles + + Arguments: + nsd_file_hdls - Rift NSD XML file handles + + Returns: + A list of RiftNSD instances + """ + nsd = RiftNSD.from_xml_file_hdl(nsd_file_hdl) + return nsd + + +def ddict(): + return collections.defaultdict(dict) + +def convert_vnfd_name(vnfd_name, member_idx): + return vnfd_name + "__" + 
str(member_idx) + + +def rift2openmano_nsd(rift_nsd, rift_vnfds): + for vnfd_id in rift_nsd.vnfd_ids: + if vnfd_id not in rift_vnfds: + raise VNFNotFoundError("VNF id %s not provided" % vnfd_id) + + openmano = {} + openmano["name"] = rift_nsd.name + openmano["description"] = rift_nsd.description + topology = {} + openmano["topology"] = topology + + topology["nodes"] = {} + for vnfd in rift_nsd.constituent_vnfds: + vnfd_id = vnfd.vnfd_id_ref + rift_vnfd = rift_vnfds[vnfd_id] + member_idx = vnfd.member_vnf_index + topology["nodes"][rift_vnfd.name + "__" + str(member_idx)] = { + "type": "VNF", + "VNF model": rift_vnfd.name + } + + for vld in rift_nsd.vlds: + # Openmano has both bridge_net and dataplane_net models for network types + # For now, since we are using openmano in developer mode lets just hardcode + # to bridge_net since it won't matter anyways. + # topology["nodes"][vld.name] = {"type": "network", "model": "bridge_net"} + pass + + topology["connections"] = {} + for vld in rift_nsd.vlds: + + # Create a connections entry for each external VLD + topology["connections"][vld.name] = {} + topology["connections"][vld.name]["nodes"] = [] + + if vld.provider_network.has_field("physical_network"): + # Add the external datacenter network to the topology + # node list if it isn't already added + ext_net_name = vld.provider_network.physical_network + ext_net_name_with_seg = ext_net_name + if vld.provider_network.has_field("segmentation_id"): + ext_net_name_with_seg += ":{}".format(vld.provider_network.segmentation_id) + + if ext_net_name not in topology["nodes"]: + topology["nodes"][ext_net_name] = { + "type": "external_network", + "model": ext_net_name_with_seg, + } + + # Add the external network to the list of connection points + topology["connections"][vld.name]["nodes"].append( + {ext_net_name: "0"} + ) + + + for vnfd_cp in vld.vnfd_connection_point_ref: + + # Get the RIFT VNF for this external VLD connection point + vnfd = rift_vnfds[vnfd_cp.vnfd_id_ref] + + # For each VNF in this connection, use the same interface name + topology["connections"][vld.name]["type"] = "link" + # Vnf ref is the vnf name with the member_vnf_idx appended + member_idx = vnfd_cp.member_vnf_index_ref + vnf_ref = vnfd.name + "__" + str(member_idx) + topology["connections"][vld.name]["nodes"].append( + { + vnf_ref: vnfd_cp.vnfd_connection_point_ref + } + ) + + return openmano + + +def rift2openmano_vnfd(rift_vnfd): + openmano_vnf = {"vnf":{}} + vnf = openmano_vnf["vnf"] + + vnf["name"] = rift_vnfd.name + vnf["description"] = rift_vnfd.description + + vnf["external-connections"] = [] + + def find_vdu_and_ext_if_by_cp_ref(cp_ref_name): + for vdu in rift_vnfd.vdus: + for ext_if in vdu.external_interface: + if ext_if.vnfd_connection_point_ref == cp_ref_name: + return vdu, ext_if + + raise ValueError("External connection point reference %s not found" % cp_ref_name) + + def find_vdu_and_int_if_by_cp_ref(cp_ref_id): + for vdu in rift_vnfd.vdus: + for int_if in vdu.internal_interface: + if int_if.vdu_internal_connection_point_ref == cp_ref_id: + return vdu, int_if + + raise ValueError("Internal connection point reference %s not found" % cp_ref_id) + + def rift2openmano_if_type(rift_type): + if rift_type == "OM_MGMT": + return "mgmt" + elif rift_type == "VIRTIO": + return "bridge" + else: + return "data" + + # Add all external connections + for cp in rift_vnfd.cps: + # Find the VDU and and external interface for this connection point + vdu, ext_if = find_vdu_and_ext_if_by_cp_ref(cp.name) + connection = { + "name": cp.name, + 
"type": rift2openmano_if_type(ext_if.virtual_interface.type_yang), + "VNFC": vdu.name, + "local_iface_name": ext_if.name, + "description": "%s iface on VDU %s" % (ext_if.name, vdu.name), + } + + vnf["external-connections"].append(connection) + + # Add all internal networks + for vld in rift_vnfd.internal_vlds: + connection = { + "name": vld.name, + "description": vld.description, + "type": "data", + "elements": [], + } + + # Add the specific VDU connection points + for int_cp_ref in vld.internal_connection_point_ref: + vdu, int_if = find_vdu_and_int_if_by_cp_ref(int_cp_ref) + connection["elements"].append({ + "VNFC": vdu.name, + "local_iface_name": int_if.name, + }) + if "internal-connections" not in vnf: + vnf["internal-connections"] = [] + + vnf["internal-connections"].append(connection) + + # Add VDU's + vnf["VNFC"] = [] + for vdu in rift_vnfd.vdus: + vnfc = { + "name": vdu.name, + "description": vdu.name, + "VNFC image": vdu.image if os.path.isabs(vdu.image) else "/var/images/{}".format(vdu.image), + "numas": [{ + "memory": max(int(vdu.vm_flavor.memory_mb/1024), 1), + "interfaces":[], + }], + "bridge-ifaces": [], + } + + numa_node_policy = vdu.guest_epa.numa_node_policy + if numa_node_policy.has_field("node"): + numa_node = numa_node_policy.node[0] + + if numa_node.has_field("paired_threads"): + if numa_node.paired_threads.has_field("num_paired_threads"): + vnfc["numas"][0]["paired-threads"] = numa_node.paired_threads.num_paired_threads + if len(numa_node.paired_threads.paired_thread_ids) > 0: + vnfc["numas"][0]["paired-threads-id"] = [] + for pair in numa_node.paired_threads.paired_thread_ids: + vnfc["numas"][0]["paired-threads-id"].append( + [pair.thread_a, pair.thread_b] + ) + + else: + if vdu.vm_flavor.has_field("vcpu_count"): + vnfc["numas"][0]["cores"] = max(vdu.vm_flavor.vcpu_count, 1) + + if vdu.has_field("hypervisor_epa"): + vnfc["hypervisor"] = {} + if vdu.hypervisor_epa.has_field("type"): + if vdu.hypervisor_epa.type_yang == "REQUIRE_KVM": + vnfc["hypervisor"]["type"] = "QEMU-kvm" + + if vdu.hypervisor_epa.has_field("version"): + vnfc["hypervisor"]["version"] = vdu.hypervisor_epa.version + + if vdu.has_field("host_epa"): + vnfc["processor"] = {} + if vdu.host_epa.has_field("om_cpu_model_string"): + vnfc["processor"]["model"] = vdu.host_epa.om_cpu_model_string + if vdu.host_epa.has_field("om_cpu_feature"): + vnfc["processor"]["features"] = [] + for feature in vdu.host_epa.om_cpu_feature: + vnfc["processor"]["features"].append(feature) + + + if vdu.vm_flavor.has_field("storage_gb"): + vnfc["disk"] = vdu.vm_flavor.storage_gb + + vnf["VNFC"].append(vnfc) + + for int_if in list(vdu.internal_interface) + list(vdu.external_interface): + intf = { + "name": int_if.name, + } + if int_if.virtual_interface.has_field("vpci"): + intf["vpci"] = int_if.virtual_interface.vpci + + if int_if.virtual_interface.type_yang in ["VIRTIO", "OM_MGMT"]: + vnfc["bridge-ifaces"].append(intf) + + elif int_if.virtual_interface.type_yang == "SR-IOV": + intf["bandwidth"] = "10 Gbps" + intf["dedicated"] = "yes:sriov" + vnfc["numas"][0]["interfaces"].append(intf) + + elif int_if.virtual_interface.type_yang == "PCI_PASSTHROUGH": + intf["bandwidth"] = "10 Gbps" + intf["dedicated"] = "yes" + if "interfaces" not in vnfc["numas"][0]: + vnfc["numas"][0]["interfaces"] = [] + vnfc["numas"][0]["interfaces"].append(intf) + else: + raise ValueError("Interface type %s not supported" % int_if.virtual_interface) + + if int_if.virtual_interface.has_field("bandwidth"): + if int_if.virtual_interface.bandwidth != 0: + bps = 
int_if.virtual_interface.bandwidth + + # Calculate the bits per second conversion + for x in [('M', 1000000), ('G', 1000000000)]: + if bps/x[1] >= 1: + intf["bandwidth"] = "{} {}bps".format(math.ceil(bps/x[1]), x[0]) + + + return openmano_vnf + + +def parse_args(argv=sys.argv[1:]): + """ Parse the command line arguments + + Arguments: + arv - The list of arguments to parse + + Returns: + Argparse Namespace instance + """ + parser = argparse.ArgumentParser() + parser.add_argument( + '-o', '--outdir', + default='-', + help="Directory to output converted descriptors. Default is stdout", + ) + + parser.add_argument( + '-n', '--nsd-file-hdl', + metavar="nsd_xml_file", + type=argparse.FileType('r'), + help="Rift NSD Descriptor File", + ) + + parser.add_argument( + '-v', '--vnfd-file-hdls', + metavar="vnfd_xml_file", + action='append', + type=argparse.FileType('r'), + help="Rift VNFD Descriptor File", + ) + + args = parser.parse_args(argv) + + if not os.path.exists(args.outdir): + os.makedirs(args.outdir) + + if not is_writable_directory(args.outdir): + logging.error("Directory %s is not writable", args.outdir) + sys.exit(1) + + return args + + +def write_yaml_to_file(name, outdir, desc_dict): + file_name = "%s.yaml" % name + yaml_str = yaml.dump(desc_dict) + if outdir == "-": + sys.stdout.write(yaml_str) + return + + file_path = os.path.join(outdir, file_name) + dir_path = os.path.dirname(file_path) + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + with open(file_path, "w") as hdl: + hdl.write(yaml_str) + + logger.info("Wrote descriptor to %s", file_path) + + +def main(argv=sys.argv[1:]): + args = parse_args(argv) + + nsd = None + if args.vnfd_file_hdls is not None: + vnf_dict = create_vnfd_from_xml_files(args.vnfd_file_hdls) + + if args.nsd_file_hdl is not None: + nsd = create_nsd_from_xml_file(args.nsd_file_hdl) + + openmano_nsd = rift2openmano_nsd(nsd, vnf_dict) + + write_yaml_to_file(openmano_nsd["name"], args.outdir, openmano_nsd) + + for vnf in vnf_dict.values(): + openmano_vnf = rift2openmano_vnfd(vnf) + write_yaml_to_file(openmano_vnf["vnf"]["name"], args.outdir, openmano_vnf) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/models/openmano/src/CMakeLists.txt b/modules/core/mano/models/openmano/src/CMakeLists.txt new file mode 100644 index 0000000..486a9df --- /dev/null +++ b/modules/core/mano/models/openmano/src/CMakeLists.txt @@ -0,0 +1,71 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 2014/12/11 +# + +cmake_minimum_required(VERSION 2.8) + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/generate_tidgen_packages.sh.in + ${CMAKE_CURRENT_BINARY_DIR}/generate_tidgen_packages.sh + ESCAPE_QUOTES @ONLY + ) + +add_custom_command( + OUTPUT + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet.tar.gz + + + COMMAND + ${CMAKE_CURRENT_BINARY_DIR}/generate_tidgen_packages.sh + + DEPENDS + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_2sriov.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_4sriov.yaml + 
${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_2sriov.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_4sriov.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/src/openmano2rift.py + ) + +add_custom_target(tidgen ALL + DEPENDS + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet.tar.gz + mano_yang + ) + +install( + FILES + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet.tar.gz + + + DESTINATION + usr/rift/mano/examples/tidgen_ns + COMPONENT ${PKG_LONG_NAME} + ) diff --git a/modules/core/mano/models/openmano/src/generate_tidgen_packages.sh.in b/modules/core/mano/models/openmano/src/generate_tidgen_packages.sh.in new file mode 100755 index 0000000..95c2f38 --- /dev/null +++ b/modules/core/mano/models/openmano/src/generate_tidgen_packages.sh.in @@ -0,0 +1,40 @@ +#! 
/usr/bin/bash + +set -e + +SOURCE_DIR=@CMAKE_CURRENT_SOURCE_DIR@ +BINARY_DIR=@CMAKE_CURRENT_BINARY_DIR@ +PROJECT_TOP_DIR=@PROJECT_TOP_DIR@ + +# These paths are needed for finding the overrides and so files +PYTHONPATH=${PYTHONPATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang +PYTHON3PATH=${PYTHON3PATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang +LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang + +# Remove any old directories +rm -rf ${BINARY_DIR}/2tidgenMWC_4sriov +rm -rf ${BINARY_DIR}/tidgenMWC_4sriov +rm -rf ${BINARY_DIR}/2tidgenMWC_2sriov +rm -rf ${BINARY_DIR}/tidgenMWC_2sriov +rm -rf ${BINARY_DIR}/2tidgenMWC_2sriov_noctrlnet +rm -rf ${BINARY_DIR}/tidgenMWC_2sriov_noctrlnet +rm -rf ${BINARY_DIR}/2tidgenMWC_4sriov_noctrlnet +rm -rf ${BINARY_DIR}/tidgenMWC_4sriov_noctrlnet + + +# Generate the descriptors +${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_4sriov.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_4sriov.yaml +${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_2sriov.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_2sriov.yaml +${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml +${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml + + +# Generate the tar files +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_4sriov +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_4sriov +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_2sriov +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_2sriov +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_2sriov_no_ctrlnet +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_2sriov_no_ctrlnet +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_4sriov_no_ctrlnet +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_4sriov_no_ctrlnet diff --git a/modules/core/mano/models/openmano/src/openmano2rift.py b/modules/core/mano/models/openmano/src/openmano2rift.py new file mode 100755 index 0000000..ba6ef57 --- /dev/null +++ b/modules/core/mano/models/openmano/src/openmano2rift.py @@ -0,0 +1,486 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
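openmano2rift.py is the reverse converter: it turns OpenMANO VNF and scenario YAML into RIFT VNFD/NSD descriptors. A driver sketch equivalent to main() below, using the helpers defined later in this same file (the YAML file names and output directory are placeholders, and the rift-shell GI bindings must be importable):

    # Sketch only -- mirrors main(); roughly equivalent to running
    #   openmano2rift.py -o /tmp/rift_descriptors -f xml <vnf.yaml> <ns.yaml>
    hdls = [open(p) for p in ("tidgen_vnf_2sriov.yaml", "tidgen_ns_2sriov.yaml")]

    vnf_list = create_vnfs_from_yaml_files(hdls)         # VNF descriptors are converted first
    ns_list = create_ns_from_yaml_files(hdls, vnf_list)  # NS conversion resolves VNF ids from vnf_list

    writer = DescriptorFileWriter(
        module_list=['nsd', 'rw-nsd', 'vnfd', 'rw-vnfd'],
        output_dir="/tmp/rift_descriptors",
        output_format="xml",
    )
    for net_svc in ns_list:
        writer.write_descriptor(net_svc, subdir="nsd")
    for vnf in vnf_list:
        writer.write_descriptor(vnf, subdir="vnfd")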
+ + +# +# + + +import argparse +import itertools +import logging +import os +import sys +import tempfile +import uuid +import yaml + +import gi +gi.require_version('RwYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwNsdYang', '1.0') +from gi.repository import ( + RwYang, + RwVnfdYang, + RwNsdYang, + ) + +logging.basicConfig(level=logging.WARNING) +logger = logging.getLogger("openmano2rift.py") + + +class UnknownVNFError(Exception): + pass + + +class DescriptorFileWriter(object): + def __init__(self, module_list, output_dir, output_format): + self._model = RwYang.Model.create_libncx() + for module in module_list: + self._model.load_module(module) + + self._output_dir = output_dir + self._output_format = output_format + + def _write_file(self, file_name, output): + file_path = os.path.join(self._output_dir, file_name) + dir_path = os.path.dirname(file_path) + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + with open(file_path, "w") as hdl: + hdl.write(output) + + logger.info("Wrote descriptor to %s", file_path) + + def _write_json(self, descriptor, subdir): + self._write_file( + '%s.json' % os.path.join(descriptor.name, subdir, descriptor.name), + descriptor.descriptor.to_json(self._model) + ) + + def _write_xml(self, descriptor, subdir): + self._write_file( + '%s.xml' % os.path.join(descriptor.name, subdir, descriptor.name), + descriptor.descriptor.to_xml_v2(self._model, pretty_print=True) + ) + + def _write_yaml(self, descriptor, subdir): + self._write_file( + '%s.yaml' % os.path.join(descriptor.name, subdir, descriptor.name), + yaml.dump(descriptor.descriptor.as_dict()), + ) + + def write_descriptor(self, descriptor, subdir=""): + if self._output_format == 'json': + self._write_json(descriptor, subdir=subdir) + + elif self._output_format == 'xml': + self._write_xml(descriptor, subdir=subdir) + + elif self._output_format == 'yaml': + self._write_yaml(descriptor, subdir=subdir) + + +class RiftManoDescriptor(object): + def __init__(self, openmano=None): + self.openmano = openmano + self.descriptor = None + + +class RiftNS(RiftManoDescriptor): + def __init__(self, openmano=None): + super().__init__(openmano) + self.nsd_catalog = None + self.nsd = None + self.name = None + + def get_vnfd_id(self, vnf_list, vnf_name): + for vnf in vnf_list: + if vnf.name == vnf_name: + return vnf.vnfd.id + + # Didn't find the vnf just return the vnf_name + return vnf_name + + def openmano2rift(self, vnf_list): + self.descriptor = RwNsdYang.YangData_Nsd_NsdCatalog() + openmano_nsd = self.openmano.dictionary + self.name = openmano_nsd['name'] + nsd = self.descriptor.nsd.add() + nsd.id = str(uuid.uuid1()) + nsd.name = self.name + nsd.short_name = self.name + nsd.description = openmano_nsd['description'] + + nodes = openmano_nsd['topology']['nodes'] + connections = openmano_nsd['topology']['connections'] + + def create_consituent_vnfds(): + vnf_member_index_dict = {} + + vnfd_idx_gen = itertools.count(1) + for key in nodes: + node = nodes[key] + if node['type'] != 'VNF': + continue + + vnfd_idx = next(vnfd_idx_gen) + constituent_vnfd = nsd.constituent_vnfd.add() + constituent_vnfd.member_vnf_index = vnfd_idx + constituent_vnfd.vnfd_id_ref = self.get_vnfd_id(vnf_list, node['VNF model']) + vnf_member_index_dict[key] = vnfd_idx + + return vnf_member_index_dict + + def create_connections(vnf_member_index_dict): + keys = connections.keys() + for key in keys: + # TODO: Need clarification from TEF + # skip the mgmtnet in OpenMANO descriptor + if key == 'mgmtnet': + continue + conn 
= connections[key] + vld = nsd.vld.add() + vld.from_dict(dict( + id=str(uuid.uuid1()), + name=key, + short_name=key, + type_yang='ELAN', + )) + + nodes = conn['nodes'] + for node, node_keys in [(node, node.keys()) for node in nodes]: + for node_key in node_keys: + topo_node = openmano_nsd['topology']['nodes'][node_key] + if topo_node['type'] == 'VNF': + cpref = vld.vnfd_connection_point_ref.add() + cpref.from_dict(dict( + member_vnf_index_ref=vnf_member_index_dict[node_key], + vnfd_id_ref=self.get_vnfd_id(vnf_list, topo_node['VNF model']), + #vnfd_id_ref=topo_node['VNF model'], + vnfd_connection_point_ref=node[node_key], + )) + if key != 'control-net': + vld.provider_network.physical_network = 'physnet_sriov' + vld.provider_network.overlay_type = 'VLAN' + + vnf_member_index_dict = create_consituent_vnfds() + create_connections(vnf_member_index_dict) + + +class RiftVnfd(RiftManoDescriptor): + def __init__(self, openmano=None): + super().__init__(openmano) + self.vnfd_catalog = None + self.vnfd = None + + def find_external_connection(self, vdu_name, if_name): + """ + Find if the vdu interface has an external connection. + """ + openmano_vnfd = self.openmano.dictionary['vnf'] + if 'external-connections' not in openmano_vnfd: + return None + + ext_conn_list = openmano_vnfd['external-connections'] + for ext_conn in ext_conn_list: + if ((ext_conn['VNFC'] == vdu_name) and + (ext_conn['local_iface_name'] == if_name)): + return ext_conn + + return None + + def openmano2rift(self): + self.descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog() + vnfd = self.descriptor.vnfd.add() + self.vnfd = vnfd + vnfd.id = str(uuid.uuid1()) + + openmano_vnfd = self.openmano.dictionary['vnf'] + self.name = openmano_vnfd['name'] + vnfd.name = self.name + if "description" in openmano_vnfd: + vnfd.description = openmano_vnfd['description'] + + # Parse and add all the external connection points + if 'external-connections' in openmano_vnfd: + ext_conn_list = openmano_vnfd['external-connections'] + + for ext_conn in ext_conn_list: + # TODO: Fix this + if ext_conn['name'] == 'eth0': + continue + conn_point = vnfd.connection_point.add() + conn_point.name = ext_conn['name'] + conn_point.type_yang = 'VPORT' + + # TODO: Need a concrete example of how openmano descriptor + # uses internal connections. 
+ if 'internal-connections' in openmano_vnfd: + int_conn_list = openmano_vnfd['internal-connections'] + + def add_external_interfaces(vdu, numa): + if 'interfaces' not in numa: + return + + numa_if_list = numa['interfaces'] + for numa_if in numa_if_list: + ext_conn = self.find_external_connection(vdu.name, numa_if['name']) + if not ext_conn: + continue + + ext_iface = vdu.external_interface.add() + ext_iface.name = numa_if['name'] + ext_iface.vnfd_connection_point_ref = ext_conn['name'] + ext_iface.virtual_interface.vpci = numa_if['vpci'] + if numa_if['dedicated'] == 'no': + ext_iface.virtual_interface.type_yang = 'SR_IOV' + else: + ext_iface.virtual_interface.type_yang = 'PCI_PASSTHROUGH' + + vnfc_list = openmano_vnfd['VNFC'] + for vnfc in vnfc_list: + vdu = vnfd.vdu.add() + vdu_dict = dict( + id=str(uuid.uuid1()), + name=vnfc['name'], + image=vnfc['VNFC image'], + vm_flavor={"storage_gb": vnfc["disk"] if "disk" in vnfc else 20}, + ) + if "description" in vnfc: + vdu_dict["description"] = vnfc['description'] + + vdu.from_dict(vdu_dict) + + vnfd.mgmt_interface.vdu_id = vdu.id + + numa_list = vnfc['numas'] + memory = 0 + vcpu_count = 0 + numa_node_cnt = 0 + + for numa in numa_list: + node = vdu.guest_epa.numa_node_policy.node.add() + node.id = numa_node_cnt + # node.memory_mb = int(numa['memory']) * 1024 + numa_node_cnt += 1 + + memory = memory + node.memory_mb + # Need a better explanation of "cores", "paired-threads", "threads" + # in openmano descriptor. Particularly how they map to cpu and + # thread pinning policies + if 'paired-threads' in numa: + vcpu_count = vcpu_count + int(numa['paired-threads']) * 2 + + if 'cores' in numa: + vcpu_count = vcpu_count + int(numa['cores']) + + add_external_interfaces(vdu, numa) + + + # vdu.vm_flavor.memory_mb = memory + vdu.vm_flavor.memory_mb = 12 * 1024 + vdu.vm_flavor.vcpu_count = vcpu_count + vdu.guest_epa.numa_node_policy.node_cnt = numa_node_cnt + vdu.guest_epa.numa_node_policy.mem_policy = 'STRICT' + vdu.guest_epa.mempage_size = 'LARGE' + vdu.guest_epa.cpu_pinning_policy = 'DEDICATED' + vdu.guest_epa.cpu_thread_pinning_policy = 'PREFER' + + # TODO: Enable hypervisor epa + # vdu.hypervisor_epa.version = vnfc['hypervisor']['version'] + # if vnfc['hypervisor']['type'] == 'QEMU-kvm': + # vdu.hypervisor_epa.type_yang = 'REQUIRE_KVM' + # else: + # vdu.hypervisor_epa.type_yang = 'PREFER_KVM' + + # TODO: Enable host epa + # vdu.host_epa.cpu_feature = vnfc['processor']['features'] + + # Parse the bridge interfaces + if 'bridge-ifaces' in vnfc: + bridge_ifaces = vnfc['bridge-ifaces'] + + + for bridge_iface in bridge_ifaces: + # TODO: Fix this + if bridge_iface['name'] == 'eth0': + continue + + ext_conn = self.find_external_connection(vdu.name, + bridge_iface['name']) + if ext_conn: + ext_iface = vdu.external_interface.add() + ext_iface.name = bridge_iface['name'] + ext_iface.vnfd_connection_point_ref = ext_conn['name'] + if 'vpci' in bridge_iface: + ext_iface.virtual_interface.vpci = bridge_iface['vpci'] + ext_iface.virtual_interface.type_yang = 'VIRTIO' + + # set vpci information for the 'default' network + # TODO: This needs to be inferred gtom bridge ifaces, + # need input from TEF + vdu.mgmt_vpci = "0000:00:0a.0" + + +class OpenManoDescriptor(object): + def __init__(self, yaml_file_hdl): + self.dictionary = yaml.load(yaml_file_hdl) + + @property + def type(self): + """ The descriptor type (ns or vnf)""" + if 'vnf' in self.dictionary: + return "vnf" + else: + return "ns" + + def dump(self): + """ Dump the Descriptor out to stdout """ + 
print(yaml.dump(self.dictionary)) + + +def is_writable_directory(dir_path): + """ Returns True if dir_path is writable, False otherwise + + Arguments: + dir_path - A directory path + """ + if not os.path.exists(dir_path): + raise ValueError("Directory does not exist: %s", dir_path) + + try: + testfile = tempfile.TemporaryFile(dir=dir_path) + testfile.close() + except OSError: + return False + + return True + + +def create_vnfs_from_yaml_files(yaml_file_hdls): + """ Create a list of RiftVnfd instances from yaml file handles + + Arguments: + yaml_file_hdls - OpenMano Yaml file handles + + Returns: + A list of RiftVnfd instances + """ + vnf_list = [] + for yaml_file_hdl in yaml_file_hdls: + openmano = OpenManoDescriptor(yaml_file_hdl) + yaml_file_hdl.seek(0) + + if openmano.type != "vnf": + continue + + vnf = RiftVnfd(openmano) + vnf.openmano2rift() + vnf_list.append(vnf) + + return vnf_list + + +def create_ns_from_yaml_files(yaml_file_hdls, vnf_list): + """ Create a list of RiftNS instances from yaml file handles + + Arguments: + yaml_file_hdls - OpenMano Yaml file handles + vnf_list - list of RiftVnfd + + Returns: + A list of RiftNS instances + """ + ns_list = [] + for yaml_file_hdl in yaml_file_hdls: + openmano = OpenManoDescriptor(yaml_file_hdl) + if openmano.type != "ns": + continue + + net_svc = RiftNS(openmano) + net_svc.openmano2rift(vnf_list) + ns_list.append(net_svc) + + return ns_list + + +def parse_args(argv=sys.argv[1:]): + """ Parse the command line arguments + + Arguments: + arv - The list of arguments to parse + + Returns: + Argparse Namespace instance + + """ + parser = argparse.ArgumentParser() + parser.add_argument( + '-o', '--outdir', + default='.', + help="Directory to output converted descriptors", + ) + + parser.add_argument( + '-f', '--format', + choices=['yaml', 'xml', 'json'], + default='xml', + help="Descriptor output format", + ) + + parser.add_argument( + 'yaml_file_hdls', + metavar="yaml_file", + nargs="+", + type=argparse.FileType('r'), + help="OpenMano YAML Descriptor File", + ) + + args = parser.parse_args(argv) + + if not os.path.exists(args.outdir): + os.makedirs(args.outdir) + + if not is_writable_directory(args.outdir): + logging.error("Directory %s is not writable", args.outdir) + sys.exit(1) + + return args + + +def main(argv=sys.argv[1:]): + args = parse_args(argv) + + vnf_list = create_vnfs_from_yaml_files(args.yaml_file_hdls) + ns_list = create_ns_from_yaml_files(args.yaml_file_hdls, vnf_list) + + writer = DescriptorFileWriter( + module_list=['nsd', 'rw-nsd', 'vnfd', 'rw-vnfd'], + output_dir=args.outdir, + output_format=args.format, + ) + + for nw_svc in ns_list: + writer.write_descriptor(nw_svc, subdir="nsd") + + for vnf in vnf_list: + writer.write_descriptor(vnf, subdir="vnfd") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/mwc16-gen_test.py b/modules/core/mano/models/openmano/test/osm_descriptors/mwc16-gen_test.py new file mode 100755 index 0000000..12832e8 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/mwc16-gen_test.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import dictdiffer +import logging +import os +import sys +import unittest +import xmlrunner +import yaml + +import rift.openmano.rift2openmano as rift2openmano +import rift.openmano.openmano_client as openmano_client + +logger = logging.getLogger() + +THIS_DIR = os.path.dirname(os.path.realpath(__file__)) + +def delete_list_dict_keys(source_list, lst_keys): + for l in source_list: + if isinstance(l, dict): + delete_keys_from_dict(l, lst_keys) + elif isinstance(l, list): + delete_list_dict_keys(l, lst_keys) + +def delete_keys_from_dict(source_dict, lst_keys): + for k in lst_keys: + try: + del source_dict[k] + except KeyError: + pass + for v in source_dict.values(): + if isinstance(v, dict): + delete_keys_from_dict(v, lst_keys) + if isinstance(v, list): + delete_list_dict_keys(v, lst_keys) + + +class Rift2OpenmanoTest(unittest.TestCase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.maxDiff = None + + def load_openmano_vnf(self, openmano_vnf_path): + with open(openmano_vnf_path, 'rb') as hdl: + openmano_vnf = yaml.load(hdl) + + return openmano_vnf + + def load_openmano_ns(self, openmano_ns_path): + with open(openmano_ns_path, 'rb') as hdl: + openmano_ns = yaml.load(hdl) + + return openmano_ns + + def rift_vnf(self, rift_vnf_path): + with open(rift_vnf_path, 'r') as xml_hdl: + rift_vnf = rift2openmano.RiftVNFD.from_xml_file_hdl(xml_hdl) + return rift_vnf + + def rift2openmano_vnf(self, rift_vnf_path): + rift_vnf = self.rift_vnf(rift_vnf_path) + openmano_vnfd = rift2openmano.rift2openmano_vnfd(rift_vnf) + + return openmano_vnfd + + def rift2openmano_ns(self, rift_ns_path, rift_vnf_paths): + rift_vnf_hdls = [open(path, 'r') for path in rift_vnf_paths] + vnf_dict = rift2openmano.create_vnfd_from_xml_files(rift_vnf_hdls) + + with open(rift_ns_path, 'r') as xml_hdl: + rift_ns = rift2openmano.RiftNSD.from_xml_file_hdl(xml_hdl) + + openmano_nsd = rift2openmano.rift2openmano_nsd(rift_ns, vnf_dict) + logger.debug( + "Converted ns: %s", + yaml.safe_dump(openmano_nsd, indent=4, default_flow_style=False)) + + return openmano_nsd + + def generate_vnf_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(source_dict, ["description"]) + delete_keys_from_dict(dest_dict, ["description"]) + + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + def generate_ns_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(dest_dict, ["graph"]) + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + +class Mwc16GenTest(Rift2OpenmanoTest): + OPENMANO_TIDGEN_VNF_PATH = os.path.join( + THIS_DIR, "openmano_descriptors/tidgen4pLarge.yaml" + ) + RIFT_TIDGEN_VNF_PATH = os.path.join( + THIS_DIR, "rift_descriptors/tidgen4pLarge.xml" + ) + + OPENMANO_MWC16_NS_PATH = os.path.join( + THIS_DIR, "openmano_descriptors/mwc16-gen.yaml" + ) + RIFT_MWC16_NS_PATH = os.path.join( + THIS_DIR, "rift_descriptors/mwc16-gen.xml" + ) + + def test_tidgen_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_TIDGEN_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_TIDGEN_VNF_PATH) + + diffs = 
self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_mwc16_gen_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_MWC16_NS_PATH, [Mwc16GenTest.RIFT_TIDGEN_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_MWC16_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + +def main(): + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + parser.add_argument('-n', '--no-runner', action='store_true') + args, unittest_args = parser.parse_known_args() + if args.no_runner: + runner = None + + logger.setLevel(logging.DEBUG if args.verbose else logging.WARN) + + unittest.main(testRunner=runner, argv=[sys.argv[0]]+unittest_args) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/6WindTR1.1.2.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/6WindTR1.1.2.yaml new file mode 100644 index 0000000..e6f7912 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/6WindTR1.1.2.yaml @@ -0,0 +1,99 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
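# How a VNF descriptor like the one below is consumed by the converter and
# tests added above (a usage sketch only; the open() path is assumed):
#
#     with open('6WindTR1.1.2.yaml') as hdl:
#         openmano = OpenManoDescriptor(hdl)   # wraps yaml.load()
#         assert openmano.type == 'vnf'        # top-level 'vnf' key is present
#         rift_vnf = RiftVnfd(openmano)
#         rift_vnf.openmano2rift()             # builds the RIFT VNFD catalog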
+# +--- + vnf: + VNFC: + - bridge-ifaces: + - vpci: "0000:00:03.0" + bandwidth: "1 Gbps" + name: "eth0" + numas: + - interfaces: + - vpci: "0000:00:05.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + - vpci: "0000:00:06.0" + bandwidth: "10 Gbps" + name: "xe1" + dedicated: "yes" + - vpci: "0000:00:07.0" + bandwidth: "10 Gbps" + name: "xe2" + dedicated: "yes" + - vpci: "0000:00:08.0" + bandwidth: "10 Gbps" + name: "xe3" + dedicated: "yes" + paired-threads-id: + - - 0 + - 1 + - - 2 + - 3 + - - 4 + - 5 + - - 6 + - 7 + - - 8 + - 9 + - - 10 + - 11 + paired-threads: 6 + memory: 8 + hypervisor: + version: "10002|12001|2.6.32-358.el6.x86_64" + type: "QEMU-kvm" + VNFC image: "/mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2" + image metadata: + use_incremental: "no" + processor: + model: "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" + features: + - "64b" + - "iommu" + - "lps" + - "tlbps" + - "hwsv" + - "dioc" + - "ht" + name: "VM" + name: "6WindTR1.1.2" + external-connections: + - local_iface_name: "eth0" + VNFC: "VM" + type: "bridge" + name: "eth0" + description: "Data" + - local_iface_name: "xe0" + VNFC: "VM" + type: "data" + name: "xe0" + description: "Data" + - local_iface_name: "xe1" + VNFC: "VM" + type: "data" + name: "xe1" + description: "Data" + - local_iface_name: "xe2" + VNFC: "VM" + type: "data" + name: "xe2" + description: "Data" + - local_iface_name: "xe3" + VNFC: "VM" + type: "data" + name: "xe3" + description: "Data" diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/Scenarios PE- Gen.jpg b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/Scenarios PE- Gen.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82e9a3bc73fef103293df3037da61a2496ff24c3 GIT binary patch literal 156047 zcmeFZ2UL?^(=QrCrHM#y0xG?U1w;Xfh;-?_Mg*i8A|MiK6a=Iz^)CpKE?t2Tsi9X9 zr4vd>XwoDB34w%g;yK^?zVp5Rcdhf@bI)D(yLb7(W-5Q!^rsk3Gcf!fj7%&{jEu~T3=GVy z%*-rjXcq(1SvJ-)XMg|x`$K-;{`)T4_Zdb8#^3MwzjhtB0oWPoZ=MdOr@I6=!A?id zPIue|5CH(_PSL3SP4K^6bSLOfo;pnJKMi;m2n`C3d>IuT^C~tjB{eNQBlEAfS@{Kp zMa3l_O3P|$>k#$G&kc z_?s^}0R8`9>)$#12fo;8e4U`J!AXYSe9@f&SN6!ub0*-#= z$H{goGE3z@|1N_^&JjL%u*dmPONo1Erf!e(b zvlm9vQhcuZp=2Pg%kJLW?%*VJoK)GZjyS3JC;rpuCLp`uXDNz^T6kA5H6BX}U`ek* zq?)zHjr+JiPLDm4K!2;fBOx`i8*H{>jWpLYE#;F*i8oE(+$yLihJnYVs&sJ5Tl5RS zNOV+7NqjRHE5hRFr0o_V1hPSXN^xa4mF^ZG*RUv>FsXq>aeB61`vz8W6fyZUUe`qG znm}gwR}lB5f>l&fDoVyUr2TudQx1viVDnS2O0;jrj;6e31NjHca4D^Dtda4*lKNYNgiL|q**|3|Pycocu`h7LXS4Sj^MY;>P(;xE{ zb7dxyTe4sHQT7bjsZyb1KP-KE`bE0|r?mFo2=pxE7b? 
zJ7+B}6T1I2f^@d@$d5>(MBVS3_Fb9Qfm!p23z0hC6hE_0lve2Ln!N6~ZS$fbu1eS8 z=LG@K$ILD7V*qO<{xqf+h);%}IoO}%0XZvU9?vXa5o{j5tlqGOOg6VM;MP~NXXwmd1LP!*a;WN|#-#YgkQNaf zZ@c$)^c(qjCtFbUt1U#6cd;J3u%rhUW8o35Ci>QsDxF!yqPP-%vU12;jn5>SXl30v zkM{DUmaTwHY||F`Mm_ueElT(Yq^AeCmX*wWTlNy{zHbyDYfZJdzlHuV)#3vD7aMe^ zOG-5gJdwPaW^y+sJ}Hjs249jAk39q!hGt_iBx&9*wpcq!ii8u>Q4YlxR~-eub#CM1 z#>5-1%nSl5Q}N>(^jcs0%j|Q&;X1#!s^OIc)~|54_aMV^J@>Pjtj)>L{m11#ucIV1 zFGA8TI4mLgzCx=bsaGkg1ZxT>=R!_z7bFtKEgzr|b;G|G#`}VD?+wOgw)fdqcjEdR znPN9R4n z>krGBIo#oP{!1pq;zEW=?{eE54ujzBvX|$jGBc26DfbB1>f^DFAbr#eVakE;nsw$4 z)P?ln0lv2dplkqGlH|Q955w(8hh8PHXyS8-2p5LfW?k_xWkXEk%CczUQsC4z%M%5m zTTy*8IbHO_=kJwGgd*hmKHSc1E=+A|?Z>3s7H0*v_Dn~cz8tJckO!8Aczuqu^wKX; zG8j#hv51l5a@*W8G&d_9=D$(6xll38PKqT5l2Xk`)s;}4h3$?obKLUk%SYZIc2J_< zjMnJW4R|Kc_u(sUS#E3=Y-Bmf*xYPQ4QP5YL;WC?c@VIsYS^ zkDxn}--3K*h@|SIbq^v3Tu&zEUw)AZw4n}wY8WC=?#YR**T(CrU(H`5tR_~=f#sFD z2@sIu~#J#^1pH0MiL1$ppq813Xv^&3duO zc2@r$C#i3cRpdBWyX91LwllLXp+M;up{#BPeCC1$P}-$=>rzWLQSsQKVuc#V+~w3N4d}P-{blyfr)2BdMfwdbfvM=Vp#jRcIE$^_{1IY) zBRv4o7$Cxdry583n5VfGNuC9T6Y4AqT#KX~YYPZ)3Ks*8 z_yQYd&^-aGWsA3O%X(`3Y#&`6E3Y_bD_?L7uoqn`5_XkE^f}}V(7V$2zNg$fYE*Qj zC-QtItzqXb!_Iju8pfC+M^``d_^ThJ@Wkn`T+m5;8~Nbo?8_6{tf7NgcpOj==v6%} z>p&VR4GqT!b__Gj&&=r@0~n7oqaSZqR$jW_b}4Ipxhqz!x@Q)7?V^JZV}liNr|Kn~ z$OJo5%gW+g``YXv_#`Xa+SFjq7b)Me;WfIlgEXJ+dZKPV6jfpuB{i0GBT1mb)0pFE zde^_Bu#8+)7z7@=>(JQ3+c^9I_-K3PdN;goHQ_Nt5%e;vJF{a>ONZ!E^c? zrT@Wzq=%CRLTvFuv`q7dcp}$P0Y#J`xF8Zw(JLV-cOC;+LRGPjFjGWp#8=&nvG}Jt z8H52Xj#9TAO92pxCschZiCe_L?Mg? zVC@n>k0E6(y`nL9!r9jEy&OMN+;kTLwS@FstxOms)%@+8*R8zSWKvfA1A{Mr^cgf% za)K=9?eeKr%M3{^I%~&uc5r_j%O6`;)wYz;His+Dx~bZ2XpDOD=Tn8b47r`&$}V zI2wcXBb9d?10d1!s-=$=EWytgn*W`P=TQWW(|F%+JdKliDePknj~- zqH%`tskbsV3grf;%&sOhlqs9-K6&z?L|P}ee@Ml*xRO7l9?`N-{CxBthRp!F8*`G5 z0c1&X2$lV_X!fRv7g+(VJapb>_lEi`WO(lO)oqYEG@i;_j`Ie_+&BiX*CoFYzC2Fl z=(q^h?5Zql_g3?8kQ=q1Rtc)@$ZoZ@>8jG`PdL%pOylG;Y(OPk5;mERqGo8^Cv^)9 z(l&4AM0&a{3~QYlalLPpj(gm3JXeD2qPoSRWWM3t&7#JzJ?h)@vYAVRQXRNYBrT_a zPe$=R0hiL%^^LWdbmon~-AK2U3ph_|9=Y zm6sd=m&^*do1`#6mrVH+*fSrW?p4YmUpWR0h=l@=0j@isBe9g`eWE5w4ppZ^Vdpi( zyJwz93aq^N(CcyDO>1D@L61J1Q_LM-r6&Ja%TlXh!;T|FR;%c&RnttieSJ=&Jln%o z(@u-R^@)xh(@q!Xm~3LIU#Y%+iDL0Vig|KvlpAd7i?}CZ(RG)08_&hU(h$aa^Uws<|6Rv((9XFy^V=EM>Tb@=}DqVDz4ByMpLg#`Wikg{XjmJ1Nd4+`a}4MSHXsHc!IfT7rvvdN-71Nzac^F*a0Qz> zdlU-mvtNvV-`}>#aDH34Y!YE$sbR@hOV zt08{=-U-$6n`d8yI}Y}XMG-<0!w0d|6dtN=sGcu(Vfb}Ym@pi>_ET^$Nw0f=2YDBI zdEVmXnN>N!@^he7J0YY~8N{`Wzo#;uFfWJvs8V?73TgEYdC-^Z`WH^^EvLsrmJ_-s zPw3kDXA4oalNy0%B@5@yNEN|s6&5fC* zmTa!gq&R%@TWRvQ6WyJpGP{#S`td`d`~<ak zs~2rIj>7u6NcP77H&xwZz;|oQtz!TZ^x|i$Sa=VLv(BDxIqPt@GZW<=CqBEY`{sI? z)KA;A%eU9@zg*6Alw6uHi3|r1R2>5{i(@`zxvXZpy=574hpHeR{4zC-$l-R)PkWfs z8Y|v3OY$b#lZMFRp+J(eOJ(Sz$!6)r>bTaM$Ye(XH}O;6FitYU*5KM5=5{#~MUT6R z-iQ~VzaB`5Iqwg!^m?HOwR|EZ$}f zh2v+a%<+pLvmC4z~pR5#isK-2R8kR1?K5#Z}j6#Czje2RzHa}ZM1z*P2ZWkPY7nW$fFVNk46){VP1P~c8TJ_ zp#y;^S#a<4wEXzyGREM?FYo0V3S6$;^>f95#l_@wnX&hUInWW)cM_Ou2mv^UsrAzS zmb=uNg2~Za;Nxv3>ZnNVa&MO`tPc9DS4a8W#soT#y+3c*r)gEszIgoZd1Zd>lTo)U zcRYX#`EZVA-+TuWvJF)Wk88HXk6d|FKPfS}RYyM3p6240j5K=cS!lk?*OB5%7rrKL z-A-j9g;*ru zhrC<#WO=AUmjK%qEBP+E1MS#?n9nJM{?PF*nC<0Y0&`qs+R3=yk@q8;?LtJ5OMJBw zvuPlB%-YKEgPxYs(}{BH0*ry^2EpNCr|tbupTD?T%5K(-9*?L*o?1_?6Vcj}^-`H_Cjsk^cGhk>#eEl+k$uo&3psS{$*F?SfQ( z9N@pwki`zqgy4$ywzt~zLT?i=-6Fi9JS6)t8t+>|k~ZHU+9~Ls;uQsNGNObqcDZp7~tuB{|l{3xqVF~F7ZYZD!Q9p(G_ zC;lvWG#iw)j8p`Bs*Lw;8k2dTo!#c48WZZ5Jr1>sdRnk^nzjuF<+0$xD1rI8?Q_R~ z!t(nDOrH-VEKLzS%_R$go5=+G#qD-SUR?Ai@*RrNxA05QvJuwco?+uA8;Wvq#j6yJ zGg+RFDIe}Loc!>x*YDFAr{v98GAAj!3;t=cXP9~2PJXfzbCzP(+}MCmj^4Xv=h>`! 
z^396GTfZSeo*kric)(7QwxI**6xoqtO%lQz+#`W1!iQm8O7K_n+_f=xYZ_}2n%I_m z_g=j=lqduZChEHcbxO(jwWaV5B&4NF+A0}isNTw78tgY7HM}$bE(zWYPnJ{iVi7zO zhY*-TIH~QXNg_j`C3N?4U|6)l@sIogQ zS&*XqDOZ7$5a~aq6@E-RVbHKV?4*{ndf$!Kkc-`#ZpFF|R;Ii_@=9(4Dlh`S8`;bEw;|_s!@27DTYp zcBxMmnA}t@S}WQ8;L3rp5cbKNE}e|(369E`lC$AK)$`g#{9gy)q9sye31&#~;zA4^ z*#jDbi$>7U##xg4LUK&FN_KlWPT|b_?WLOS2E+>vvk{~E!h>rLjNi)mdFca9+xh0N ze^c|MdCCVz72#~1#S{xtEP=iq!=>bcodX#VGRlui<(msXBtgYwT5W_q#H1MZq2ir@ z+&2X8HUVN0?NPQQW1MUlr}bw%H5+Sagx)q!`l?-vNT&?F>Axb#oh{0&E2(5mWfsS8 zcM0DhZIf@zdo=QVDo1-b-AdXv%N-anc=d~<5p(TJU!nFpk+&ajs!dD{_+?I%$h=aV|IR_aW6=Y_C{iU^f&I*+9mXUG}Q8S`g z#qi7Hn+P~}XgEQ+Lj-`HDXOvaKqHVb^I`XzU+N#eeCuxYYKyFX^{>vz*IGf!6wd9% z(|P&SX_0g4TvMS|4W#}BSw{}Gb3Yo#e}3Yr4tlh5FUdZzFOrUP+=UtPxQszD z&+z6|JjRaZI7LK53wo}G@;8z+7g|EvnX?R2)?&T+MII+Rf3`8vVx4^zHIEVh_JG4T z%cj2{7dHN=h&61Z?!aeKOYjEVV6Me>^ko}%4WDj7WW5A^e zvAgqjQdXWp41kit(0c^nt5AB{0;`c&7qTJT(EN?g9jN{h1pG6ApI^!IV#I^m+5D{} z+nN{g>Ezi2D-4B`Ws_F_~RW^~|{1&98NF>h+YW zk!^(0rJt4%nD@|62x3yc(GJ6V)DmVCwOODCVp^+bVV!00e!4-Qv2Qc>@( z9>%;-=b(v!UHB+Ur1^RPA*5%X8_y6O3Q~L|&uaqRi|B^;N6h2y;3lTkn$usg9>{3l z5_xaA1E+8tNnTWbg~eNU&iu74N#Hm)FW+3>0JmY(oub@L@3K7#S^C>0u9vrzD3c@TesVqLq{rY z%@bEFF5?R9i7uo-94kGQ*_W*8y^rRpb<6X%%2HWFClJkaJj2bVHD49EF0GG(4f;7DW|@a&e{I$xwX5~3_< zvkG%&Lvz}aH1UIry;0Olq<9+w4+0OgX>IbJGg~nzAFPz?8ntf-d^p@R@cPA+%GsM$ z?o`HIvOa0h5hhH6;EG#Hy#%^4GQw3xOU58sOA^^3CmPr@#dpIK~Ra!kec27Hz0eThPSUDC-8~ho4d$@D89Au zrj*9N1&MwU10bz-giDY#u!b%Usdo6V?aoRop-Vk_OZBx+PcT5_b+rU%Rxi*kKiBQ+ zvPicJu$#OT5vR_d}-$J2{UhEGi_Wym&# zTLFVheW9nwZlnz?P4c#J$-U)snwDdGkXOP|S-{htS&5yq_MSZ3!082>{D+oz!w>2| zbROuvTguuSW(jYHb5mpsJe_1}kMh(-okI0(dG<}~#ZgQ-^Ty4`%iv_qmKNESA zzXwFq_7Gsa({=pU9Bj^YVhAbLg`o7wGc2&mCLYL))_qxly!7kyZUNu?hv&QuUaC)Q zmOV8$0_)z-Ab!0ooU!wMnq{)lf@oN<`XUTuwdU=-TEHu(Woc?WE$h6mOXF zGSaZN1g|?Sg0rQ04A5pK4x@p@>SI83z_{WdkYt7834IoD(+Q;7P8Ft(MUQ&YKdxN4 z+;gk%{1QEg76@#=s4h#2T*&OGWW$1DThCEEMgk1BoPI8B@H%m)V07yAuC2S}YFimM zC4bh|J-^cAn_k+}BKtFEK4)PBwUC_I?-v^8#L_!dUTVxHlGhe3G8Ahpq_3Y-Nw zR+-OWpxli6t*4=aJs3pt*0c*@uq&5svQ>zr+}?P;Q8~J+fpG^Dn3z4t2y%&J7Ko%bqEkdTS=m5rBVTdWUqXFd}xUtdh9Vmp#UY{hXJ$kSD zldF_})8oljFN zwKVa0S7ep4l8nevk^bDEgDGY3RM#2GTSMWESPacOVM6M+)`c&SJ`q%V?G!q)*PhPk z1t_rhhZN1tqy?RCR(S2>yz?y6n*MUQDW*qFlKce9K+LIcCcwIr(cZX{P9Z;Pkjp4V za-jg`;+BV&t6@yBy@+_@Q}jUdt)Ji-u0=ZQb=Xuk4jz@e(FLmgnum{uMC4u^{gsKz z)JP)A8%>&hN*9f-vf5+VURE7AP{+x+0I^l zy`Wl1H*iLhsE5bIHM6^RH?|z(r zb|haIs!!0_q9ZFncTL9Cg-DTYyC9o&p2N$TrMfBW!HKnY)OF7b$rAI{k6d^7YDq^Cym8QqTW5T*>_KqY;q=zvM6tnz@bWZW4^uWfOcp@I<4XK0>xEhgbSr ziS#Y)amPJ9^4Nh@Ws}%k)!ayHZ}*DB-H$eE;mQgH#%y*rS$bL_GF2?o7XC{q$(Nc_ zR+bXXnxaw~ufi5GV09vKFb~Ldl0mhAqh={IwNqejHKtl&@lFJ#&7`6baQayv*Sj-Y z{rnQGudCB&JLV0DX2|iA3p$B%6rDV8VUear!q<|(GXm*p88wD1ub%0bUyA6Py6N2B zzS3GzwTXOdvYh_QPoF%kSAPw6xFd!fTC%NDz=@KxMj3 zz6}-aAyO}mQUyHWXscAEuDdJ5pX<{OtMJ-E*}HeX-jR;FaE^{%eR2>QsW_OYVjC_|BsFH0@^YD5ysR$Gu5o-jY*@&^~Ix*HAzNp=q#mB8tG<(; z+BxD+tdh-cNvH1Gz0=FKXsf6xmMQ40EN$o?wF+n4nGC8CWhjNCX%tN z3S?2kR#dh*EoMIC+lqU5Cb`l25r5ml`Lk{2>OvGcd~yd&fK=P%Br+1-UIE3eDFien zBMcR09^GlT_imDL33>8D=eCNqiZNQqk^X6>H4ES)xAN= z!;cCZ_Vz1Af-x4kYr^z2zx5>I2>uHb3&dw=;D_%e+t{L)Eew5$Vy`2v)#tiTkDXbf z<72*a;RKyf4Yd!$W=#x$yj<)&1`r@kw3CxbqqGre|A6u+^$!G0@vF{poFina*JO z+#ENNRCnhS_8P*tc?XX?3)d7Q0KHhMLvOqBb9IiRoMu&6AxT+}+B>>;`!3jOMFC;x zyW%j2{E~XDi9M%$CxXl6x$LL>^67T56sMnPa8cb>ac)e?r_WJFPt*1MhDyW(J(WUk z$!A+XgLpUjgc25kByon#eVWo1oP)-jk-UrROG2*XLTW)ywlhr${zRyouC{Lm-IcBs zKRwC$P-Sc-&ET^Zrx&adRg5y7`JyMjemxa2Kh1FOo(Z=_FyNF^e(}v<)9g~dBFUzj zrakv(E?u{(ob|b`lCsjBjde@EnwgHwucvx?7H520px- zTdky>$>9{XTbAQ(7(v-8ehW&4O0a1;MHEkL!Dm2D%&k!aqiE$%o3?Vz2i5^4>B~Xk 
z51b`u7oULVc5}T8^Xrx!W{Hz1(8fJq^DkE|)_T%p^t7+7>fK1mjejdUt>1sytt#G; z{R`A(0TWH($ERT08hx^f5N7mRuU=g57DwyM!>BXiE>~Yu3KP4LA+!=g$_4y(n{kW^ z^tLNp*RkyiU({rs%`T7C<-D?+S|NH&X4Mj%u!AIAQIwKz9r;U@PIDvla_;r zdBVFzuI#xP>-Dc{z3N|f8SiEQIn63O%e9RBQsU6x&A{Tt1HH0@VdKx^R3>Q8$Ouza zRaKqpR9>{0f>_nJo0gX6GsK<)mQq0y)P5NLK0F%84|5rg_M=55ANAw5uHR3Y-`JAa z)^ai@TSFeq{q0X}OFJKs&Dv;5plu z$Hn!{O8Iv?%wA5)xeHt1Mr+LjC-bjSxEB&du&5}xr$Dx~3>Ed{;;xH`+ParoXP73L z3tpt?sTZaMDPck9fD5Vc+R3hMK9F}KveOYr)-mxFTj@1v9>qQauyzOrrHD)Ni(BwK zv{_0q{;p^_$fj>%vCM_5x3U`T+ufNL9jZAT?OM2MDQyAomNGI;L!g>*qrn^1j`v{z ziuyMyH%WW=4t^rJ#6#tBZV|ZRF?fRU;;Hx>+;{jN$#w0?b-hb5d~B-a$=4&BA=9^0 zZk1!(h_Fv9oB=o0&sWy(L&|BHmJeV4D@LvwVFJKxA;e5tUr9$`L`znk)MEr#xx&@hRv4PXkm^h zI^6K#Wf3kIp1u=KDJ%FJDJ|M@w_k=*_o>cDEwNNP7)c`e7;qsdkRrv#1N;7jRB#M< zE`0tNuxu&vH%ctm6-_rqSNBo7Y+IYgJG63|c173${U8c^2U<&wkLebn0Pw5F02vxe z{|7>U@AM+@f(iz@{t%%G_!Ix>qKf>b}u*2{#uCZ{3IxOTEaOKYu|5@UH zB5r?n@tNX%b0)*Q}nlx`Rho(Z0)4dx>63`SK6@& zp`o)33a$D(ABJzD9nB#h1E}USkjDh+sXmmXy@0}_P&DYI6r9LiQk%Jp>ZBwqlg(+h z2zQ=R4>6QF8e;i7A+%~{)_8}?p^7Ytp#Op#(wFQ{&_1?= z23hP0xVv$XNwC~cXtO$*FkppJFIz6k+a zku-Qp!=)UUM9?o|DlZHRgV7MO7Z58?8b1cm3YM^eRNnn#0HydhP;*yUg~c2&5Q_z= z&)1IjPzRp>I^$d+w_vIiU>^{11@nu+c$+r)gT^%C{>EeS->2d?RGS_=z+kBwN2Srn zfcGk}FbLI-22TIRyI-3W4r%l+g(mOqa_b5patAHDF#>|57b-ZTYx-+!Ds+i+eSH&`b(z~@8AMN77ffbC4XDQEIEJHGb^c?($T8rYMZf;F|Tg%3Avt)+JZW|Z9)62DZ<^j>)IZ4 z71yM@T+Q|NtA)M+eNZv#SB|O`S{Z*d_7aB^6~Sj$oyO6zz^GsRT^{zTE`IKrDu~sfpsl_>Bh_9*c2J@S_UP^IWh|XZ!!1bwGjly4;-GPeRp9hYqnJY*BRAU$L__iV7_AB^tR^2 zZ6Z_cRO{|#+!?077Sp-qHv3`PR9>+mcM|z^k`gg&|Mb(Eg{hYA|LKN8jC0byS>CzV zLFY>-T&lPw_{mUv!b4!BI{jyo%fif&uR>uA?)ET6%xr3ncmGDFdH2IZl^Mluu=tyc z-s4sW76XM#p3G(`jgDq}K3n3mx!zvH?Qdkk4~x4cE?Ecuv~B_QY&{BzaQ0@H3w{y{ z>bz~=*?MYH^c$=$RF5D`)@eqi|Jsnaf6R zM5RLej1cH9DG+NDYQC6b2H`d+LEGnX-0rSQ6w5djaBCrnDW`eS^)K%ywsuxFeWQLO zss{n%^6FtHoYtFB<>hc=Q;RzJM+)bDOd!Ac>_9H*EzVwfRuj!HqG|0dHTYJd-p#8n zgx0X>{(tT`^^YB>e*aBV1c3Z9FEpY$rf#r&V?xMZ3M)72PRSu#C(-vqNJ?JK|6 zY|^JMmM{|;q~!+YzNH$}6xxq6mZelfz8q?{2yH}?m|{>Z%S>vj&U6lrjA?gUQ4mX||AEsoLkVO3JRWE(8$(0Gdl%c^Pn9lR&&ZK&)x z9tylP-xDk9;5oTF^|ej(;||(BP)hG)shOpbg|RH+&<_^SZBQEVH> z04^oWY4p0Vd#u`Z7q<`VR^j*JiQXYSpY7e}Ekq$VY7=BRT0vh7w`N-Mxx_bp`?vd$DV=_(e(1y$}+XB#(4tcJq6uG-5}Gkur3eT7#MSs|Wg zx)RYA&_{3d`{*u4${YsTIe{i3sjWv5aZ;u9gRKAqjrWGEcK;?s4^e}4Pm{Gu z%kvzxW-?=>7sr5y#?i1}s4nhxrGN5bqH&xss^E4-_0bH?6gy2)a|>yi155xy0c5@T0;Ej@=OnVGbm3B&d{Z{4VhY3Zh^da>XvDSG1 zvB*&f>@fI)^)L)?J{*n2@S!J+1h(!L^L7-YwO%tG-VHmA2?bL7 zz6nd!sY<9Yjf;1MnOZf zxlYO8%Jmdj1o8{zkOUzENg`OuEl{N=^#>5 zhv!))GB`SkP(D!LH9(eleL3(-k~UyVqV=6x+%5!EKxbf`<*WDwG$&&u-ieHO8`I&m1pvxiiE)B+JU--ToM+ePBDAZ>z}7}QxX zRc(3$ZQLF|%m&5vc8z-7IK1DudE)E*2u34oW1!YnKy1uZd?ZU1oJc>=PN_ATq)nW4 z=WeJ^6UmmWOhOuv9C67kBtczjcM~b7%JT-tK-Skng{o|=7Z+Muqo>89?X#uc7~)Q| zHmXL-9-y{p0Xhcj^${tZN}W_<1n30k!axy6Ep`yosLkSgAd+ck$ zNzsW*H7SD@5&RH}Wq==ePgS*)W8S86>%Dh~$pj(j&8q^A!;(!w8pmfs#g8hRHQcdq zu9?|TMMA)@2tkhk=?p{ly|3T6FKeHu<+x)_uNu5_D*T1?>~-|V%Aoo z0rR3n?Onm;eueauBPBKZh1KT>wzIV9xl6t31)oArsEf4XK`-T=ZlG!ut*qG~>J2NM zEDNHF-g2txMi#nn>7NXLQ#7^F+tnLvL7mL}A@P$|KCVkspa0N<@%P^{s_}nqF7ArI z5j7pp`6zWlU%Gyza-dvRw*0A#AFIty6vXIdaaGh8O)bOa>LOvgLo2u?{B=We3d|Gq zJp2mj_o-kWQs4(@NH^+SxX@Z7q;vaRdjJl|>e&eHI2iE~xUSA~HgB!9z??{gr z-H}nxre~rMaZ>fq&kOBb53?mdrH&s$HkD~Ta7J_HC!AFLn1n^Eyu;J!MbOO$y1H{*wzBKYR_;MBxytO z>L4WOS-8YB>D%iCxWeWLDc$c`+N{-Fs~PuSCoINLctaF}7xnbfUl`*_#TEk{+L_pw zz1^92(TCGpD?5(}!bOVL9zQwTf9Z{wRS&xPCiF6+wZ~f0Uyx< z-E*PJ-#4>XEhp627MI4{MrMYir6_$>TU&1<`Hk6QZmal%{6cRLDwDgm={$19My;Gn zHYRuu6F=mrHJmiRN9&Z<-wF18lY7>i-`Tytzo-g6n{}j5%(9utFW#9PNtO?jI;^0? 
zu3XG7zRFxeBnhnL*%n-@wm7TI)>-LdmXKF2a!rC(cgFsIyBj9=*L4wIH3gcpbDLwe zOW>YBGsNR{@as%M*xQ5UbtPiHmXVIk*W zjc(#fO2=DMxa^6$By1{vR7E)IvwR>LL!Lb2>!3`Z{HZib{Z@br?1sm-a^rvIkyR*GsSCj9UpYWC zb$&dx6f0%#IF`BM2sE=r{PpIsTt^pcv*68lNn-&54r(7JtV?V{hZa(@fV>#f_hHWh;UlV`naWMsg(g9lci_-4mKnt2G>axtkH z*%Bp>It+ZeKr!U#sH=XmqF`zp&P~WoUCfb(JOyhHn%9M=+TX9aNYV1L=GXX7jYR*W zvFM4w>I~pxM4j91Ub!nj%QvKri1MyojVOS}fBEc4%&8(Quo z6uzf?+cs^yGPtM3T_|(2DPQuG_@i(Wk_eS)tpe5&PttBwcoC3D-#afxlCS8+g+1Cg zYUHi;CPE4vJqj#xdf$z<`_t|4Ji-XvTN564EXXIBD$@)6Fc$N2;rmmvNQAlyKCo+way|>z1v}?8&(ky_ zuau+n^x?9CPt^^bDo2ASm&FYF!l&}i^lmXC0fs{q*Q0k7=S2m&(C`#iksh9pXiC7h zX4lwado{a^$|74^4_5yhIcXA}T=uhkl=rqpNNIkKsNNs!8gU1-4hr=D{S>kRZD0M; z8V%J5^TjFf8S>_^#V97$I7Th9UgxYo=ijr#)O#Xbb%XmEO3)6C8XGYSTpb}E17^}t znyUtwnBdHWUto#d9;b7pb^<5S`FpF>>})%BSODs8N8o+sK6*B0x%2i!PSE)*#B!S5 z;8&NV75*ZVZ-XB-nN@=7OYssoRXX%8z{TGZWLY4$U&DJ>JD;Qg|0?ttFGlzpm>VISI&BLJ#-@b7r5i!<; zm{|#SCfTwz$(D5*OOmAQM$C*QJ2Pb5%+l}bc#h*e zp7;5_zdzpp-am3M)4g5yb#9;YJU^$2)1&fVk&kFqOv}d5TvJ865S-Eo32 z9iHnM{K;+6X~XijQIv=z{za1$D+6(Bs&7=+8XBAG+siySxLT9zd!AZ{rBou@;G9fj za*OcR4aU_X{@!0i6V3Y3y2LjgvCP5@>#5{>W{oH086|Y!x`(GSfuj>14&5*%?OMWs zUg2xRsA%*X{>$yJyzXyoTr@YfFQp@_T2r3;_WBNHHg(I7&pM0Z|6n__uNJ(RFdhl< z9~!6Whk%m=+~48PZ08TSUfoQqYnkd3Tv93xZ12!NPSr1^NVSAm+`nsnR#ET#q`2vQ z@}=96P6Dxqc%zI{9LDFbI2CE}Dq?RkfNP@g@J-zD|M!A3&dMWGnFrs&=`W!6za0BF z7>K~NF8tVE4kC#fd0(73U&SyUV~^220&>1?oUQ8}S8eie(k3j}S`^PIU)uVxo%P*a z$Twqna8u$6#^((Pb<2O)O42CB^w@{NnDtgn&Druo zh3Eca9>eYU%#A*!)J!vA^4Vr#(my?H9szdF@eOv3KCVwamkS+D?P2p8Oyh`$Y`dvF zWOmNfv+5s@0XDmo;u!eNEkMY|UG4j@kahXk2m6U0H*TVYvP{3|M!vb;hgHxjR=L&D z!p-L8XCV4OrnP?*tv%dqRO;s1*65i=I952_-k3uEev7UKN+@B7^9Fjafuzzi~#^3T7*&+UTLN(#Ic+_BhBWxiE`&l1Ooy?lPzb4fYckHKJjoo0jqKBQ`OwtX!dYU1eND|-UT`;B}AkczX zUM6mXtym@Kj`H>Hxwx{FzlC!@vp({>Bo0rmLJ8`t-nX(vwTB|fEu=WK-ZX5Fog*pG zZYm)RaJ-(<$i21V_!H;U-yJoF-Ya4C<(rTFzJdVf3mA7+5r+JB3stSKV(L%?ib2>{w{ZFh?dKX0z@_8$hyKEp$nEl>1T0aachki zU*!LNHh@U8hh0MPk!ZqHIVpy8fl!|n5K6%2Ivf2_<2-%r^2+@^Rw_D6U zZ|w8X)`ov*TE0C(S@r@dRck~Ifwy`zSlHm)LnOsrNw1Jq-rBaQ-4S>bck=(52@6`I zkFnnCE2Cwhe43^?70y(A`xq68*`E)^2l}y&(dkQ;7)=YS_lfz9PY(;rr%sV8@~(fs z^AfpAT?14^@$h~6CrpZ=-^i#f+e0X7?9xbp&eg0?FXGjdccjUMO0PBfZ{aqTUQV^X z0UwUf#Ps9@Ugc#O032Z~86fo{wWtG=9@+5mcL+a9K84?hJ?0}mUh~?Er@l>VU++ZM z_ndpIxyfxS36dbjDH+rzh%Tc|yx#>ro-~?T+fN{~tohF`iue|N+CW^}n{-Px-6J+8 zL+yiebl>_r9fjj2wUxOUIk*UB$rdg{Fww|kJcO+V>gP=u*uEJ>ICKh@SiwF>TGG@P zqq2UNzHzf9#d%lUu5SoOF85Wt)k{iB>`%Iz>OB15S^u+ieM zDZ(rtVswS^zsI*GjV{O>ryhtnmKAWP3F($Fws>PvaLufI_WSomw$FrD-Zix)1BL`s z)hwVh_(h(;fU+0D=d~SV=U`f#Iz_tPPfnal8lw2C?pdF4_AKLjlHc>d-V_iObnA1m zvJ{3~sYMxee*A8?qqeROGmT0EBd%v3eBeeh77&ol-2Aq+y2MO&q4q7NZg zLY+?SNQ_e>h=k(SE;kIQ%r)e#dz6PjbEY{#wl=E*^UmiFSQ$K%SDXSo3)p%ErUv~l#(IKzz)~=$ z_C;;>@WuO3nvmRB{`z>0N=ZG7D>WtuzE7@x`_++IEOh2&;YXW$7muX3AD3NB76tlE zFH9Kv0yV8<3%oDs5+{0D-0QMh2c8}MzC-G6Kz(p{UH?QX$6MJ|=-Ac+USFT9 zCwm`E5^FLkz~bAvolwy3R+3(s^~cVjK~skveLihUrF~5_aAayOrglY-%>yy#iGH{! zglYwJ83i}@D;%%QebvadopvqN-}6F+f7C@A=QxRTCqM}~?JhHM$+$r>3E*EhS85+G?n;?)30*%RU-)Qpj9Nl*$)b7_ zEQxzl{HAn!q$~q${Q3k@OCt#m@zPrX17;7W<;d#Kj(Kk+rsCYzER)JpVJZ_}~9l>}i(&%b`$8<79VW_X89d6Qc;=;{W_m5-X~Qxfo)D zV$_{sx8PWMmq1n1n>xfb;1F^Aw|~GnLjFf*R8T+{s3%i|0~dVRzu_NvZ3n<>H>SlV zOK8OfyE=;80k{2U6P!%ZsgoBVfv)tAKo}1P@|p`V4QcwhlnFgUo(p|hYln8DKRUtH zlYE|VEZM?qGP@Gt^t%xM6b|muiT}{7RAP!zE1Ar^zkNM)Eav(q?ss*^q$~t3&sEN< zpN=}bQy;UrZS$bg_Ix%b`hnogt3Q_tZfNRj>Iw}$S1gMZ2z6|Gz|#2Fsl;uAK0{q? 
z(ZfgTOF~^8!$SIlt&L65W;W4p6C(hN>C;`PtiK#JDqVeXV0+oZ#_i0bh~GmdEKm&$ z?kFZw^x63Ry0KqD^BmV4Z7wZ`+_qX&_>>H3RFnU5M5E=s5TR38$v~XLy}eToIt!1R z)X|S*&s9KMF5N7}`Myyko2Q$a0~+~g-i|a(L?LZ&QJFj4(7ZnME=5`D%CowK0|$6Z zl+Uj!ygDv@#^Fa1kB6wi3_!9qnNm4a6d4}5qTG6Pp$aqs`hKqTRW4|F+~lT{9{x*^ zXV3KGECuOCs~>PdJo!1c+O;ED;B3}g^dagMRka5;AUJ#92hX=Tdh%0pW$3B+rx!oH-e=KXMOS1_b{nK7Ybki(0h-h7=^EJl75%$6MA@-<9$~ zoLw6y82XtQt&mU)uQPNvX#eP3&jTd4#AlBU!0?(6_P`aiAu9*KYLx)168ICU!Up*< z?#a;9&D9?@neR&Gx?bZnuBBxv#Wy0+^SCD%mp(|#irm);;;1lb+S#{g1CI*#17%xv zmW>{lj=XsIN%dr*>ttOm7E{r;0toT%PvBLnkrIpjK04 zj(MdY4&PYu^aZ65Z>HR5YLL*;dhk`@(>ci1auAe7Aa#cN=h&=9t2=MNzEjrcnCA$B z^Q&8RF`2zS;HUyQcfFtPk0iU82Xg3P17~KSf}4Xre>wDbBH`+;A7jipsJHJ~Mxy|1?knY9bCUiBvm6t8^C~ z^Ne}MDGBS^PmYIgeT~z84cK)-=)tv3eOGY8Oq_yC@Y7tbL>xCK=Pa%Fu?7C+TF00u zD7p-CZP%Ch$uy&44es-Il&qNlUW8%Swtc92dp&k8)z>EV3#MGGdh_R|{VvEVoIQc0 znt4Yesn=EbtQRoCkHrS@)z-C@-(2reUElJLh!tsHRBJLQr6QS<4h%2mC06NQ4p-8_ z`>nczDz#q_N&%AA?wS{xKD^gH-}O!vg&P90T?NQesate0$N?U~mY%P2)3L^=o!3qc zU~g#NaQ7ZZ(c_v~>3A1&@j%iQ;7XwHl;3Ye_b=>NrWBin%J+V{sJb05euCppmfRQK zr^ybl=|56aJp+!}y~3K#!DEV|lX4zZzj*ye%L ztZkPE9HMV~Wc0n?gO$l<-UdJYA{$uMkNGwK37+KPL3Nl=Aota5de4QHv) zyTkh@ux?M_>4T+m7hf{mr@ku|j;p(!oH?WuB_R#d_?Y|+OVqSxm14*rkkxZ>`qE$n zaH*?2CGjy`-RV}6eH}`cmLp!|2@jt=@5k(5zeJ+;nEVbmkZ>pon0NhT9@#}s{zTdf zokn5Y0BS0mZ#|EHu#U#ova%<&&=Xmp@7!zr1j2}z8Wd9`x=r$G z1g)NIR1)7R3AqgG%1$RoY4($pv+j6u)hKFAVv%c*{8+9{*v$vV>&|#1PM2)jDSV)7 z!lUDy8=gq=YZ92r(WEyA2m}K@weo+2kQCh%@7sB545Re9j*X+AtM%UAGjx+Km*BWN znm!N}b{CFfgEFbQu1Jg*EK)cy;#?BLc(^7cV*p`7sWe06+O-kme3Wv38K?UlrnD}- zleh|#>Ex>M|54#n)+L$1b^Qr#)tsVpyWpS_U4&V~53uCHFQI^k`qw zS8!YZD(j$Q>U?8FvuZ}8>Dbs!MgGp4N*tjn3~ThoLBN00>!?XPo9N}`XC`j^v8gs; ze@2RvoDZs7gy)KlVqjc3(!*t-3o%a!LJlvgSmv~FKCL&t-fcQ%hALzAh_Doh64 zXoRYF&Gi@8kLPaLwV%-bRXR}VWa|^R6)pW;>LRNZ@6ji=aJCw}{OXC?YIEU_ni*EN zAMIkFa4JWFN`7yc2<#8)*(CqK25C#x2~^jkBpI8c7oY5UvX?`;)T4LKAzpu%BzA>S zj`^lbXmr642XegG`hPHO{0xjcX+sOrH0$$LFxLX!mMaebwtyIPSRZ5;qaL!FG_ae0 zIS!(w+_3bYs((4cmQP?PNf=K%Jos3esR_rW`yu~Si*2olhFs+Sgrc^gYF9>f)`*hB z$er0BKh_aZOnVF7y>5oxFMcFhj&J-I<|Kn2r&qv9R)GRaR&M>}Fh1f3v|l}*Vgdjs zPKJ%$Uyd?X0iWkgP-gur_I;1F#pCn@*b_V@8vnr5dlOY7@Dc*w1t3_yUO<$h3|Ms) z+Vx=IN>J5z`No%^#OoLif4^wuAe;k;%TreKWBexSK(n|{@_iI?Ya$U&jS3?*8Z&!a z`G@qHbG3m&PhCJ^uA5`Oz@A_zvMhX!_x6)O&0Ae8^?^*}w#0F8kqB1PA2C6M?6v^&^2!q&v~} zLyMjj8vDcUD*RjHEdPH8q7^#z;Oz6*YVm}EE{nN|_GCgu@|<(U3fo9_W!3J#_MW|J)n z#ljgG8Q#oQqtKyKd^( z%1(*u_Z5%BXCJxGAgKWqS!}EqUrt+#vK_z zMsatI&;Im`W_KH_>+V_yT8HY`7yctT(*%l8VPrekZ+;ZZ1tA|7aF4*z##DTV4W3k8 zeGapJZ)EfbT8bK3zk}n2y!3t77oI1xGHOmW3%yyu($)&y{Gq=bdH!$lUv4yq@L5oT zkYqf2UxQ~I9ENuDpLm)IRPYa*QaqK}y$Rdiix#4#(~yrtD-!;4kWOoaZXfax=DNjO z@S2muqIARfeIM9~YnTJIAlTsy`=+m!gM7T!{IQRm;j}&E;KAlS!MM2;Z1l4q>9WK) zfyh*?osFcPeE2ZTTN!`;_`l5s6|nM=_tz44)>gX9JsRoxQx3?k?sut5hCNc_m*l22 zuFUKj5M#xMeQum=nL>?ud|-!i#w8VNbMfiCIvn zpSQECSCmOfo>7w4i`}MYRKXwzIqYR7i-zp%OF~gC`$Q%bKEb8!(FY;U0{fOMD7^9Z z$Dw9FUx~YYMY7Y*2^JSpvf@>8Td%2=SAD>fYl*)LXyVhZ1T1*X!WCYKBMhY_N7;Ie zRA4csAg|r7>3p@dTbjDkob~w;q1T$iqUV=e&Z9|hWQ3@{bI5CaZUAj_b}ez!@vQN$ zv&=|2Wl8_>Ci2p>(e^E|a~uL3yPNhQ^{0T|eNdca{w&yPnd1GHd7sKW)tH&MX;%yi zOWt01vK=topMXd_z1}w#p?L8-nfmhG5yjlTj}D3m^G}n#y~+rh?jiEx+E>91*#?HNMsJJnIM73}`RcKT}VzJsS{G&A}y z30;!M8=vKU5cBStrXgYL^?q0sT!e|Fm4A{%#x*NeSTC^;ja1&)G9NruBuoh4ODHcF zAiX^t5zd2aONJ21Q+*wM`vWsS1)t6ID;o`znVOi%TCqvhcI`8kCALnmBIVM^c&1qS z6uuo3Udd`3Ddkz-sSi|4gdZu^QADU5 zAx!Xnn33vQCaTR;+)PD6ilAkV56`>_uU3hhzP=S_9O?!o0@Ar<>=SG?@+rmzz7N!p z1&XxWx<>t(3Q)Iv{%kVTWX)kBsJc|*GF|v$Hx= z*hX#}iubrK1)g~CWvZagXS24 zfeLJ7n0O%4-e__9%kz(PK}TQ{x-h&}V0{wV1CzpC^tt7c(#h$yvpKG4tBqSPfONQg z0^SF-2Sd1z_IT}s12c}!4AimafHnnIr{&G`MrkF6jF;J0^}@Ik%%k|6qu^(_=UJG* 
z&!5XP+$2$}&R`@{Jegcv%5H4$3ib+S60 ziaMTP^sqwTlC%CYYj4G@xVXz{To%SN8B%m5phk7PL9i+jn1VxGI_Lc3PfY7r*slGC zIuS7=iU|$U#+5!8svQbCKGek&S)x0dAaoWHq0`z}vmaxFYBn4V*$dI3aTkA1jD_-t zfryEqi6Lhq1u^7uxJYfT|CUWdKU&c>pvjSDv@Vskx(c5=eZG*repg#A_bpG~wUV+Z zc$Tl}T|51LXcj!JjqQ>??}+-^F0|6HdW-ui@zUd7t6PJV$2BP*4!DME=+-6Yc(Ff9 zbZLOWT_OO4>YiSM6frBDmF~fLwV~emw!h;0!~)9}5Zi?rh93=OR^@V^kVR@mcV9fM zxQR3clIjQ|&ie>8B`*z?SaI8^-b%FJi}QByz>BhD(A`!%D;jeb%flw1b^*ejU4aNk z6DJS#`T)($5VPTYpA$W%C$8;!z;*rU6VY7`?@JD%nXFA84RZKc(Zw`8E&KMwz1X2e_SP#p1vDb*-&P2mEmQ0xJW3h9+)|s;(=ArM(@SJJv==oT8>J|B9HfF%RAr zVGBHjV8zfHdmjsysQ(>LB*8X&+@>1mo$4Cy?D=ZGWpaOfKrx#+4t73RF@oEs3Uusy zd@1C}Xlw7x6B@4N!lxf?Iz4Hl|08+URrOVZFzg2R(f2s>JRL9fz6Ig9b>t{WZ2_G0 zG4oV%CkE#CWmEAM=Hz<6!R(}4T0mT2^6`nTyF7+;;(*t53*-~*ysryw3TRK zMppoq{q}e4zpfH0Cul0^f-$5k@tcQ!0t8qR4(nw2^`fK6es1Vka1jn&|ATt&qrgn8LT#>Nb}RsO*MEY< z{|hjFXFeU!=75|YA>a=LN?B*`jF`}(QUcyMnz=023mN+IwXI00eTe3r3IE!z^+ivy zV*YsVO}T01`}12DE}t|_U#vq|b&=lpJ!2mQQG+La#jrya(-uNL*m`JD<^mO$R?WU5E14<6{!)2?YT`fI%FSYAf5 zRMAA$X;r)8v-GDETcrmcazwcG^+yyb*!rZQBAY?|^^DaoeWMW2?w!1iZ{B8rvA*WL z;&wEn)*=d48hsgZ`EcJ~UutLtt?+i-wrh%C=L>!;Kij0TC~sog#KpkJS-$r3*rkm} zDWZR{F+d71v0+Fs^gwYb_o^Z(so`P0Yc5nPP*1|03z z@g@b$t+D(09s}j&-lT7zDSR$5;KZfp%j0DIgODCWj%jI^%w4~NEQK2T*n+^<98NBr z3~awLmOnqT;*sNFWqL2j7GBjRn@yQZkM241?QA6T6{NxtVE5yxx7Nh$1K)IA9cgwD zO|8o82$O9+Jt~_Tz%!E>(MN47)|oIC|jh+2977WVqjz zsxlEMZtbQmx_2p*jhQRo82qlk*1iDQt^Z*80JY^w!NYB!2&+Mm{I#Yj#4vAWbE?G2 zwe;%`-QJOBW)dIQr5N7MP~Cp?h49?&Wue5WL|H^)(LdtTu4sfOP0DWgH<;*hdBl_k zk5|VZa+i^*X&NXP@T#puE<2hH)Q=_(oH!u3TJP4|YT_JwP*#D0`~(|MqsIf8HfFlA zKYjETa^owxBv1fAf8l*HpBIllyj%{?)Y^7_M!q-ge+l`+c$u5w7ij!%fG%My0l~V@{Z)is=Kl0|Skl+Y2}fq}=6M%H;0% z1GC7CpiKGCZyJ9`P#A0cm?(H z4vmljI8ngZ>jtkbS>b%myiDy)(D9F_>Gqjj?dRGjYWvE-X5!|xeQE>Ja9K1TNpcElfIQ7Me?A(4SCo%U#rZ3tDobhDWHBLBG?6!pHRY+^=N{ zcdXKNywqmZb!`=Y!$-Z@yfmsQn^$N`+V~O_GmiULa6%7A$pb{%DYQ>(TB zn3t31{(F}5zyJRKSjH3g6hp2;Qf(cOAl2t79?=Zw=_n(j?Zk_7VtG&`)h&SiP8;oCOu>H+KCW3Wzxi z7*nbATTw$3F}I9PS%W#$oZ1y@B1_#Alz_WrVM@2Ut!GW9`Jvn5eu<`n4TtYN@ktxT z%eL~lfytuvOff6^m1bI0G|hv8j1H4vTGbCdsI1Y$v_ZJg;oD*8*t93H#^?q;OlbPQVy{h*)HM8&szC*+;MUZX}o#H3;6^bekn{7L*M~nQaFf99*3H?W2t@q!N;Pi&LNPy zdji}9dP1*klhk8nFQ=I$c6q;Qp2n>fXI6|aWh=F8R+MVA44SH|o%s4CiBl1Fmq%j& z9*nKEbHIJP(I3{kc8|SI_uVYeI);}4uPP7hZ4hQA!2yQJ6hAWsYX_518c(A4Q00g( z($1+{n{{LikJiMt4zQ~q*UKSMO(_XhoyZy_Q<{%)6Mc}?9CpNQxR2s#NLhQr^b9EO zD}up{Yeo%%si(|+*|iS+YMiUzH@b?nJi1w%eqq@~r8na3|8h+Dhc#jQe;|J5DYmhX zhYg+GS!0Scb&w>`oS_~;`|~)PX_gU>=a0q4RE5Nt0`8$*FLneyX0gy&Jb(;ohHPFF zFvYO%GhWP3Pt$VbF-oHE4Y9|$xbtrOgN(DjTqh24UC(EU)o!DlkNnvW!n&|jOMu#%Gex7PjFW3f5jxTBE#_>&(tgu<<(ih09k!i_ zG`S=DXVkHa_I`4=@2#KNBt1boVaD6(Y>1QQM8U6I*KBu?8M)S9se%@JYd3CfuT)&^ z9dxK{y)ws}=dM*k75DuY&nqqtT-!?ZDEoLb;@KP%*tUZ$2rN%D=yzbS0mYeVe}r^N2Y zU*BGXrW#apcy)p7I0oJMKM2qNNud6JKQm8kQ$WCcui#?Aj;ciphRN9ls*vZ}F_DdP zG0i8Y%o9+8%V-_;;UDihGzr=7R6tK{>sg?v zT=1X{_#hKxCBe$`)26!|r=7drc}!s-Eh+ls5dIDfI)+R_TTm)5hr&CF z19tP0{3}h*vipC}QeDmmt6li;UN2^ED_t*u?d!K-$xE9F)RPzz|fKnp56UY_$=D)RdB8XQT78os2Bav`19E%E6=@+H;BN z^FAY9IwC0fu&j1_e>ApxDli1`R%PY|YF9f_ z1Qj;uc$RAZ8OIM9G%HMjG48>G_E?b)6`94euFu<*4Q`hNdA4a$#od$!0J}cgnC46F zTk)jzfEi)=)84|&YqeCRcga&2SYT7fUiH>NB@Lb#B<_f?Cycc0(m zc*jC);B_OhY03WxXU<;gczf%KGeYp9LCN)VhbF72A#ILF`o@_ml<}2`u%E^4%9ynx zjqMGPMgj?nhyVm=3g(ii|GDYST^vo_>e#vbGTTmCN9%A?0^KiRf65Z7=%!^#>G7u5 z`T&m$*T4{DA0r{TErPVMQG^AXII=kw`MD^x-o5I2+uJ{4G#pbbhb?e)1fe*+f})T< zg_KKa{Rzq*u%O*bspwayRoNV^HQwG`dQk1X7sUCNwHKVU3H@*g<7y5a+v@Z_aNk!d z)OO%<(%0eJYIj1KUV3sS!4&6aLgN07l8y+*JHhv}N3eBX1wqq_)_#d1l+}#JPXk`! 
zXr0KOud>m-7F#I-_7Q}`E{*T0b6$^*8GlEK6@hG%M@%KtW||Kkj}%lQC&W8kUc1`d z60VMTpYZH>`Es+>9}A#ELLnY}H4Z?56t9S5SXOSS{ru0x##dK!?s+t&{r+Mv6Tttq z`45rXeaFIlVETQH_0d~#S}#t@Io*Bp^b^NJ?)T;LXX94o?jsW~@8!Cfc;gz-uX}qr z?giEp8d1rQ6%$o#2OemID%1S!Z5_ues8^d0shvLAe)yrWJ5)gb1=fNKy`NQr5J68$ zfOHUbvq^F9-$sbf7wo+ zPisHzI)#IN2x$gT>5KVe-fY+YWt+vGH&fx=&oqSOCXUwQn^lmCrTUy%@| z3l`{#J|DZ!>Wr<2jB53uhV?$Hff_%}EuQwW`pg%JEx9j7`hep^wU@%d9lO1$U1WR$ z(~(hEn5w!T)@(`pW0MP_oHYCP}|$`s!-$(;X?Q6MYLZV4M$J+-P@%(SARD=aC^7>#4MQt8n@q zH>Ry63J+3Z&=F$-HeMW5A|g4?MwECA`z~a^PP0m%P7&5JV1WwACDcepEzTOUGQ?gV zQc+qy^hRxml%W&cV!)aWs$4#zy?OEsnRJ#D^~kJcTk#fd{*%f7^g`bN(T z^-<+Z_iPYbvX9EqpWeLJI;gQ72}JDl)w5-b6wQu1q29lDWz#vHQqkMj&%uFJvq6$ywE_Z$ z@QAS9#^Nmb1XXC-pf+`*UGnX@V|5L;P90WCcvudB&tQI8% z@}0YBA>`C`D!EISVB>h;Y;1dBT(Dq!xPJOwd^U2_ip}dsOJnc5L#t(~PKk{uqt7@8&@qC6NV`e7 z+8M7v&5iwKdoK!n?zgHkNJur_R!LR_=AYdu?d-+4D3;rCxO z2WBpI+`D%ec7&cVbaCDPoDZIQO$Aay5df_e-%U^MTOXTg0f_|?p_~_q>%`p+7Cy4l zDXs~-O*fvGh(RGhARWLf%5w)~7b@{PmaUKXAyi*OUWDa8D<{_UhY4Br-Lmg*Ijlq; z64?E;*Aqif!IN`k9At=?eYq`m@WULDWuB14bsiE}V9w^FJ?pxuZkBn%o87|J+;1~0sB`d!)>78Nk z<74cfM^>3Hdr(58?2a49Axxe2R!!-hQ&{s>b~!htHsODvLdfbhL91J1^IEL`%BO|? zf~udQtdK1j7po0bFQiWuT z0xvTQO+A*F5;tpus@ik6_08*qI1V;Y|K3>W2sc0jMy1*< z0*Rk@M!LcT`(v_0t&EdQLt13HW1XL+eg)WK&xO0{E1Ktm(5rQ|L8*JicGwD%3krWVDlS^9V>#46&?Tmq~{Ox%vUpr|}`aNrC zPR8ya?bp4VH7~iDg3;`K{6Kdy=!Kb%RUg5$Z(RogZX3U7n-zywwAs>&7m-hKL|F!m zJ?gke#ygsgv{_t%Ul?3dKJsMuuB*NoX4-?F$r7|IF532t;0)dsXVG38pLMy6o|m{{N~i8_sy zvhIVv><%lQr}%w|p?^7U)tV`f+YS`hG}lM14pT0h?*ADpn__2Ceu9@s! zQr%j+Ik9epeVimb?yx9Syj|1t#B(iQx;vI?EwG%EIoaaU`p=n4#t^ia3RDK=dnql1 z5?I>0()TQA2%g!_C{)Zv>7!=S)|nT;)yQu7{Oi?x2#kK<1%jIg5;wbWbUB z&#uks3cTfCkw#F@Sib}&wcL;A(DCXNI5~1Icvkef&42KLS^Xy~+&@y{|DcMq)X+a3 z%ub+xz;8WpWrd)2f>Qv^uF-?Q{QnWHa%ZA3{|Jq#NI+>H;;aXt zlBFG4VlQ&yJ!TVN!wZY~sGhEsM5ax+3O1Z_3|Y6ym5QpcjFO(l*Dp5m-auLR`gzFB-~LcY+6XHe2X=eI^T z?;+2}3SMVwl1!#PdP7mh^h|D+RXg4w@j)`Hz%jZuV$oO;2_%y88&y5U0IKoSF&%2C8i2?{0pKujg3tHqDXULd zAO#hj#qPuWv1tWQMc#qYUSkMm7}xj6nQAm}FbT`>X;dtpwm)+Pgqetci$?wfrT}Im z|KkcFm$uG@jbfn_pimi%;>n@l*)X6QBxhPrVF>q-F1TloQaRW0Py@vjbn@^iy-V?7 z75Hg)Ol zv5t}c7X$_Oqldpg)V?#E>?uGF1c8VXo7Z;=q+DFP91e6D_HQb}x#u@>ko@ET(kx5&Lmh-DG05lMyFEYQ9duETCm3nSpG9HS{MwsPhNmOh-V$31_2aMguV(Dh^N z?vg>c?F_hwTolz17m0eWU+O(&f>=8gxWmz==Y1pgy!E{~mgKJV#Hy#wvk*BUn_>Hr zCN`LP%KyZ_5l;!P|62>u|H=-7&p5fKu*@_SVa({sCBP-!5(r}oIWBnN$!VBseCE-f zS%~veY}`lBgu1UqS9Qc>_xSC(JuUX9xTwUiV$|N|SLdN&FvI#r$F$~G+~u{aph*1EcWm7gtLk8%M5s%WCrj6 zx$c4WM%lLJ&vccbov1eTt^2$dGcrX!Ic46}R*HGYDYhr=*`bYW_Ei@!=2w#<-UjM3 z!@A!5%mNj;F&S9zI)TZD0IIh$KVp$@BY(ep?WJtx5pj77-qxXP&@egwj%DV64{%_` zmn<|gh)mco5_G17ekF~vw(K^IN@(ohs>Q`+|E#acyPhS}_T__j_uO1VQ1|Bg;0REf z)T_AB9%FEePcVcVNN@PFoiDLgosy_n%eL9iVFV03=5XpT8PjmGRKLtR05|VMKg!Bk z#@|yJlcKD(>vExF35{ZPIkGAyE*E4TW2q-h>!(q1D5_Wd2_SOONDp(fJXz2p7S6kz znn1g%=)6yU-9$0W^_1&zinMXlD=B==*vmw50RF>pv_*fa>B z!$a7_M(-6fvXZ_LmDFh_31m7?D~d}|#N5|OzCd^SiEI7sc7%lIE^WDy2JmG@G+QH= z20Zm4lcv8M1C}}V=qLpzeNJ}st~P2E%thGsJs!jS>ds|97}qnW%}-lHNAXnS^`a1gXGuCy5*v=+}gbRs4F z;ityeC&?=s#$?Bs3KXqq(HG>W#%rVLX4DGLlg9F0ok-4;M;;d6Kp5Zd*oDp6WqSKD zuem|?4s{dv-BX;sFQ0KE><9dizQiQ^Csv|Wf_VW7r7m?;K_B#_%&Bq2KHrc%XNC;yb)6?Dg
    8m_xi|aj-FnX@Xav2Rd-bYueW;M2Qf*|UMW^65v zYcWHzHlwLvY3F=?bH7UEF`N8sy~zY!F+J^rb0vIC1HELg^OErPB8(=SYS|*fmIl^= zTLDmJ_$BarQp}~?&*mvK3MIcN9Q!bI&8b}QUM6dyGc3Z4>>Wp^I`C5vKG2L$rrsfFU4j zNU@UoSUpmVzQ|&%Zj2L2c3~^wxxF&SDgW*nmGIQsdp{*0g_@j?W9`#LK3pED2ojiuR>;I~^g z`e)Er>zjatL?S7Qri!NVIhwJEKXnc|oqN}&tYy=?rET^hvROJn^o!IzdOg@Y1lrNb zufxArz3cC<%o0Q{jY_81UFA>^3(R)UKcZ$mFTE4P=l#P-X!EE@uIL=cGN92SYxRCmo$-aYva^{Oxu~P|{=39M7J!Qa*t|Xr8T3t7RIAz&w%T(z z8RsD*@PlcAjA2UY*mj?8u4}mvc`u>t`R5B!dv3#o&rM4ltLwB|x=}EYlNCJSfZmej z`O6U?IdQ{nM-SW0=3YTDW%m8$IG#ui1FO~MQ%J^)M{c5L89W$*B?i2dX8<^gZX@x7 zf$qBCuWEuVJaYL|jh;8-j0I-JL;dLmE2-4mCBc*ruLS}R;);PfT*bjqODL)deqXz* zQ%HcizGlCpHTh}b{XrYYaO>l8`=9F<3T)E7nL;^N&myUpVPn5_VUl-n=RE78Osyf5yd-_M!JtQEhc4jzb7}Kkr#{oMYc_`NB^+ zm%c0AOs%jso_%bt?!BYS-nRh&7P|MShZ(JCwUQ>%i5ElMnVz_+WNZz{d(MY`ARBsR z{4h>G)ggDE`;>1N=l{ptm&Zf-zx@(LmMHs@QHZi8Qpk{mBuNM{6|#khh%r-S4YH*Gt1%g zu03vLS}6C6N_`c`D^NUubd%5Sg6qG7n5e31knZ2*Sg$rSI!3C!aSarqR2)6ylkxEo zL9d!2Gw+gm1T9S(3+lp<6wW^EFM#3;?)#P*)*%_-S7Xng$#JDi95p$m2PgJ24ge$BA}b7pn$;PoqLKO=C@?@<*J zyND^n{y~@Ai=PG4;R`YfNZqy5*b&rpFIP`JuX+>g_j<>yeV@?ViDO?+UJz>0)2K`C zhTxyXY{)?ulA1vp1Vy`G-7s*aUveXqqTWvQ2QZB_30p`(ESBt1KeVVadO{Z!v6x7kps%!`ZvU;>B;t>-^K+h$lEUeca*&YnFrN5lSDXV z`@aKmA_oE8GD`|EASMDpjQkCV{Qw|_{08FZ0T6Ha4aC=f1Mw?a{-2;vAT$I|bBpIj=e*M`;P#g?yflVKvP_mm4>}f^r9t0KKnb2&f+|_|GlHXA| zIdL0Ssl?m4)j#ddwp*TbluPgGi$y*_j+eE8^lBeTO4`Qv)n*01didKt&Qw_moc9JkK0uQ>8ZeXy6M-l6=4L|A)qG z16b4ogDGZ&rHzwh%Vk)Z`%CW-d~0_4=b&GFiPDGq86KK{{5sfYBOC)skUxqw8{^g|JK-!$& z-Z~gujA^6`cpwC-i=Wa^7|EpT%w6lK@=qB)CDy5x_Wn-uo02w{6?T=IRRc1%mF0m# zR=(HAaKVR8b7(Rh`U)LSAtvL|8!&w3XsZoZ9oJ;fA#2O(L-V&#bmfZZ$0GIo4+6E0 z)SrlVJ9ugGINTvT)0^#$LRYhBgf|b}GXx(=8|%)ny2p0_4c&>lc(AG5+648h5?er*Vu%1LOeK(HiUUl$_z_C%Q?CACgd@(6|rHy z1%kbYnSy_Nq>YvhXZmn@c2-tK;pT!xO`VY0GJnF;LMHDm3q(e(LjHE`8QpQ@7vPD= zdlRa#8Xs`8kv6g;>SAZA57}5aS{l~5`nayJ&^GZ_Ju1U4Do^zD#u@6dzV@?1L0tKH5sB8gVxBxg))W zkBRlA90v+6GwwkUs_i8doOW*c*EpZ{FpbIAeO~!<;l&w=AG>-=SFZaGF=5J3KJSG8 zpgSI|vu|`;I65I65atTtUPEvf@Mu0@s;nkJ=;o^r5_GuD6Yu>=ZkXj`Lg)e$fkyKB_r}p4tvj=gi@YATQ7S0ntEXZqb<#ZGivUI z;Fex`1C83iwv$HMqebE+4HX}>#R;kj53~+wzC~xd6ZKz_z=%JRgZN={5>Lwngoz63 zkXf4e$fj#W@5Rga&SD~@lFu4L>sAsT)j!o=cOj;W3n8;z_Ul0v)m-E(;NH%mklfh> zl~=MLs98Y1q6hvokXUT97hErG7zQ-EP&EfbpNtP z6Ef(V5TLuHeKYebWo;jvD|8DnjM_1i`wdJTKxk(r3d|oYwLs3i8{pG2gbJIzQmsPs z3OS#S7J+ewPQ-v9c`09G5v*P0;;`tTm#ZA_)57`Q`gn*qC)zx=kB;Mh8oe@Q8qt@= zJc^vlhvDzCIwX&nCIJE;%z8vUw$6ztEt+`8DC2qoWH_`)E*>951h^}(fO5SwZgaAI zmQ&7W-(z-lQBSFs{yR>fshNK*?q%9A*%eGT5DEy?aYW$_(V=^)A$c!caq{6dV&R}p zWByH;Oy(RlZdQH&VYS@T`$UF_wj$HRi5p0qv*cba8!j)t2~GWMgG!&fU3cId{y(;&LAs-#GFD! 
zk3)X#oA<*Z9xPm9LEAiczTJH^CJ4dbhD^lYp;k4Vo}_)p92xhvZ>TZBSwH5Tb`tjZ z`q*$@tTXDRlW@SIsm^yj_i7DB3S&cA733AwYm1%aF4yOOnDan!>kDnW`mx5k zOC&WK^9apEYejJ!iN{Vp|2l*|RGC*WB}KlKDY{GhX)F1}_Ep-t0pwABLZv3)D?k1R z_WJjC8*8lm{uQGzpm0tC57&UA^}DX&&-?BTk>$m5W(|n^U8jw|)3&iS!lBU1F)m7f&^-s^Wwh!Kx}1D)w7=?&|0fFon!-CuV?}3z z^;YUgP{#POZA+T$Xr{{1Cj(ESnP0UnYV02Hz}{i-PxmOC=vELsHS}plDB%z9W@()1 zm+gH=tTL>S4Eq?!ky~`uv)X@>ekX}?Jd?&U55kL$<}_Ak!G+@F2}qcdz-z&TR{@Pj z1=w@?)!8{;^>uj3rqT#9=;`BPZ5~u7s*)>wxS~10aH`@j<4a zm&5HlM*Pze>sDd04M`W*iSavHJC2^?TN|Dd)r2Jqr;4=|ijiVANMXWOmQYh&DHhLi z;VPqUu8+%VM^9{7$jRGo**7!Gezpsg98+QLkXbKuS?k7(bz_fZ61Zq5$T7jFV~+_B z>jh(oLuvdauGgkQ*d)nXkv-cCbUvHYu^&f!!L&Ezm=L>1I{e}@q6;O+W@*$a&#>ac z6uf(tS5^&oCX#NPXZjUO{+S4cJmnIRM^C1=qk3WHa2wLEIn+xVk*X1O4_x0dUtRs$ zY6B^G<<#ra{AW_POC?_LA6jCfd)8P)gm-(m6+Y)LmTrWWVO}BxNt0#-l~6bbmYu1^ zkMJzdxyCKh)yY5C9eJLzlBv1nhJ3#%DciYNQx7$%8kQAHoAE6^^+CH`-0h^e$&bKG zVpFF17k#vTK7i|xT(G|)5j-_N;HICJ8zg>!`nXgz(~=0{$h^xDYr>7fs1Uk0(ac9a zk!$1+Vz$rrJFGph7e)ZVg>0Oq2@ys&yqR9_8XYN4n)Bb#UFUS}^&odN24v_my)j?wYk`iFuM^tW zm-67sA(v%#qFAGGj_YKM0{R69`+K#3Je*rZD9n&NIyEPf~o-x z<*j)I1R(~HS-s5gV7fqtMO7|pT}eO z^o#OI7x!rF6q1#@%ZU{YIRY_N<{YD|L~CK!YcEk|Qw(7h_)f>7Nfr2|MZDz3DO`aP zZ1;H;#JTE?-;wiEim{;7;tx8O?IMu{6w(-m^7OSL;z>uhK{+5FDb?1!FRs^JOp*M7 zcjmE1W9Lp@IUqaCh6CmDFDn=!1iNp1)1Au`?oVvJ^^NkiFWv6mEK`{`NZ4WcwJP0> zallMoz@j&FA6qpd&VfRQR>Q58EiU%}=1ogJdOWm_9_F_z7*z$L`)B{BE8)MwB;URb zEwUf{0yWn4BB{*AY06ET^y>pWCS$`oDqlo;br60)4myeWnp$wi zk;V*%#I;|4_b$eAc(lh%fZQ!sZnoN7vuMvUJDgUuzX9t3QlNtuTD&!`Dy!>jFvRVVFhQXd-ihVd*m$buLlJLy~-&gEBx z2*Z(aff=caZYR4r2O;CmJ}gYp5sBA=GR#<03tH2lgubxi7mD8vb7xiL~;qprREBpOm=ZJa67*pcMD|5$ppMS);g&X_>L#>(WrO_>?CvH zBgwBitf+z7vt+emV`q3I07~tol+0BmgOdDfzl)(NQ6R9VHC(g_XUK3`6p&Ypy{u$YQu$FjjIRfoSgLe##rJbx+wIQDmKNSz6?XGnJ znV9%zw$G%sm_M@jx>dsh1vYNWp#bJc`2OPV}dyXvjO#7zs(OMr@9>b0cyi zoigf+sf9lsq%h8o&9eDPhkb6|wiAvGvPJ_M=L26cq9W=1i+Oj!)jQfbZJn;RP7cSv zHjMld$FZtiGXGF4@#L`9THMvZZwxvL|LE6weoA$#JNXC z7TeoKM&)>~7~FWQX2Tev6otE8bo|Ysm&YyxQZ9tX8kvV|lq=t0YDRDl7NLROyi^a) z;MMz$OkoF+iuKtq$o1fPs4$}6uYE&{(9r6I3Kuyy&=!xyFoib^yW@fOhPYyr^>)g> z<{6*JM=|FInVty}V5CbbH0Bri$i~~{8w@QC$A)Cr>#VTLJtEePEk4J&K01uVS5%CF zXy@E)A4yK*GZ<`FD@N;-=h;;!+FNTFAIBr;P3|3){$gN}If6&ENO4AyQ{-C?@P>0*B zJpzDRx_`WQ4pZN$7QGqSQ)u^aeHz7GawC1<+QYy?4n|Hr(V?B&OiS(Uzs6YGXG>5m zlcNdjY&r9BoU2jz&*#(QxNdq9)J{L35J8M-6#fSQCDN01%{}Kst zdh4mt&o9=a{&X&sA99|-)u|^fj$x%sdK=Y&1Ij4v-nI*Y!0co zvu^)`E>daE=vAl1#hXX;Wxt_o8#pM=_^Ngc<0QyO9ZaXyW?4TyQ-!c~TV8(JF<z|lts>G65JhCP(-YMb+J)Yc z%~$Pt?yg=^`viSIJAQ%23J5O9Mh&ksc)9p|t!J4lylOn6}R4<|t=eles7Ng{CQ zK0%tH6yEzt!Yb7tIHcDRJ(!(q2nZ+vbNtih!H{}$0_rV_zzzCffgaD$5MYbH0*b;A zvc(u$E>ESS34j?>3a&K9d~KGZdVy#U9AYs%m>P#8b^oy~f#3VL?UD3F;X+Cf6z^re z!NNg#`A;7{Pf(*ByG{M9AxiReVLhqcIK$=g?mQKJV*oRd*#{B3xz2qcEJ)1&4nW`d zJHV;nA^{RgU~6$awvWP9HD}ahsN=4#hL8UyXpO7fO}iq<8++{h+n%iGoSY*0BHb^Z zWq1=JnBIXmD7N5dGThpT+xN5gv}EJnDAHwwVo~f)zMiE*S#g6bC7KDne8Ol`z09#= zjSt1%3dO5nqeJHa!C|aD$5G>CX>?QDG;-mip2CGyqdWd_E}zOPqQ6huMkq&LY8$?G z|6Giw+TDa+%Q95D6`0unt6VyeG+f%{2)5xH-b}`=ftt>qUwc_eM&Z*9S;a1&GV&E& z9$ZF|Of)$Dpi4Ju22UgiB*QXk+KW5e-^B zHnAd=nQZ;dj<d1uHcR-&;vACF7^TWL)bA2AKn_&pcofeIQq#3ZYHlT5Oa5K ztCoqpy2~s1l3n)Oa=`+~KY&u>m^sY2^9yxoCYo{v#VO%_`XrxGz?#chtWI0CqSvK-@c-Xg-7s*WxM}sL*2Ce4TCcC=)7L8HjBI#n&7mj&m zK80LIv_XN7MNjKc(8*)QV5JDk(2#G2H)LNc#Ktza?M&uQloX zyKY7Q@jfQqy;j59(yk!d0G+7C?~3t{m)iNFHoD2Iz9z&ql1H}V!R|~+`kD>@5VVqV z0mm+~P*lOev8vD2&NK8R%bgkK5ndW?tf-i;+c(REoQw8Bov2t*hD|iLbJF^&Y(y$t z8JaFm1ivaD+?k{^S1HrXI;wn`i2iU{M)NjLibTQYEC`H2C#s0}P3a3;gT!TOWF3f) zGWt?uDXs();uBKmd>=i@;oE_6ZTvUYT%!|?NhdB?PD)!*OFnFbF-TG$EeV+((ME*P zdVn^%d&7Cecc3C>o!{DuZr|=!IinGFs9X)VM>`G`|kLNsT`a 
zVkbPc#1sN_9fTMemq>X8u}05(%{U~xlSczedHU$6N9e-PFUzJ6DR~VX7ugX8L zsPO2jw%d87k`R{XOGG~GXy7_XFED|wswu>nbt;2V)u1bUA3hGxeRMDWnTSQ@r_N1J zr!M1|mT#mWoDb!Ack2ubA!;*$e2MZ9Lq*Z}d{;74T4^E!PjuSHHbq%Wm-MDQ9Jhmy ze`h%Nh5k#s6rzUu9^}!Djv(4Ghj(J=NgTQ{y7Gu5%tc3;!9w++;z5?}Bq zK89*uVdx`f_7QW5JLKq|xPmInc(BpgS$yAAuT#$K5&&fPbq?PdEoX$Wa7bB|xSf~m2Co95X6(nu4z82B{Ls+C9!OFI)`p)xAL(!&AF0bt` z0!Am;Htor)bwf3V6BxH{F^QZe-EFhS!Qv1VQkA6cg5MPT~`uLuCKAZVcSDxTm zm_GeRgr^i5xX^Rau=2EYP?Bh$)O0(E@liJ7E#plzx= z4$1=;B2^2tq$%RPzg5%^-EK;3l^cEKQ8|9oI8n4sDDdtRsKpD~&}{B+);T_`ILSGYgl-C)Q$5^72J94$ItG-B|FBwXb%GQy+Y~Xd&=ce zAHBElWaOQXb3AC(z;^XL4_T0VWFh8$9#ja~Xft$(MrUA@p>vt%eItoza5;7G^VGD* zqyz#}6O2vW5Wi$P?CoO2w8om<>+Kc)J*W4r*e{`nQP01gdOAi|=I|2;EUC1)j|187 zeH{82T;WG(ySspIDSy$-cJqXMTY%5aQhY;d&6O~++YMdmrhCnYKG9z-dve`y_8Ay9 zLKp|+I0w!hpbHj?FNqba_QKmAT`6 zdmCq^3Q9zN;hRBRmwbf+A-Na&n(L5^u#7;()kz|{Ok&|}L;Z;D+YZ=iDII%o!3FGgsc;$P&8X-9D@yR%%IaDO#Nu*oEL8y$A)j%DP{9q;Lv z?uvZc{=Ac!B}WF)wN1_4rWQa72RAj$u3SGXj`B=dBHvN3P0Y9#cBxij!S*-#%iwSA z_g`us{+0BlUOUGGI(J4xh-y~I)yo;b$<}bX--KyGGayX6Q+1VY>8C+1W*iuSJQZ&AT-WY9@S3M@y3rSaAFgJfHu*?@cI`TLgEO{J}hX{|;OEbJ)d zH!JS}X_jb3h-z=)7*A?3pRgViDz20Fxa7MjCj2hyvxGfNZ?RK20X}EG%)@iTke+~9 zj|a%n zsl_0u!a+wHw>jwu*Uu>`u4dDb@$ObW>hI`($WQMBjj7}3ywhUCReVxA<_#3DFV73A zp>Xef($q=1p14_qkqxalD;|iqamw4P(XKz;=%|wqZ#j2a(H|J;fbqwsA?XMeP-o8} z#W$;dt}Mw|jXmmG{NPE!vAO8{Z$EiNc|-&0d48BZ_=juzf9~DXJHU*cRlla8=BbCbE|YYDHp5NuKr(&X+Huf!q3#VP?2h}=GJ?NSmj3JOEf(~>`G;@1#uK&no)?d> zQ9~QJ4Z)L8p=PTa5=L<{FgH9p6tHV3l@Qa}b=2LCxN7~6ZUTc>|#XKQ0a_Vn>~&R^eFftqTkc$~g+g$_>`7+!zs zW%*AHbN_#9hzmF+e@&zPU2x@Z_aQ97DmIA1-$F*rR!OeE`s{*X_b#$EH3}&+EO53t zTqR)Wcjdjx?fj^L>2E2-mS@%CNsq;wIzg`8<$8{I@i&?yaxMl^3Ik-Uxhy39E(bGc z@$P_MoFtl&{PBqPTcx9NluKne9fwhNw+U9FGhuEG3^I+TxN>BHL5P}Qbckl- zhyE>*U*cMas-0dY6Gw$*MDg^|F{)HXMrC#M!OZ3KSo;W_W6@F7)<&X%mqa5Ht6BcM zB0{0yo2G!ph}nn2jUmSWkVO~>MRua$=|6btJuz>;KG3K*pogJA-bq|6lCJG-b z?ntmNyw)gO&~|F<0zAXwvU<^F|EbVm8zg4U~sNks|O55XvnLi1Y2q znJ@Q)=#zS0V#tTA39+*qqUPhX%ng!GNI?yLM-7>N`|a{RP77Us`tLVF@4s`uZjvBY zaz|RBIgE{Mf8>Ro4LqYRWvvegZFKY)Xv z%V7|X4m5`&z)yE%qgI_hVVmfIkA&O(Xi%7RRy9kvFLcao8AXWH3_ovFRQI$`J-Kj1 zV4tNH`(DYvZng$GH-#pNeJ=q3> zIIY82#x+KF6rJ?D!shTZJo{#z^Nc7S)e^(7tXlQj8!F-?NUYDE_#A>xzZdYa(;w)- z$~L)MuNvQhI=sagMne2$wz9_MMRp|?M zT-d2{0AmGPK(1jl6T`bEL2tQaa$nZA&I`$PF@mHeshi9VKSl5f`-Lp8)5f_USrIJG z^H~swI@hFi3E0QmJv8=FJ3o=n?LbFwQGz3Xn3`;cyr!UHR0htms##7jviPLhKB-Tt&BFLpaPO~yN1XRTnnFMGnBk-Fe+nv zIPT8<#k~=JJceM6tbENa+XA}w2A9OBUz7Fk{?7c&u9<|}-ENvK8es)*% zsoNd8vNos;s83Ebx&C4$s##m#9dP#Uw2{rj$pT6Jv9B2YkHh5t4KgZFj*$w~t9bo1 za^}`L^&bZu+wKlS9n4F`DT{OzSc1N(8ehIUqWi$v=3-zcG(hoxr!qAiZk#cJKBzs5 z=y&I5_;g0#w{Uc&7qaOkwFV(rIW3Jt1yc;%RAqi~e{Y_evJB(^Q(W+}`E{o0nRA5b zgt>sT=!&T8eK*UD>Xmyy%|y!}TzdYBWV@i{%<@E3T8<<2OyvhVZBhj733To!<_)Bx zJ(RC08PcjRWQd>Y=;NNyzWT))N`8E#Eg+0%1B!RxYek0JNDz?GJJvVW?l$1B)aQ3| zS*s~M?MsU=s&L))>Fjx595b7LcE-xjc%kaE?Rw65@Q;~_EzAa>6dyiHkOXL}fO6C5 zFy%rSDaaP@(By=}@OOIPkn53QckXT42TpXIa)C}f-rBH>Gdln2?)4bkn?Ktych{9c zYZM*`@fr|!{M9m}HUMY9!oqpi2nLPkNmjQtrX*x)eQ@c}J+4lYtkqn}7UkoFdH<||nhRCB(Z=E6~W4P~F8v~o{zn(x?E?n!dj znk@1$5Zu$A=T2VyKyYbk;H^le^%)cRUYGZJw)Gi|>`LpMu8PL}d~djQK6+-{E<`#v zJ;8Ozfcw?L3D7MZiT}AwWAFEZinu7u`l_^OoFZsMc|6R&*Xfs3)8u@0q2y<2X)85P z?b=0n_NA&1bPx7T-c(<78ux0b^uZKV{6WXyL{{_J-$$b6Ui7WUVr8uwF_qoRfLf3+ z55nZhFwh>y;~~M;eAJs~Falj<3=5PfU~=!@|R-%zjI$s?Mqw|ZTSU~bIFapiBS zt8Id9`Jp(0t}VWg21-DKrHGqJMz^qXzfzXWO4sZS8xvnV^0ukk#;?!Yxm) z=28!#Q!#TH$S=hxvJ+gI)$!pV#RH2UT=om!N4RCrTo>MV(_G5G$nV461dgo;b(_}4 zP4OXF8pOkQ`XbWUIb6=oz)M&Ab+*cccT>-A$Kd!qK2}a5wiY!5LO}xb-yKAr4vIf1 zMB@a7lMougdKj2lxtUXLt*2Jr_mYosHIBH9_e@^y7N{a0_-Y(3%Kme}awEag2-@OelcNF@4!PX*gN0xDkOmU 
zJM+30PxU|7tZN1MA4{@VZfz-Pba#sqFYRbFQ#OukNu#5)OQypkNm8>L!R}-s#P_|L zin`dU2<4&)M3CFq<*(8`t^Qdzd_-E-P4)_6!2LF+we3O5k{YY)OmM!7DLAGWLawvt z)Iv^qaJcW>qZczieDMv$#_!);nl;$&{dS{G!KZ>|DVjs4;y_<3R&lEU!5LExYf64= zMpxR==SLj)v1a0K%*qEkmIl*ifI{>qFW`T};{QJg7QH;Wp`(Ai2~yM2|aslGt2`_Q0*K=!fb#(3U#~J(KKx)_v=BxAy90#as zmu(XoD_6k0eB>IQ`PMaA1cXy1_*Zb$aJOZo6W0D+`nj&V(a_+@ zk@6@$o=wNWnt%$!$&r?VXBUaieVZ=34uzC+;4$l2`7$(KxJRwUs)SY?tuw`b9Mbny4ieA^vP_O)+m+UFZT!xxx^W+KI=t#NInnzh{_?Ff)TZX*i5i$ zQFI|AIzdYFvD=_bp*p$&J{1ttGrff@oQz5GAXo#v_;@_}R{#3OCfuH^xvIkXEwM*U^1)pzU*5Cv*O%G&+8sxeFAH-+fa+aeO0LouZ64YDGt+i2;@HSKfAEBUrf{p^E{h*D3H4M@`BFHh>&__g8QWGaGsn5zkTc0j_KHd9fUQ(cVTX>I> zuj3w9=7r&1@ms9QTY2u65IpiZqueRiviWJ8(W3#;xbx=@St@pDOFd9z7CW7}8e0)B zoPSy4F|2)b3*FI2Qj;W30M)p2php5{KjkPAncj>m=oG^Yx$5hh8aEv|c_EqVhZduj zKp&w(F;zxQ;dKxK*R{}l0~U21+bwUQdkHl`ro!rv_CKgN%t=y&fMGfuNX<~9K5Lmd zR)rr8*SKI2cDUtyLUm2N(UaXXO3x<(@Y1`NF;9?_K`vA2Dt(mI>myl5}zP)BjwW{?Gx=0&2!Z0qi zm+e*;Q~v&7U9mZJSbGOupIf+((!2zH1uIhlZ9h=#l}U3k>?;S{CtjY3laTlD($$vf zxf!!j9etrqx-KK4=g^%(*I6YfpOu{2eXsl#1SQA?Ems=co9DeWDc zj{1SZN>(7j$Q<7gLW6LX9Ion&^4Dj!SB?BO^GtwrU)KJ>_94H5TxZUe#Ao;K_j2>% z#X!>g>)oikb|sQ8=@{cDxu*?Gf^-I#YU|R3tgP-$IAtem#Ck<( zC?Vez8f!3at(Qs5+NBX$8vQEQ$i8^(?78*;!ed8CFS6{r*s>=BLXYa=8EWi3MEI6i z*91_u6nbh=gCJrUf`5w&)wqOTdXvpZTz|OZ;QB`zIYa^vU4e-5-_W61j54TTz{_M64EJ#`n?f zFp}8CAa{H0v=z~Va1F3)Kx8|I{irgMeWk#|HQy3U0LkK6vU z)Ql{!j(tQno`yO=UL#8!TF_x>tR!FAh!BLhXXEH+9xIB!e5U6Wk-GF+;NMB4fOK53N(rNI*_& z@+Y`!+vEUM?K+YzSq9mpfjCWCYDTcbxM5(aT!|`xNz_c=E0 z=Oc!JFLjavx*X$}K{RAp1$mU>;e0dEMgq^%`2+20$60=3@?p!(LnZSM`YuaKUV_kJ z=E4zN(MUd05HV!bmYUUY%6FjE%c|a|#Zuj@WPBwLB{wm_JhpEXXOY9c$n~-?UqMr2 zd~lav*+t5&C_4f-gXPux9NwH+Kxg_cnprfEi{LF8MW?QYrg04t=PInO|o`{p?S{)QTo(&sj+ZaxV)GmMRleMK=5g-D71 zx|!bBCB5EtE@Q$+6aL#$FW}$3T8;}kh$7!udp>%3y1LtJvWY*_8tIQUT_|um~>^?Ky18ls+)P7=zS^`-f zRN`k+E|X2+VuSF9*pwK>??B(ts+L2|=p%i@QO)JG*mW&40XFu#Nv2;!g8qU2dGMeV zk|`Icc8t!|D%@!GzEoD)ky&A+17HV} zN=NKJDEFRW4QGaWy1u*-I&F6>XWGz*`+ZY{RMx-7PkFZG-_WEe&ZHRfW8YcqQu8k! zxI8{2bS~zYqhD<=4#GG~3fw$!y=tw`eGFEZAACeD<(e~ww7 zJE#avOrl@>czy6zhuGO_q5wyYBMd_EnCBSxG~y%K&R4Bta0Sv=ZoN=OhM7&cwjg+a zc#S=rmUGPk0Ms zex*~{$?$X#N*2xdAI-pv<%yvrH2}IO90a4lUo8mTu<{jG(=j{kvjP#Ye7)oCH4z(9 zp8!dDE)>Qq57#2AuF)8VtK{5An_nP`UgvXN`x9nF5Lk#mr``-aIm_6L6v9ceD>AV zAxY0+6xZ4?+~Nin>8~&^c;YchWs~3G*?v#-pS=l!S;GxUky#Wm(u+c0)cvnk)?!!7 zYnZj_xnlWFB|OKbIYxEDR!AY* zaZhe}lC4^RK{+*x){o?<_!U${IMLp2bF2=3s8P1tHyKnL7@m9-H7@>kI#gjp*UZdy z?697CM1#5ii^-+QAywGH&_-?{@{T!GHI4-8#hyP+xd?aS^Bo?52+gmWLT{NG3JpZMiukQ<5QW|7R~41YKzA*RG-jh zOSgE*zMyfT9H$Cf+M3+{!G7bvu;Bea@Y1q^(sNh!R0TJi;SS-lF{;#jq%(?<)&^nR zg@r*lmGzZk*jD7(ObmI-lN9UXZ>;5&81qmTNn=?;k5i1P zIZCOv+9agV2MPy1ek0CGHnkvAs($2+ol;f2wDb$nSBLV8J*{uf;_o2?(`0|pWI(l5 zdc{2~DHxwLcbM!OV~&@oVb|Cizjjk%c>b#*Pi@@fQK?D~!FS>BcxT4Rpz&F29|!1Q zWegTJL0>bHvyBiz=e25D%Jbv#8@yY&Uao}6F$A5>8FitV! 
zF-9r+)Q^AAIYxq5mSt1aMI#Q1(Tj>}_~G=BOJ=SbTj!TT!)=>%5?~+S-hJs<2S5Xt z75vDL9q^zI*CBCH^2BKT6o&^`B{`iAHD^c9t$Kh4 z_g67f&>NH)d^rqHfRJQ(6A0#5pa$KfhK(5?N7wP-fX1M~skfx>D=!rtzgViUo~(ZE zQ1etsQA+dqDIpXdHcOJ8`3wwjA&Dcy5L=`%dz;`8k>e;cF^5<^Y`lG$NB%Ao=;^*7 zPZ8<_W^aN382qCxHez`5m>u#Zb;FFw$T7Jhb7v#or&=Nz7_)+okHXd;K7rgC-G9cg z5Xea8I)u5f(?^nAjKQIJeT{5JHMsk{i(Fg+R$41Sv1!ruoYc>+^`)w%Q`3cr zn{xrBGY>wW-hR0(p|!qeo<*$ zLkm!@4)+$-NK=DumQP;HE9&~h8}fas^uztLQ|+g0HNUTi6*h&o$4tiDB1scu)x{pp zLBnCkD{5U&scRQDVozS2A=E*$J1=PSw5-Us7@dusN7?ZfTxpJUy3z9V!q4B*YywUV zzXKnt@asQlf|Lt{(2$MII-w#9Ux%i>iTlm>A_U%KUwT+3D^4VIzIH5P)(wH46JtAb z{=|!%ja_^UXaz_8)FA0wr3;HT#2E?ts_0gb+@ZiEY@8{NX}Ks5&*RM_DsRQnOAObd zHZGDGbY||nktQhyUo|B{ zPDehE?|x!_mP_W%wR@HbRcids?;tI&x_>4Ni-Ye1(wUOwtnpGaF#n@HFc%wgvwMIwP*-1G7?c&$)vs!-54YfpqWPiX__eH{Z zl{j9Z*t};2eDnXHBg;1<1WJ>PhtTTbXUIy&^Be+V3)CJkH04QtLHERr1rd$$qZ@qw z__kRa66{40{QTf9CQ@{nCO#;5&}TG_Gq5kMY>3wE-+!qinZ&k#Ze*XX?o_A{L5OyA zAVK2(sge)mCChjCq>7B<)ZA#uy4@}_TnD(m90(thJxH}7vGp$-1k~ItEKsav<3*`nIe$&O9-l88Ke40Bqh@2aPl4PJQ=t;CC zLH8w1!un#hfBIx(t+igZaNebJW9UlgT<$DhcQzD)zdL($rLsXzMfEv4i;H;W#;C}X z2fHS#QeZS&g2q-fA#gHj%K2O?(#@$=%eF6?bwkA^iycflB7wDMZ}8rjm`XKzN-ah@ zcvB0J4ww_9BQS0fs*%_rUhc0JNo8@w)tpy2H80WsDynf}^Js-un;=RO;TRw7zIum_veV(?LMjhGj8kP54!vIX=6&=hcYkdh^yD0I61|;5>3mK zotd#)|CrW6)y}%YehSWlPX+Cr&GElyWra278yORp%m(FgTa0F|yG&lJaT1UGey!up zCtFBwU~}LVr+{Lc^ud($&GCJ|{>jVeqy}+hqXW_&Odyo7yM{0+e%>fZ3CX{55a{Id zoKLYUV_+d7Mou4I#=?2;h|HX%T3BQIz(Gg}=&xY{yKf|H_T~COr!*wtxYnbV1G=Lb zYe}da#H-OHwA<&*oljo9zN70&WF$p|TI0IA-@xU&txG6J2qg0>U;kq9vh2mR5QUjX zaz4yiZHgKuHl_r58f!IKf&wAXDuA9M+zlU{0{Wa0t)aKe?P_9++)h7=yME(pH;@`> zR1dn|m+-6le>rVwx5HOwd6ELG7nGuKw`m;HC|rp7f^!|J`-TF`50Nl=#;gFX3*l zOc$$`^B+uwo=(so#1rq6xR};k$yZ2FEVa0zh!io|q@wJpnk5|yo;`y5T{=U_^ zW6!B(`NLtxA0Ico{w*UlS2tTG->!pq0}K>at0q;4D7Y!lN5gcbQO1ecTVaMdAMsgs zg>P)xZiI+>eLokj(T=RwztQ`3z&yr$9+Ca(uYwYPCo*xr;{S%6?_a6~|1*RCAN}2Y z=!xjC*M5;(dIWA{@nXb1d(9OfmMVRAF7PO$NHw|_6>f846f+lvD&HavHuH|wE;dA{K!uG)W{$2|Z`uCG;gV3$c@DX@X$8s{(6omW> zUNEucq%-B0zGeCHlau+UX`9*X14%qnj1nqBgKbb<=M$_kZ@H$mW@^Ivs%L4Y*B+yn z=oYi+OC4A9#j0;0D=rEao~yKhU-zv;ouDPzkyk2v^ z?)$kuo{vW|fYBAJeq7@<=sJ4wqZ#Poh1=tu18AWlg$TLqAA{MAN zQ-+hXEwX@Je*}m$gFMh;zV&tSjQmq+ivOZQ?qd;q>!Nw3@RkeLyELEVXD$87d!gT{ zwGPTSlx#z8XdaYQ=4Rntuo!|stAfX$o@l^WW1P^bE7*z;D(dq*Ua)yyAXeJNZ1-5J ze!X&{iN7ogf#gm_3j!bHOYqK%Ls!Eh426CY_G-~|I%ImfKsqzS*Er@|GfxQ9l`XYn z=$rQQJb${&-x9lWa=_7l&8*A(AyyFa^-PKpRL{~42CUJ8w4?OZZrmOWj9$0i!P!f9 zE~bznlUl!hj*# z>*>}T@X}HX)Xt=C?<5TyH&6=h+x5wBJ@^rj^psC(*N!=K4KW$#u?i`tbfcqmh&Vaf zIrt;&QMM6KZathjySkUu@ph5%rK0L{<^${{A+sC13~WzWa{J;?&hBCd3oO(!mJS(^pPqp>Z8w&g_F>(HZymivNmJDd8f^mKYCM)>ovxfb zul_#t{W}+*MIvMW2jT<0aojbZx|#Cn#jzGF{oaR0L2*~{v^lu6Cr+5H6;$Sp8YlU_ zaEUfs@jak)_sEJ_{u1_Pzw?`BB1MH3Mkh0~y}Rh}gc_WPL8MQPx(dpJ^9ognlIwr< zrgTX+r#010xoeb8n(92Y)=w4TY1HRu5aJ1RtGoytk3t&DVOAw(p&{|UPm#rTz9Fv3 zmWzdCO}swLH>(rQOuC83SAD)LLC<8YyZzP;fKf|ys(>8&e5uXo6Q*s$4@ zhh<;@=hW_42D&H7)=!873QPjO$s%6&U~m@lYW?)Vy5Jd?#B{4~npJNPxFIi1Z0F%9 z8CXRC-#iD4!`0wo5mZfvl$vsDgp%l_!;N{{t6b;e&9?h5DGpwxq}ufP+}cBAixg9Y z8mYojoFh=q>}V>+jA0LXoxRkMw%k?__$;~GPC1n^jj5h#{aVx=p~U}6_3cNl8#^rD z-TTn+w~)2tf_OLfD_W)|Z<0vqiN$!9>--Ul^dRmzF=aoC{BS@O*SuoG^MN0<9XR|g z3fud&kHnNT|^UALj{d7IojGs2cw# z8?AAwH+*}{{Yt!^lHviauqk_Ui+$jR|64f0e+L~Nr(iF#B^cfq0Vr&CaMvQrqR0zf z>mG}-$p5*1R@+>t9G2)8@PSou2Q%DLjnO1L}S{d%`KwWt)SI9bzQGo=3%)Oif z6!p4iZcqX$?gU}2G%H4P^0{RWNhh-npquCzq+Nf2^0Aldf^968B_!alpny=0gaGu1V2{UtM#DiZ!@3b zv6@Go)SRp9Mx%)tYkk~b>c}^;1MYo}_9)mrC5Sf0ek4dAhI&{J*$*k@nS^j_QJE#Pm70+4<+#p2 z8*u5APQrwQZGD+basWBn>yE{zQrzVMpIUAv*jy_{D?WJ~HWt)LOc&e=b*vDkyq zR2SN;nFGpyO%zHf-4pFZ=jX%*p@!# 
zJN9^MllOMW14HOd$zo`2MN5%Ui0j?m^EtzOH*@)RrCxe`>(qNNFZI}_WM2M%2Wv1W_%MMKzdeyZ!k0ZXRZY4PXOYo8QVBamF~Srd4W&r#|L z{I|9X;GnY{C5f_SfQcEME^i0)zDXyCt@mCT85ugEj~+4NB!ea|MJGLrzF{W6?;$|+ z>6_iTpGUtACn1cs%bH=$q^VES6o$Bo2gSbLI}_g>~Z`WeefQcO(&dh274-}Txp_k)uCHk}ElZbOXfQIUZSmKXJ^rsZIsKy+SWTZTU z)sR0TQY49@xb|wOVUq(JSK@b#^T}^=@UFoe&^X&Y^Zi!9)GE3JMh{^Y3gKBhFzSu< zPtUw=V3d415NoXXuJUCx&uh@(+|VlYSUabY+*H8M0n*sK9e7Whk*SNsEHlDz&+_3~ z2E2nK3p#5`r0ACX3Fv;Wd0L61bC>v&I7Sun%yX04`s1<>gf8k5g~;_fZK$gVsxRg* z-liG}I48!odVsHtki}8Pl0^OrfAgXu_HfO~*368p19E70j$q?<4#E!)9nR~(B{=Ig zFgCEN3^en3tCNmn{P<{C-n9?4DYtG<$8%Qp5BA@?_i(m45r6f{G7WNUw64`M+++*a z8w!KjOOs_~a3y<&V0Zc~OsJ3U&MY3j&DdR6y$A2xq#ZY_4!<`epGkwcTj^gdH5?UO zkV564RbHbCw?dYoFXhUHy4v(KKWZd>AXQ8~Hju5);Od5D#lqc5qy{@}`-f(g4 z>yt}I(yJd$D7&teo-ud+M z%A^9cn-?W3>wiXSIP}hKfMsbD!BHNEqS&I*P7j?%wYnH834s$`9~_k)Dn`iPuQh4G z#saeG>a?^EnxMN|u=9V1NJ;!l7;`O1tguX2De>I_FS{4XJ0E>gVu}NzS}?lK!^hsW zo$;GBAdGAtGgN@0d!J=Yb^J{7+5hOpWxsphFG`7j-%B@Is7X{-R|scSnMv$F)~WjF zsoBS!1U;M|0y^CRqWx057gm~n;aQ%TzD==)k>2Dt-kQ=g(T1poAN@V@v5(rM1~9=yp(cbbh@j%=YOx=J`ktpsyA#`Z~1$r7^U*d|J%p)Eo~2A|oG_oB*j=n+v1sCoS2 z@WNHVn4TIk4!2)rv<4YNylk0PIEkHIceXiwDzn?AzW1Z5j^Szfdk>G^4DHYO+zJ>x zBMS%~f+$A~&rsnGABq0f@73N%S4?5ko-#HnOZx*W<~qo9f5Ky&=dUmx;9cHy!IKOXvew!U8V#@z&0vl}15`m{8_2|$Ajo#(m-eB#q>?X= z!tj|&K50TLkLFHG&zn}HxXb0{<*S4Z)(~9KVwj^0_%fBc3BkjP$ie))(|1T;X3b;l zq)BE;9iIm@9BYzS)h=JgD^@q&c0pF4)>VM@{<&S-d>6bXZ&$Bq=7~-%$@s=?7ii*S zgRz*W@kurPk;T~(L8Uv^i_TKwIhkxdynS$K2U9mUPuJ%5l%Divx%DZfR-%#~Up78^ zAVOF^IeiO6(L6iA5v+wn$}@SdprCdcpK<`tYBrL!l%w3%3+|EbDR%|SyBB!Rx=)900YTbFs_+Hhavq;B#7cBy=7OTD6XpqYX-~8HkRI9`AoY0PU4 zPzXbe3Iju(6ZW5}pyl$|`)X6T&)SOoz(B&wAd-p|Z6Cf2+*Ae_ksl1lRpFPFDX^$K zNpNHyw&C^}O4)-ajWjcV$Z9D@He`$`&t!auX(`y9(B*+$0@U&X7g#4efwtNpqzvx_ z-pkv`W#N}GS}fRWX|nU(-Me>oj|z$!I=QlqPO(&3={uc@Kfpi=cld(=WmK zymaaa?!;CVGnE9tgyRRgv_*%I^^?Qeu-{*aZspE}fmOCncy)UgVgJ3Qx91Y;{hXP} zgFi;us@qirSDH1mA6&kPGm!<48J~5+wbuhFVW+4veK0pLQDHOx_&DFOA->V+Y z5d0C8NU)Z9kRJ?EIxzmHc5NCWQ;SmWQgZ@MZZDULS0|W9A7uCLjF_I$!N-8dmd*uK zx^r0PxYxPw&%TT}CH&`n;DKCN8iyCqaqOOFprYp1Hc}oDYPwT`GcFK)UOqQ>ncQY8I1DU#B2DZ zJBM#KONY~q5ED)`l*XtH=>uu%%Dx+Zud>!mPMFvCr-)tt6;0CU@nRq$^`K=ndNs%y zA{^Ma#+%ha=Pg*$nVuWLQ5gB{C+s0%E6H5H-sJeEV5l1}bvyCsts5W3A_PsqscZ!Q zbZRk=ZjvV+{Hur5(*qX^9x)r$gdN^$Hj%NnTij6552`T2MHX)$4U?)yk?l91Kg))k zJNWsah|T-p-rm!v%e@%CFn*(7)%u!2jG^+5!OiK+19oT;7ppev{V(3k_`jG3=zkU# z2%p?8L%5{~afoCdzh>pxBg~MREbEv>+ch$CWh8e3`VKZ{>Glt0S|X2O=Dott7=ce z{$^Oym<4s}aOHl-19`JKTz#&3L?MlJ5vGlKQcgHX1^+B>kd4NP2jG^$xT}W@ zBKA(`mQ4DyAgjnF?uZ?m;_JnoV6N8WqhG9#QU*tMRA1}vN#(+!-_nY0%w*)LLmRxi z!>lh{yEXn1i~ELa<0rO@)wJ%02IhnXT7wR=!e~&0780jo#H-QmE!P$Ycpp=ZZ*%s6jA zK=-)ou~7=di~s4(5yasRAlPEE#^Bu%+K1LL6=o2rdLb+i3SE!?^J6Y=`3xP;d`9%q zLdrmD=ZUof3VW@;O_iSRdDo-0h}}oe&8GMXPc4R8q_^f3Prgc)4c@14G5-}8%1!s% zz{SqBWCk*RgS->+yD}bFey+!CXd^oW4)??Riq4R}@QzmZDJ7qKqZ(m;l8ehso*%T_ z;hiZ!dFnbqhAidogAC>by{Re7o77Tb6_Imj(YxM?G4{=`$_HVllvL-TQ&#VEu-9g9 z$!As`-5I%;eT`AP+~r;ze7o;j8WqV*ZyD^IY}N`GhH{+*Z^67GhWookgA*@gSuI4X zzCM4a$QW(I=`&P>f@TLNS?xMxewD8)h6nqK+MD=>MwMSaC}}uk|dVT^Q>C7aEtPFFiRwPg-xReIz~kNlRsrdm~4V=A3& z83cdou&`I^jQHjY$1HoU3sT~<=(^SALlpAVNE=A<_3grDljP>wL%+!>3~8Z-mk=(5 z;nmlBMLi}{N90ekdM8dR?%g3bou!d>7M8O~79xNG8Kkn^*hJ4u&?JyXKGxkWGk+DF zDrU;Kd@bg#nFkUe0tA5`@VH_Mg|yTIk5m z!Cl{uXU`#aFZmjhslWQ~4zg?O({{hf!P-gkR9wJ-5QoQ#kdz5X97}NRFy<tyVQ%;|gZ(i7iMwG!*7grSNQF$hIoDC$NvJ_F1CX5eo|5%B zhOtk$HwMBy={(FckT(lnI~Oe6u4@u=Yg9I1)q7{D88lZI8W<9&cK_0%JBQsw4%MXuN*RTJf7KIGX#WT@SV8$vhoV-^l-VXw2lB)0_VV6bu^rr{1 zza-A9aJGpfnzoK>VHe#Yzjl&)q-Jmc4Q5d}902W6sH?6wJ(Th@o(mRU&VS@B7&$uf zZ$J1!Af8#D@EblQ$;8tU%VcWy3yER1x4#i$j>f(9TLRP%*rlC=ksX?q$!}Um0Z8_= 
zC!so~O;|LTXzS+fv}d5$G{Hwl1D0{b=y!+b4?7N0LTB}E!FO-o^$iz%kRt*}&{Vqv# z%y|NPGm6T@_K<4_A0vDSkFt;{&5|L^a@e)Y;bg4jN}|Dq&?I3Csonm0vN%tC47vJ2 z1SvB`-;GM{7d=K_Z){OG6u)VQy(}&vJirvN3kqcXgtXM!vEKKNQ@9P2uSIV0Zzpa?<8B%7Fy*(i z*gEu0qh#$A(|X(1W|62g>lR=4#wW>T125g*L>(-x$~n^JVYbDhuW|%UAn%*6y7iF^ zjUJhbf?oYknm^T-%f|?lwkQ|FqGlB)h$$m)uy4|<+cUF%v;nDHv$y_UUp~Z2#f9K~t&JV$~@26wg~Zb}U@%;9ihgwBNyA0DB5_ODXu=ztAHb#goU)xU4=#WX&HwwTEKprAI{2Ql z!jaB~fQnw8JZYg5jge*e5?Nl$j>fy}@1RIwE7|*po6AZn%ZPU~4ALEn?ODzZ0Omrd z{t*EEBT0RQ_{ul*pi!qx1ae^dDO;p?_JaWeUfJ#q;_YHq9}OwK`VN~_3bzW z1;jf%I4@zI4rWL#t{Qn)Vcw?J>G(Cb858#J`V&QZvFP#E+3kA5&A}KT_6CcGaU(q7 zkuc$aRl!BzX3KOX6KR^>Dp}q%9b5E!K7j#$5&gzn*RF(qx(`$Im7PLRT5xpBX$KC! z4S4c&B3qXFP{3hT&SNv<&tUZM>~&hl^m&$2%D|H-BPIBePx9`*77s?By*@Hl+zjY{ zA~`sPcWa+Fe$=EPH`vHNG6W~~6oCCxi=6?)J|D?DowNpH*mlK>Gm7_O)M3o}3|2u8 zQuWKJA0rP@i#lvcR$CoRm_T_9V3!*Q5AUuU(ZmTa)@l3WO<%|V84xiUx`qO2Gu;;` zPMFW(mLIY1J7R31Eh@`N8_o}fnhWoJOY{je(XnRu4HMkYDGi>fk8~u~UR?Qv4apNDNsRPM3#{} z36r{UO8wa?0ecgd*Ipx=lyAxJmc{S;hwI^ya@XUipL0G~?{n{mYeHri3(h4dTeff5 zO|XFA!KB-6AXuVoP>Xo&ImXCji>h(>5?H?2ui&{L{bE{A0NzHF8J$R-DF;J`c|wB5 zR*9Z7pt`W1e9Y>3_tvo8X~fz{?tL`a%=D{%6Wp_?9-zK1nE4UMx7dXJi+1n8%8UrU zCdBh9XSJ1azDjsrD&l$iUG$>Af%gc35{0WG02NdOq>KY2GSO&}fCf#pbg(v^SohW zLE29<@5%~$KT-?d?`>a2x==PFkzrJ4{w6A~@1XDiU@qT{1;x8ITLPtq!@d0!K`F!v zF~G(1EEu%Bh*oK@tcT8?GPULN&o=EVUVGhqtZ&(=dpuj?gux44_q+fIeAVi90^JtC zg|mVMI%RIh%;KW{;i`tm+%{CHY)wQ=1-9JSAz1Z%!S0%gc$H~CihszbZCyMj4%g;` zqkPK~z!+_3gA;R?E*DAld3d>n-Pvve4E@6`Y)o9QE zM9^)1`P{#hW(u;Ipe}oUzQP)Eqg(7tUu*O$aR}g%_jKagWDONK9as$pvJ)Anjhi4v zzw^*!>Fu2KY5a7XUCQ)r&iYnjoBUTh(N_L{hCup{_fY^GJG=0R4x>gl&I#}L67j^s z=(K{&dg9f9cui^jAeC1KKZ!%Ktb1TJ+X*rVPvKZe8B}}AITn@tIO9S6`;<#(N!6XKJWZh5u z=(l`J8zz(p@|>@wkEeJRPZ}SrO@D0e=eO58Dq8bO$*TuD&cdQX{UaWSSn>Su1x1!U z1nigk@r+;YIEF;G>8*>YU#hATq6YgD%V9gBjiw?8u+hOPZB4k2RRK@Z;S_K2==6fA z3-0bdqO{~0sjmtODQCILFPt$z*gp|`Q?~O>uKHrl#yd}`TUU4}LF|JXbU278S$2?O zH&dH2rIVt0MxJo078e=;$_u+aR zm*|H!k|}p8s2YcWhvExt8m3uNr#;MjR`O$u5-@JMf*+P7TV?8QKRjr=U$%gyTY30s z#C$|bdSxPar1g=bYNooORk@`dFQNAe3 zKU{2CD;CZRCJ2y2ZR!vH*wP6}w&qD9%C1!%R%e}3_na1!zmvS@`){vp3jTPlq1pA? 
zzY2-}f`0FRq~il4O#Ob)X+;ii7%0TGUAdzP;(Wa;^;L|Zn7I?YD?*A5u`lNgYX%{m z5f1O&Qe4}KL&Ro_IrVd-&8{rA3;A>Xf?jaU>A`pSkbByG2U$(YFw@OfwUMX&JlORew%gXjb9y#xabY#|W+- zK*@Z?{5n=1vw!qBTcpI!@KxG$_QRuz+V#RxHg55z5oJC#1#zNHpr!VP;O!UBk%uH% zn!$73h7$Ba_Fm7u&Zy6sZ6EoiW4Z79kO(Km*H+p41}Ws2--JmT z!NZJwGlU^K8`M+k;AGO3tJGL4;T~2Ma&mETXGw{2#dyktZ@t%AA5_Kv*3m#{<2+Iz zjr>`jPV$swmz)xPHKkQ<5g{13DrWx9VdApi#X>h%O^$2(&W|qZ;&$m+%z@*kG!}y@ z%Ka#U5+?kIKrwKQ?B}lOFpy3nq&ZY zf%2Z?DsgNPv033|BG_N=q1UX-UNfq6pUuY@AMHx5j7$$-Ld|Q|t+zjw1oy@I@ZYbU znwkP2jRqiytA>EoOqM^mQ>PKum2`4Svl>PVvG*6sl(tuNpQv|8{LWeFD|^#-mG;Qq zptZ5CuZ^DAdUxsQNYe3CGk8YVMN5tQ4)yp!!3*z-7MSM*2K?BajrYAO4yQU>mrGXKudWl^4v?AdzY-O9~S!c zu_I1BO^CU;x{2$^15UTi*HG6>2EEM6XN%EuMvUeF#8AgJ=3|pXPfK#1r~gttu}bM~ zFX!@u`Hq}dF2?IS_k^)1dCL~`5sF`wKHRkcLUf3tA$DOq&w^ z{bYLHMcJRRG9#s08>Y@uVxt&5J;=%YkDQYXWHgQ{430~_gBL0bLAo$F;e1VKzu_LE zt7SW6Garm^Jt{K$X?bFlgkp`R&y7Lkcdt;H!R8#K^(@bJoFOP}300)OXB4+qlG?ES{fa|~D;BJL>b0$bX)ig7r3;a6wqExzZgvpuUSYu!3K z8^u%@!Y^+|iWLMpmI=2U!5`XABzUh1aVE(;geD*^Lb1;y0Si&StC`aXB;=$Ec!_6u;HBoC5 z7Q3?2*%F{rA^ST1%5YM}^&=|k-QM|vY$J15Q0IyuV8>-rmcv*)(1%XB10Fc4Hu18d zG`-AvPiIr%daQz?NYhHv-ZZXXwTXIWvZe39zxZ={Na*E5EZJa1YHs#{xWfszlt=AW zWN^A1*M;fRuH7RVtvkaTxVZxG*{!?5lJC+)M3W^kl505{D%&N6%`d`sEWGm>uwGo-MTfRi2remc1Tl9v;38rw@a5?fM>2lgG*$exGNS2aV9w75sbwUF zZNbRw%%fk%kxJ)?iCAUKxpDmQ4U0=1?W-l;^{50_^_;{_(dm!Xda61($GoJ^wnatV z{^4e|@fbMe-Umn+1-CdWLvYj3+ft))}(4xvUOztGGyK)cmt zI)`!QHjJ`C`Tdu*U6t?*I|*yg`2>@}xIP5IQn_O=Y3{E-Vm!kM>R$-X+W7Nwc*$HB zi50c-@4q$k`kI>WXPOiU%Y(q9l>y&|T!UhZ|6sZhTa_HZK9e=J67*IlJsKmH2>9F6tb2e>56ps@+y8noc3EsT+>J9JAh9_5GeKb z=!BvyZye|^{PBW9H+BF%$Ao>+UrdxV9%t-i961*+XO-SwE&R1Jtb;In5}lp53qg5= zm4CmTTALD$mLPZv#{ph0Ei()d#}E|g(hfpt>*mF{*VhN{JV%t&QQ7VGjE>sf!6j`} zVcm|X@=GRA(h+p4!AT~ugDinPhPh1F={BFXZpR)<=rL4Dvr7Krv7F1CET>L)KCgSZ zI#t7UE$=!1yAbKTcOU={z70Zj0Pz0zAMrl6CxkD;?1e05g(ZIp*CXlMiJ4X5{=+ob zF>lUD-I(0+5pED9L#jFnfx@H_W&wm_=|Eod0H#`H}zO%U$81q8{zKa=}2d7@95 zd_&E&3IlFk3Y>2-OD@efBh(qhsp$oLn>XXNc;gMj`JosN?=(IC8uaMl9FD}@755`J zvS)LBYH_6_me!eFbZhW}=g#i@_w_x-=-~0EBWL;55S^{6Bl?nWkng;8)05MR?JE{5 z@W(}YUF6D^&!uz{Kj97#D76Ry?7^kHNkifKu~wyDi8GE?o+q~+q^P*9KNp=`b~Tav zt*XAvGm(zII$FMwAbs3-A(}KJ(m4O3T|xLNawh=+YR8@&QeN$MAe%%G^vvUFHb8u? 
zmw9|nAg7qs?H01m8TBu`4zkO5dt#Tv&(+ow7!L+I;dpi9tJ>RsM>hGiI?Q^8mAlZY z4(|;!pd@gB#UVv{d4ZF|EGpXGG&s0s>r1|B-nlwzuVE)D2KvDf=%EL76y^_YfXTOH zG{xhNYkgE}`LXyp4y5%HCWN3o!R;_S006Txq~GnhzAzW2cMkdb<}oxh|1dMckT+O^ zn)MdUaAP!HElXJ!FZw%(KyTs=hP*JB5R^*bITSIi+}k-A>k>5(Ej6T8_&#H$$A9BO zPy&QWXyzQ0+MPs?>A=dxH5v4q?}|0xj0Es-&Y)jiySm`CZ!p{%biQeG)Vd!%hJy4O z}P7|EBpN&TJlqf=PeyDCnx^^Vd| z<*$XkS)5l#N}@b3d~BPaBL$67bg;uIrC&_I1WfB(Oty^+^nPaJX@C2w?57k}?RT=z*Zj{c<(nbN&kO#*Ks66n z^CJmIDL~Yo$Eq++zE43jY`St&eEeLc9Uh)PUU@OL%uHT#5Rii7m&BMiwsY7B)-en( zeK4G@<1Sv2N&TQy>r@)4AI_~bxU|B0k#g0-U+y4qayFbVGm_&QVb&Iq9>qb>+^8Xb zrF&tfqc3CTdhad&kKdo1)n90hQ1n9GCW#(6(Pdg%_f$8KYk%UYqdi$8PV66s~e}$HXKIYPB<>2&dmfLJ~EVRcl}hlYq}!OPJ%pvlAi~n>o}a-V9cMq zB#y){Vy+>5C0)SLOV?(;spy?Me_~Bs_4l0px8Czh37tKSCs&-x_0YC+m5b>oy2_gZj;jL%JDzpv?+grmPui%wWnHPU4ZLo_zb6Dj{*A7;p zzYwoDD+K*{ke2z{gr$mWvtU2;q0sNl(oxCZ8-_EucqW;S$9m&+_rjj>1I+7Sm-VGf z=I^Wkw65Xvf5)Ek-;fdfpC=uNL{O>`2Y%zFexZfH^!=*Prvk|R%J!T24@0Z^s980O zKNeB(A7?#$p6h0gPSjl71?zPEqTi(mXpklcDS$6zj;_+~%sq^+RX4Y!C#U%pOgSxy zg%D9@{JTUiv3(zOn{8I0m_ys~Y(PsU^@>4g{`z?jnUaTOJ* z(|65t_)QW!@<<$gkdN!bichEY<)KDh zYQ&_z_WqK3u;(H#A1S{BcNodkw`19M!tn& zEbD#L&TBJH*mV{5FLKg#Gp;CJ;L7Qce^*i6L#8_-A8UZ2p)dLL@uj(0zfQl{6E)cV z^gYiDwfg!#&K{0fIfeN8_>}t>&&o>I)NudCSHc<)98^?<{|>pY{J-HOkgg1aLXRN(Xm&SD zM5{L-Oj))sCFqnT5X81wvbbEwJ$eMsy&VlolrVlAcWe>!;akoP86odiH;Z;Aao<_O zUmoqkpg?gG9H@wnU=$NR6o`l-h^@?zU=Efn3BL1`AzgG!FwZ@^tMZHFF@>;_wvT$2 z_uB?SpFg~(pxns%p2}{n#*wcrIF&jJxPnt)w4JLZa3Kqx1QW8?k(M5(T0;@{RD0O^ zL?#GA-we>fuqw}cTv7n<=IJTTKlBtRBy*Wr%rS>|__5{MIGP0dy5Hm`CqbtJ$A%ek zGPMMBV{k|R`{)ho`Rve-I^xRrH-X6M3 zSF>sy|E(T6y&MSJuVf)!nDimhG-E22m(G(n{I4~sy6+K#rmB6)C1lr*j;m zDzhuOZ}#_oL&OahVi$=y2~Y1;{x!e%V3+L36vDFDB)(3UzeOdl!*75h#eCt$8zq&h!R`H@@>_7Rp5L+iR(?18KF zi`p2J3649KKc;V94vmk`Sq|wd%HC3bOc8e{Jc~H*(Pg#2@^)`GF$!42dI`d=qRJth z7cy!_b30G}!h@&g;>%2YdGLcR5uG$aT1o5mz1-4HOP9WLeMU~r$kC%Ssb+zDhBu}J z4QjpX2`$=ni+s%uH%dm#Y6REz#S7G3kK)SR9U{!${{a+DV>y58I0(a2kYc+8jwE-O zfsXvG4mYn|&k(M0H23`^r?0{v7^(onFm2EFZ6JVVlp7(6qq+TxZQ^#|zD0i&fwB z6FpB9`C8)ZcJO!J^2E`v3rk`P@rn z4*655k)IZwJ7!_Lp4emANFQs9_oK3xKA5u<1AWtE^`iEcYCe24DKGRTc1w%7kp*s=ddS9O&4+ldgKw}o%Q z5vKt+KQf%71onUjCK#QZ$5%!|axYo)vTdkPl{y9Gmp#utANA>T9U}1X$+h?PtW0JY zv`r{i1C+())8wu)Iq9kap!B<5;Dkl^Pby!|#rt zi^p;KtP_ku*_~yH#S!2S!o1hLXL%lDEP#D--Ofi)kJI}lNyLw2X~)>wgQrhE7&-9t z+%3A&S+lr7@|E5m#Xuj1S|ImEdwd(@!+>3YS|~~I&ensY@?)A#E<5ge-xt}sWj&Gn z`cbcH$W>l>5r?tZ|0pkF+=0a*F)aR8E^nHv-?wdG$)&x)7=tOYFn_BY9~x-e6lR>; zFLB8Aq6?Qw@(z8}WO_RX_@L_UDAsk%PDW5iikQ|$sBmJ%>JCC_~gd3JNu;e z3Cl+&?lV$oIzg5yto(M}sVB@U?c(*G6ElzBNn7{oZ+mb3WgCp_BHW%@eT;x{_Hu^h zq*IpbFVCGLG+dZf>oYSPb5jhhKJ)^cv81+`LLxII0ppRVaGRk7D7on{M&^i}f*RD- z_>C9M$b#VPuFATsKgX)Wa(|s%-VwL%dRDuReB!<1TeaVkY>8^7-3Ty~Zvc25C+Ddv zT2vcKIu~5pzD8O=);gZe^9=8Kl(FveEnMi0mD5<^k$dCG49|bK@+eLMmn^eMm&iD_ z*gBabWDT~&vk7(>c+q7GRMPvClzV9V#_dO$^hVbM5^taO`F;$%8O%70Xb}iSO~dvg zZUJD23lfqSs-*u|{^j@9gTNo?8F*H5H~z=SkKFHhIPHrtx(r+S?I4rFBd3%1q#pP1~7mYR^tW5pL>-S>rh2 zCLz)Gfh|!dGt-%O{APqeaF(5WYT0f6SU~xzMBzGR_OZ0P%Y@IShvTeGn^e9<=2GD zIoi~LY3LI4*lC1dIB>qiDzHFe_;Lmnaw*I_+;|;~P zYm$gy6bVOnS7`?v9ctT8TGbqa^E*O;*s*&PY<<^Psp|Lx51 zV}}s+#qG#R+0HD#%TujgGlHp;?qy9V&> z=JiH4LHo}4Tp8_^f4Gk1-NCh;#42&V!uAtxo7N&p)@@DjM_8qtt4oq2nuSJOHBPzb zBi#l*_PMRIzD)DrfJ|&e3N&buz=XErF}g(Az*2myeNm}+ea6j47Lt;`404R*)n zuksD9?%BbW3TlqK!x(@yKB-v?rjK0#s(CEZX87htHpDe+-FUg={j#&-# zcUurtrJW6m{il`qpZide7OcPNlWYxa63&dQhf*IhT<2h&G@QloNndQUaQNN7w+87ty`+50?bS8XkpkC&ZBIlL8-XU3?eft&%FWxWFs9 zS8Th{`NYZ3srF=$q{ZdR=5?yg3Mb^AVn}vQ-@B|i)OS}&VkMON?E~_#K~Ql6{ypaa z5S$~otqPeGKxpG(HlnDfxBh*d>At*G?XNmIr3cc!4G)DpqW44-C>iMeIB#`lawodR zy)()3g5W|Abz1e>O`qJUaF)JF_+G^ 
zZ;}I6U+^-&kk6`(9C>^H4(Ry2`w#r=f5p*quf=eLsvwop;Cdz&a-Sbv7#7m39dF9T=^eYL1JP>1i`?xKcCRY~xynEL}Y5cuA2%jPTDM zKNb2(R^{ku=}ujlZ6wq|Pdf_%QA}sA0*k8M5t9`rRwDFF!e6S^TImJh)y>mLR7MoPS=^>dWmaa!1x57ttlA$VfzhjpmOqi3eEANiP0vG-ye!(-Eifc11rd} z!9iKeN}R%-Af}ewUcY3?gJUfaGV-A7h&rC_T?CIoHoy)rG;!^1+wT!Sm01^Y^I%iP zy`kW)PX5EC=NS}+Q$+Wfu#dr}xH%F3{*egk4bB8?LlguIYo&k1#r+ZQT#qAKwq&-* zK*|*CYFRqqG1#PvuY8#Gx%wq6)YQ@A&o1kutz+(O6R&kkxcl_DxOm>p67odm&{I5C z+dWeRkH+(PN0Ct08j|W9MHl1nMzSn9#8#bXZ~?;uW9*aOA%~}09(L;pRjcluZ?3zY zWqENYC2-KvE9B`nAvA~Y2VJofMz_X8M=f)-tNB_Jt`T8fxyKr;VVoG5)^tU&ND94Wn*cT>FK8--Xk=`MU3! z=@W;>7G_nC_E{wSuch7p50&p0Z9Ws-Wa33Xfbo8$a}etSrw;~d4c>k`%&53HL*cI& zw>Q<2=3$njrZ-&MJn~}VVkuRpp7KezDSV*OexbF2?YxSk6k!Fxy4Yd+>@zzN5_?9q z_?}M&KRh4Bbo8#DH&;@fIHbrW`P6t*}SWfxwc^@ z??8@F%pJTPuG=2*d2@k3_019fwp1zw^4(FY|`>gq#x_7-s4Hxt{yFpX+|^>wfP0|Ge-2egDt<^oesiP2-$7f4^h-9^d18Y%JY-0e}iR zP$w)-z!8frxsPgH=}}nYH}UtDR*)tr3gpDte=hTXZo2zQ%0>ZvaY6!lN5iFbpyK&C znWvgZyM6pcSvrPKT3Q@8nnalNUmSR_nbL$M+2co`ZD^tLf$LNTX$YRM3RCu4cd+pr z{SX{y;-fp_?0rflJWk@iRU5t{YDt_F#pJ+jdo4z)GwF;S-I}Cy--m9HjZFUm=YfY3 z*N&wcOzEq=$?)iZQoY;q`g=E+%FMbw>L{r;L3^9!MHkWst9vjZnz#Vr9)03BDsQGM z=qqi6;AoC(4{N>50%yfuw=TM@-QMvL1tZd+oU@j;-We_)c^-rNa^Dpf7s+!8_GCqz zJ$n&ol(B685okj0r<|fXkh-TGqjjZcb$5^&(x!1jrZb+k-vsw4d0S^?Kc{y1aO6_F z*7kg*Wu*TA_&>CQ2TwbOZUUBHR~mT(?EPlFNj&$MCjnU()phSxbl_pXU4oas#3j5# zY)-;R$e|@C>!n^MAM=zz<1d!cbi#83bwBB?6x;_4RSQlz4CX*sFuNomgW7xZCJ?Di zf%;@KeRw6?*yAW3e*ac<7^dk2%$*xUdCdrK;71vfVZ#`r6ge+)$=~a&P3yPOqTZ4p zd+sOmOR&iNT(~-LwD`Tt4UX9sHtU_-ZrW#^7IiJR7i&{C=8aV=JPgKP?=d|gI7`4q z*6Rd09lKUlAD^XZ;oY^A*Efg{F3>_)7fp?D-F}ZBGvI(7w6MZjc;ETq67iqt#{527 zX9xHCqf_xYRcozcq^`)5(g0D2G30oNluQ`dw4nhQwW;i?DD~Y`-3lK{oGgyNhr>O8 zm7(8%&7A>T`3!sW{*=%ZOr`E>eg+yS94XKxS&D@ zm2!o8j@HYimjjM{Ds30LVba%-)M*#-*6% zny36rHil&_pCq!6f4l6=kUD^NXkpweClQUE0%;5LF*IhS*dMZAZKs2U zodKt38jtBm0sMz}3UVaww$N^Uk%LLkH?({#$!%tZBl~S*@+>a*KPn>Bj*J$(s|Zyy zTEhc6%SUwY#9B9abqE~1dWj|l&r1|u}mnY$($*IG6db^-MrwCOdu+#y;Eut zI@Gdwx$@}cF1MII;La156pglUQDBk6=&tYJ zOl~f!IXMJAz&wncF!C~O!IVurz%65l&@YuE=n-E`aJhgB5R?blLusV9bfPn$Hq7M97B--u;$ ze_|A%Ffi_)}6)!d<76XJb0@JjP#M34T*~*{ADc(l;o{D~}GwyyC0P zGD}JDsn&OtO0smL22T5`e0iKbVIjc$gA~j&RcWlaxpapMRH1oQ*Wc+EOA+leY7T@Y zU0h!iWSj-uhR|u=AD^u5#_YA80A}qVsank z2VFfTCH$G++&+RWDDXc1+q$kKW;lh2Whjx^g5&@0l=G#jDBNr}LjD&^KpSSL63DgX zOJx74!edZi`!AMFc_=H8It{4##nSI`Lmx{a*-?bNw=SPUAUrBl?$7-a_xw+Uuu z0pNMuM85nh$K!Xd$N$ZvHHH^0F$=tw_WlB-&wIgr^XZB>R=cs$^SU+{Y3sYh_O(1b zEg2?%Cx(UILvoD%4d}85ZC*q$#G_%TfeqQU_|f_GO~ozyC}Ag-smlk}_F;-niyP{h zZ|S{2P;`%eDO%Ujm>>Ve0`EU)8jE-$rhTccQKk0Hc2m+2=>44ZiNd(!BSrG=qpmnr zZ(VquVRwLU^erv9Z5SY7A7J-B$UwvKZc6d!ZH(6(1?+c(4*A@&U3bLR*Uw6k^fU9Q zjN_j-%DaX?N}5Vw0%nk#)~TcZ=&WIeRMyLix`ZtP7|DDn(gh;+py_dcT-ct;Fs=T9 z2hB+to-=*vw!iVHyXgWeOrHB7^jAR-pTf6HLqPkj1z)}bE&V;lBmJn> zy`7=c;2l{}IC_4u_^jMJLM$w{rL{=NeDRqY(pH|5mr(RGJID3J<-oyo?a5QSEjoL` zkG}bVxwbM|ZAs0YiZ>-f+PDvmHesOJ!h1sD=g$}u{5toot$U485 zf5(gcZn!Cv9OcM$`d#7t2%SX}jkNHQ!H&&)IT+ zdo97Os54~y{KK)`7K4(#NnO4bT)QQoi3LhXFkUx6k%g4tsHw`tSih5K@$Og8?Ga0N zq{~b%?)4BK71fxGrLVeP4V~bU9)l&Gh-oRqIER|H52hZ;N2s2!I_0Od zq5Y$aGlXL`5hQaE|3vCBEU1rzxe~g^`uvr+(cO;R{?H}Is9^E0K5Q$AO1pP1Xnv{Q zdt)cS%9=ahH7TAeRH^>rNVc)=KVyhP^K7lB+KNsN06wgv+v8%LQ_$m0D-`ZMh%D+Gb<)1gY>QQ`<(6` z^4QO)`!>mgeC3JCW%-4S>k-b-hdb*#O2o$Jl`c(oQ$sc5>fx(YXG#D$p|EZlM##g^ z1GEgg8S59|qob56GM*kgQC{nj!JHddT<(K9su;_!5acCXhTR%51Z?%tdsvh<&^to!m_lvtgm15S-%vSc;oM$z~Nu#b&u>IT}T7o zL+T6NAh`Of4mE!|QP9EPL+Wz1s}9p6Ze|^4PX$ryu5cE z<4SXlIp&-sA8gf!DT6eMVn$*?vkxwexJqXn`e^nuwl}1j@ogn@^26su0}d^#b||Ee zDcZ;6WGh6hVsep6lQ~YD7sw{QoMmmHDno}nE^pZR{}kAgy{cZha8U*w4Fz^6S{&cB5qOP~Z 
z>X3Y$#~xk#o=m;j#=1aku!HNGA(UzjTvl1VbB{v>&dA&*7KOwMkzrU@>oGao(94Q}8%f z=RnQH?H-eEpdo;F)4~$gD7w^mQuEN|D1*1;!HC=0?XS&vA0PWvSpDhn9)*OrAGU_p zez8nVP}TXmoA4Ee@uc?4jO>aKM?qxFR|k(Tun61%`!-Y~52;Q0lM0t=_~Q*>W~%(! z29u)$L%fI{ft7c^Z&@Xb%saKMZ{3WMf084Kr6hacr5!FK&gss}X3ygQqvZBm#zUC% zL==p3McvrHFmb25<(69d{o_xZ>y;HJjvtgM37(dds9XTC&1y=@4;OH^flTSY5J3Rd z#yX}_V!YiHclQcP%cY^aiBd+N*D$%wwbjhKs!5o3u@(z@QeiG-oVA6#M>g7ivK4W? zdi%*`D{VsNDDY$-H4&}w?I*i>eA#?baY?i-YiqC3>p_@UeHsAlC(-DrO6GT9IrA#5 zx{hOF{DtxWaGtKN2G%f`*1JvR4i85wAu@Rwk1B9`sX=~ru`8lu-98E>d08X#U#{x$ zpK$*qWNs?l*$Mq5bn8TuFs`9+ZGtjd4JVi~Ifx`5;#6X#HPUd`fd4L-zo@UnhvLN^ zyGEUjd$zqP7PVmR)HXzXhi(p!!I!l?y_F5u&zTpAOeyQ=5nI z=GWr1p*pKZEa|LH@H7xQC5I3t3J;24?`xkP(z9MWC^LHA=nVqd&8QIA7&{uj=E*y} zy%oVi{sU3>UwJIh%Itv&E;X>CxX6xi-XkbH#3RKatD0W<=m@XTx(CPM4Nn#-o^r~E z>IfrQ10eCe#iNpWGc!l4gh=2KLmCo#+D4U6dArkKoHSm*l z8k|#9My|GHFNt_H?mE_kHP19M+!fyRZ=;w4nC*_0kQn?mNL(xlGVRb#H;#=URX_Od zO!13OkhWO*#iF?Mh0mAM#qK)|n|1Dvd^^SqHayXSsY+`OLcxl6xaAX;GmHl;j`w+# zC-wV{iC$wrPrNIBKpygRJIa!Ok1Y>;-^$2rIGOrpP;Jo8hX)bJCm2bQy3KV+eWuoIG_CG^gmEOM zT|`v>Q0%nAZaue>IR(!8QYrf#H81&$yPTXW$^}L3xOAh|7_ZkDh4=+ET1rRvb3EW5 zP!BKXC$xIN@kwW18x1ng+R^7j2m zvY8hHKJ75jyjAZ;J6j!%Q=(tS^Ndyi_YUE$XOreiW!Yn6(lvO* z(PwWdA+4KIgf54_-LxLHb6>oka3aY$L7n4u_Kwq&EbJ_o2cXy`qs-7Fp-$La-*5Pt z8w72Gfd96sgM^lbXs2%5M{O_wP+0G*)4?@mK2{v7kGd zVL3xdXB41$(cN42y+N8A(S-|F&AT7P*XU#3iPzdU=*ZZ_4caHn!WfjH%G!y)zOt!;zYnFz%ua0Sac`pvi=Zlh$O?dR@1KkQ zVlh65GtY+54{s@bIFZ6M9L8;J!Wc*X-riY#0OUDa{C&A8w?+MY&x>bV>PcRVhBp~T zpTB==Qkvv*nqk(^tcC;*ZEWDk03X@yt5=O1JFW1;@!ChcCZAMk@Ga7TePk$ijB;F7a_qh z;QFqNJ@J&nlrROd?9FSegIYzb#K;sag2>`B_|X4GOZ@K)u$3seWlDDAp0+$&Nt8hG zP@J}z)|`Y$b)BU~x##Y2kAadsF7iHNw@lS?j{Ax!mPS3!j^B#+wJ#&J#RlbjTBX*a zGx8cYG_9;uijI$-jj{O_=md~_KI?}pyV55c*k=RB73;85&_ zBaA0o{;A{hmHoM*>o6Xs_y|CuT`DV6w^3-{?Q4nq4TUbKe7{&wU|k>;am9K%;ly;j zbc~w8cveiWvWbmEsApfXrbTvL$eXN~BTx5@jkRC*lIb%p)#pe{`bsoE4YXfbtbm=S z^K~47dEiD(r;XZ8bAyiM)SNPmTZVnlcypAUX5zzLi`|+>fVmzxOvMu_8%=6H*y~vx zNJ=CGzZ99nyGl>}V#)cyZoen`d_w$DEAVD^t%7mZ#MECbHzqid-aiClt<(4Cd(y8l zN>Ez)8wMj1p;7_`y_(DHAPW`Bg_ioz1+7{#k19~$oTzK%f|@00>r@Yo=E=z4qsN=4+D4Osr^X_iXdk5wVD8bkyOrsUJf>&E>R?nk zwJKK-$Ux$9G9ft%Wu;8b{M_5^WsDKEqSxFZlCh~y$!?UjRZe@q%PM0 z%AnXmAt5D;@G&S*3!8eP#}t}BeXl>6Wx;U|csW1{FF+5&DjnnT9&mo!kKE~BOU&-U(d}4yFP~%tpnUg!;^~QKqEZ?PCG52oL?jU0up}eA# z&~}LG1#(r~RJYN9B38a2k6s?}O*M-JFDGTL&<#n6O9naUk%@AJ+~s_- zbW3e~6>bUwDS{FBzJ9)0Ace0CJHL5f1|e3pFmR@pYbBexM;UBQFKYbM3L(~#QI7X0 zG#yi#GI~%c`rNsN33bPmf3HgaZf%P3--TTLXd-f=cr+_DP&#aEX=B8c1p3v>r3XPpL{WgOZdY6gBbN5@8#?_mB}7Y zog)R5q-&b9fNu067%`5g$})H zhP&V`=c9Z87%M>Ir8dk9*FQIU&&$2D{TbiJNt@%Lmb|JE|3WCq^cTxL;Gk!M#rhO2z&T-~86krQP1H7*rF-kmxU_?)XRw+#JAG<)Xz#uU zv5;R$+=Ily$rBN3W4cNi4Rb<+I=DYJsDMF0D2^t~EcTEk^nHrlg;YKgYretiY9IVC zjMzJ0l-HFw^6Ykm?a)+WI_pZu;jFUXulXMwCz&6Gj}3SZ4E9Xo}ibji*3i$vW88XtczT$!%Vnr`0RQ2CZRw3?`p`l zL35I>W{@^oED455UKBe~ht|sQU4HFgk=UHcP?E#HlfPlKy(W zacw1v3$!sfQjeg<6%G$Ma-jDKn8A>rH2dUJcv(oOk;@&(Uo2NR2zvb+hg!Yb( z_=&*F9dpAw|AaO64;Iw6G9xH}F*$5#p$;hFNJu1<`+a(KD@M?(GEz(Vhe>u$z5FDm zGqC%^yQnTChe%dN!0DhwI=|7hL@DhS^NX$3;t+{2?VqEGxIHy?PUZaZ&9j2>4<_3o zTrJex)i2l6ur*8``r)32-Sp!(MqY&Z!%=_k@uJ`_dPBw`FP9a0Fm?q6_ zUbW-!5Yeu)KrL`wn|$0ZK7bbZ^n2#HQNyvGy(fL9o#19KLJwY%>$wSHl;2(Xzb6d> z-^clWv8418&rtq8kFgzq7QTO)$-!Odh^%bjq`TWxz@rIb zSf`5Gcg=uJy#zg~<)=E$W-VTRWEWri51MPkEnv*WEKOuT&O9<9V~@1y5tA=R&chsd{k#V0E$WvHXnG!krTpy0T!&=BZwoY$q=NYzBc z4wsx*NdfvR^uCqY(&+sXZ=sQi0p6$~VZlliN#C1n}{1$4B$KT#~c#yeUWchk$09^CxJQjto!&_XbG ze343OM{pR46c~v_Trt1<;ued*<~j}JZf$T()1?L#`X$WIc%|l-74eF2B3t4}mj70h z*AYofw_`A4Z?SCdLvUOF;V`JbOW^)lg7=r710@y~*ta4SxRa%XQNYgr%(vZ)GRY2f 
zSQ4NVEj!Ho$t)|N!lDb!ppPuMO{{>(2Z_B^3EZzj*FbgG~mz55d`SS(%td6%B8)m^f$Ef(Jn; zbV`AJ>KH~G&8}AK^Pc;VeqjDwRW|d?Bqa0lh4_Q^(bC%I_D*9U^Vl~qAmIW)!N3S~ zyVB#A$ThQA=Am@yQno5mpI)Y|_Bu3ZZ%cq{je+efBpa;S55K*Y{}9)0pW}kYKzWZf z?5D#(j`P~U-+y=7jwCNHfvfXOi!-fW-f@>rZmY#58A#{_$ry;eDZ<~M;903x4J=;R z`U%JxN6DAT_Ku9F>87El4rlW=UH1VZAjMOn6UX?qH-s+jA+CKgxkq%Q;_-Zo+IGyk zCJCX|_6S+WF~FE~{3!7GU0c?vrwRrYOtU%{3rCH4hvc@KGdS}i=amNZb67XMzH&aq z8b>(_yOcRR>5C)up*wsQ;W&UuljY5B0Ev5qU8-{hmKtMacNGKTnD{< zD;%nE8-EQ9*?t8YpZCd72gn^B>fO;IkFu8AnrEXNptQ0RP^I+UJEiEQEXr%nZljMc zm?&QUt7AZY2BuK}EHn%OJuJanJ?JN#WfrPhcdeWpt80}$M@ekX6Sz_dgd>e}RO$%82+>?4oY(w2bX-8BzJ zCTF}TdC7^3LWR6K%=|ob&|Yw~qxFPXeb6j=Rl+K+&_gi2oOPBUzqYd7+8UNXZK1%) z-yAtWMv9QH(<($6kl}|CrJ~ zGap~Cqn-S-K|t0tlD%K#slf}ye+gBygvZgna_Cy5&*SAdPs>xfDNdO#tz+LhxsK!N zlv(HI5tEpB80q5511Nyf;+J)WnV*3?wMGf*9lW(q-fmM_f-{?!nJA$e=R;ZKQ2s^pZMbbjB~<{@lDJXj-C+NGoG%!6^ZTt}I;bfLqQ{wss5{8gOn#rGJ-r^mluU~WLo!?6 za?F-WKwDX7&trU;^N^?`*Itd@)Yp&(_w87qjP6V1ggubp7*rG23mirST(Y*uZDc(b z7DJy$Si}HM8_)xs8tvvnI!?JYKX<5_2bd}A$M4lwcx;8Q7-e;On{1?wH>~2iJ*=4@ zCP;RTOSD7?-L%g*np*|bm$A~AA=v-#qag~LLL-E1&%C5vj z#)gI>?h&(a=ogpPw$YFl$4Gh6h@i zT~IEoNqnQyZt5j>8#Qi`!47*2e?DKdC^@v9T+$n@^#(*D|9qs?pLnBxwJM>hhrH3ua!Lb89cyt?*hpgHdEq6IbZQAI!vHre?wA4Y*Ob_9q| zE?|GkJGcHGTK&zDx<&!T=&)NI3nSVwzlb1^kamq-cEHMx8GW;H)I9A14G_Hk z;nsPxk8FlJXu;!CTB9!Mf;Lh>B0JmL@)8)T_+scOx(@_M8~0!c{N9qZX$5g_nFwo} z`Qy-|G`L*7#WnKR!ku}nM{heGfB?r(fu-{e&V2)o^JHJ5{jpwgh3O!<(!`Un+Fc)c zS>^lyE#Omlu4VG*Ets)84uHR|P^vtp8AFiKzm9ngDC&?YlRbug{x!R9*KHZxe(%-I zqR}o#Ma&=mW^+;HVMmK(_(KEJSi8IP|MAY(PfzpIgigZ*K-mkTrqx-jyK(uRznPCk z<>REE#fn-F2Bx@$`3!CcYY!j1)uU1r=xZeR)RT9Ec^q{P+z!Xd{1vYve3g1<@nEe) zLQ2pjMA{Ec_hR#^se+NeGNHqph!VI@pa|97g!sm3i^x1 z#h;HV29D&udpR3$^#JI$7}Mw)g7^7R`3Qr91ldSY_oOp9?lD09^6t*NpxeUobph}j z4{mD9Wp}^!OdAaL#(LJx)YLw`9x1Ty=ENfLZSbR?QOc2%nn$#%913)$$P(B|2G4@U z0It|?Ort|pp>?1icsR-VPm8BnzovsMml6(ERyS3sk&s9Sc{R5muxBw6@^4c3+XNO)mr4$zGzj(e8>xi%@!lyF zgW{Ew%|9HKzi<_I#tchfXL8(|!V-kqd{L^5bU(yrjOE+iTDbi|){#Asj_y6T1o+(; zsJGBDi*uF1iHl0WQn!JstZT>*a$+oK&DWqlf32JuYPq1&eqDN%sfQ{*Df7d1Gx6Gs zhoKXbZi}=@AW~D_NTOnzg2hW>^A1kPrE;3yp|(Fg7NsB^QhGv+i-j$q;-rWBcDe0u zCK-rnw&O>Jn!u{9j-bkE;kVl>ZqP)AzKPWz&ZO>QrIn zC}l=YI=9kA5V75dw#J?H$z9;vm0)s8Aaarj&D>e_+xk9zN4^PYFHq&!oAHD%p94!~ zF(a7=AKotZIBR!x|AHKw>=Luoditfnni5@AXvv1grn?fUdn{@yFx>7-43NoNvM-3HEQ9vs=~+yy@`~UH;wTzF;RlmtY7|P0?2XrRj116 z`>sV^es5RjyV?EeEdByhm;)d!%);zOv+b20(hkb{7|Nqp)|YcC;)Bofqx%{yOeSFy z_Z|>KFv6&Sf!rSS4!Z0h-N^TOl*Pb7w-`u3%%S1%w;qSg`=pBX-sn>M*R?{0;OXNjge4#i;>z>(fHY^ER$vJ z@_yyHi2KKNH1lyfW#Ddj(ft^2*7sCG39M5tgBjUr02^sOnRBk}W;gtj57Wb30 zDGag!E(YeF$m@U%Y`KWXx)9Ay-oGQM!=9@0@YXGx&QFGVqfJm>I+Q9$bi9k+TTNOC z|7oYdXKgf^xr)Snrxnx@r$6T+_O)?$WgPrD*Pz-q`>}~~?LdgEfXV1(_8GUuPMQm{ zkHSL5`j8wccRlR%KP0w{A9eLOXzW|bYHWCQ;sqDl9p2I6^OeC}ULUj7dvfVpg1f7^ z%pK|7HvIoa&K}BS0ykJT16&-!CsI>ai0qQ~oYU3R^ut}DQ#XejQx8dp&!0Lz875Bz zyBwHC-Gi7WDfB@CN{d=7J`t_UH_>rPe`qP&jm9wpll-fdG|I-4MNoC zS>93hR78=HCP5micJcfyE5p(+Tp}aAp^VB-_UxXR_ZshJip*tA@WIHozcV7ZU3e_j1P{YKV@h(HMC(ZfcHq*WV+1ZdOgQDE1E z05O5j-;v=&O1^q)(Xrsh-kZ-a4OrgP4PoSn3?a96Y(xNz=C4?c)eOjeSC~plbbfgX zlIp`OBXH&n;H04K{MGV!7^SQ0Jf=Pr@sk4&`w5~gP6U&aSkTF=g+wu-x;FDd44xCC ztI6wlkzUcSkS}22l>aAS?Z1uE$@z}Yh|Aa$P@9|eqmN@LHL$AnC#t*o) zhPXAaQ04O)ePWJ?0IPVT0a}+*+)#~>eF`sndmIb}Jhyd*(jk7X*VtPs1}W=+6MG;m z7Q7EBRc|-tM&BPhnb?X`uJnmyipK=qu?^Lbe$`<|Yc1}$!SG#y8@&aw2sTz%kecQ# zOPYzU;6@)3hU9tf8{-N!kM|QGZNve$5jL@RJ(}6`a$#JJwAgVcCe7UbeN^d==?PFz z9Ap9ZTZ?|4WDY_UBT6zkGUEX8;E2E1n_%yl1$O2pe~t6@Vv(QZ1wjOZQKaU54CX#m zb7j_h1-BRG(D4&bYMH2#_h-ED`xMwZ*8Ph`y7#+7BaISyLVyIy>p;f0Z(RDs0N%sP zvPX*%wnf{NH?jvhY{q83Qxk36`YnJyKQ^_(??GTIvJzPuZIXWR_L*{Io2txe_~H1o 
zt5F2!1jKonuo=`PU2B|1z~n8Ab;b|Wo0vN(m5Yy4V@<0FaaVfpcxa>+&I1wAK1UuL z%3SU}=ygFZGzaTEqnNo(j)uIm6b-rGQzAI%p(q<3@QdZtygT3N;^c=0p4x8@X&(LR z(+XxR;u?YIMWK{OaAfr~XU>PcxgLv2%Nxm$K3DYWu|>)DpIU$ zVm|Y{+pI#b6g!=hjKfeJXyPb&>d~H$#6%XVqiIdlvV%g5sUMy)(qaNW>q~ii^y~ur zt?AD1ul$Jl9%C67_&sLCZzxdC<@2F$1Eacn;MYf@d$E<;XgG+V!r?DG(zFeVJ?C96 z;wMa8S6_6aoZxLaJ^0BapgW6q|Lnx=Ou0v`DOk|5`y_H9-?%`?e?n(B=DAEk&TA$!V6I!6p?`hS|RBGPykXu?S&({u$Q)b*3*@9j8FJGmB zaE^bQOa7;|*BM+TCa-tpbT8Pr2218zrEb;|COh2`8eWxcdR4nEHqqK z=V^(bz!04`c)0m|`5^CBS%tOJ7+O{`k?ja_dz{c!6dhTttQ>|tN1ne1& zApHlQS}on4bYui1bR)Rd)KN6ON3`yS#H3F7PS~Vlye^cg)PBEWW^WN)ll&?}&hSdA zkmN6x#ty5A3iHnrjVL(?U5u6VG-V@Gfcl_#wkq`|3|0{nv&Q7vMYRpzkl072W7m5* z&oVEi)7lC(QapzgG)G>pWvm!Gi)O^#3I3UjqzL9U!AOizhmb^Wx;QxjsF1J|!n}X6 z;BT9l%F+4TDT!1-d6tS=94H)KrX|m(Zj>ARVhMe+)X((a0IC?-!)`MR-Lw#thqn|d zu_eTY`hg}%fb@Uuz`uNzNR^&>kBd((!V59A7Ha!XcFs`_A)Xk~;#CXeh}_^t8>!AJ zUPiT&K(7%iNr}kXqKdo<7jQhICnUmXs^eb2&vuuoJk)zi;+``xUQtwBqoWUAUhPn7 zH+rVjVBgOx7g?ZsR)_O{gc<^v!uYp}&+l}ddwgs6eF-t@ z$|UCs7ZPVuJ*ehjqS3jlWjV7e?xfMB4=$47Uxom;5bgSPWIGEElc$>?V&O|raTMEN zNx77SOP_GUN#=n(G48KM8Z26NqA)#O4!REszr;=2>TZ>Q3>2?>9U_HNyCm2f51rg! zV6382r21h|uNZ84WFVZ34iC(o9}K8XeU90$=Zo;&mnuAcJ*+5CGJF=X)(3_rg_9x7 zy~X6+Yg1{BCf=3sT^~?Ofz2tZT?ZfBz@}*BM(drpO&h+aUAC5+oynn) z>wtcbt=7Ezi^bjH(753j{~9Y3@-cJBdYts?s7`e9u#3^gQW;|2XSk2@o#eBWKZ#j_ zK)!}1+LjvtZM9p|OO{-K39t45TPFpY|K(kb3~1KqZV{g)GZN(e)xG`aS{TJX8Kz;X z6s;UEd@*p-p6lL#DbG)%vEX}S?&$-0(Q0;P=h~*U>O`r0KM2S!3->X7`TXTQl8MFk zxCi*j1Z;(OZXnUYcM=;{Jxc8)#`W5bwyL65M~WgiLN--UHFKp??dNC#ug-Nf{&gXv zKrpoHE0pvM(*)SH9`OC4Wakjw)I?^>)mq#S1*7kdn4v~sw_Y-e$@?kcgBefNm3Z}1 zex|O2R6f@O8gXx9>3>75Y&)*||GZxI&sq}%(xwwdJOC>~pa>ajTSJpbd@*A7dg5!4 zk6h9&<%(q8ezpvb9HO4w0hY|eUa|om7oN?VU3!&~5@6yv8X{9$1t#QCqeF>WW-t>H z-LiW8IGKOJ=aucJVky0S$?7%u$Nc#kl+uqs-YOkPs(&qbHg)RQJW7=s=t3%QBF$1% z#_Y4YZwKA>z0=Wmb<7?v+xod>>B{Ne?qg*F@fPZeXQU*r9Dwbi3+0bZd_}gjR*gHx ztSjkq+PNj&aD>biYs}u9-eV(Z87~>~%IKl#t(?6Q2fR{sSIb^ypjN)mF@5NXrs5X- z9>>@O(=V(g51czUXR|oXPl%oW`kde)6?3>4F7uE4;s2B8|Hr1De@Iz%p$r^m@`-I~ z#`G&{F=;oWUUYgv5b^Y9o!h=qPp^UcdisN<5AMT37H$t~WNVd%F-(rw>tviGfEqDm zm3B}&j?qvFO)#~~K_Jyn8tsxqJ3Yc~-Wa>1k=n&RG_cma)DA4cGAVE=n@D;4_R5!f z4>yT1*wyO9jlu-WlaHJ~C+b9h#Wx!opE5L=lhhVsa-@(Dwu}t43euPIlp4ya_%hqR z)n^Lf-n7?Om_g_~1iSZTlP7da`dK;+h&r^rgHWQV=2_I1mTIVb?|R*Bm7QoLH2eK@ zdG>A#oy$W_cy2Hu4kipnTU6k8gIvT+WZIh4kh?2YBUAS9(CsnXH^XP+c6&<^f_&CL zK2u3HRED|v#IiE?deML{Y1RxXRIDaJrtV`25)wl%pTNBk$pS9pH5QlT#TtF@COgN! 
zsc@lgXD&QtkZ9VTSpKvHSI+^XiZB&>l>tEb>9JI zAqQ=%0ochfRV>wj2}Acvh@@=XnPVPI^u*NohKe$+9lI(LK0)xWe`itrC&|{%|H9Ay z&#MspVP*amWLaI=JOPi!dtDk3Wx0$$YW{YmWnvu=mc;ON)2UI2y!Xc~QXR85oeQh%+a}dgig{*^K z>Q^tvl`;GbThyD0B~!x}p zV>Aih!aLV0A)FU>j3P_pxL(Vswnrps9=oS&2`zeLne_S#^a-A1vO+!b3C&G5%WK3( zi{u>s(iTe$wGFzJ-1yZ%kkUrA%kdeoKJY}IrEtsN5dt>5K2V@z?q<9}srrW!gJ8rx zxrv_g5R<#Ot>O%6F+YxWCJ}bI+F1cbVcjYW(Fy?4xM9By>?OY@cURT{v{??%f zzAMc5o5ahg-6fWi0-H3N0*rnHLsf8WgK(W2zk-h|lI<^Un(lf^kNuQre=H69K=<($ z+qR_0r5~7)C<;JM$PKbbUR7ip4U8Faim0;7G4eq3B4lgYR^Nez!Jd^}{nqp?pYh0z z1Ou*;yrYe)dvsb&{+-rz-}Nr+6nTX(LrETjxn*E>5Rh)of|)8q4F{3WQOeF9fyI~` zqju*gR%vH~TvrcC$|H*XSco?053i&DL#K>Za)jmGe#=nOUKNR3ZiCiB!yTGwO6 zgH}7IbQHzU=yQ2c*v9g@2qA507c8!Ir&e&VYK*tp20MPoZNlDm6Z9Pd`c{9jsBd6B z1g9RwP)A^sxKkG@ef=3xMD+m$Z#wLyCtB>u!a<%oO#bIQ!Nc&?BXWzk=$padstqvcK1%?2$#@2Gv-XFI~DW%H`~ zv9o*0j|>Q_Hg9A~M;F0Yr-JTp>+RJ+S*L62-jjNC2eH;Z1#gGNVag5bXfB9MDeBz@ znIV)iY5L3%N)?ee$ETM~y*hW@f}Veoz>gh6xL7Tj z@%w8Ex~40Yc(qV}1C%LfiRSPnBc|EXS_XS{cOhN15@W9(s3XH8_*SOmjYL{pz0ke2 z;fhb%MsL-^B^#>^&*RdoMx4?o-e8vMrdJ3Xlow>RKKCu%3{uqwn1M;Zl5N{i_+#GI zr7KJ6Qvta+J?hvO<2bX^ADT`^ECYf=1CWpQ0W$#q1&rOC^8iLU-K zLv`X4>zFh;PhM`?_3N5PsfolE8^5}_U9`?0{g-oHvwHBP5aQUz{Uvp7yZ3yA31rLr zOO^WOR{IS;Ml{GPD_FZnC|39KeUf9zd+o~ z(N*fBwB{dosydHaak6SmLla1AD*XGRcYjR^(8#I^A1(H3CW-355%btz;43srz<6*^oVxdF^)W5C^%6| zuHXN&gb&|{5hmL9-lR=mX3yyl8_sN{4I};}@#g z>bSphci+#DBb8dlF8bd-%iQS{OU?;9A``VpE|Xu*w9aN#XilzfeFcUK|6;i{l`&#Z zIV#^>fhCj^(eKEaYs7)X_lY~M()IdU)Q2J^$!t04pWj^OXC$}M`O$%05|M>S zeM$?}h|B>r0IEPATB}l&?NWE78o3am?7@^Rs|l6*^x~oY*Q3Vgk3TwC-`>0=Im5qc z8(f0VoCp#@=jHe?FHZ)Qh_*RaU|aA}wi6M46Z?>BO~Hb>(>1$vg)92~I}tYYv)|fw z+GM{)L>{t^l;Xl5vE#zUQ;vjoqE8r8dK3w>raqqJmTMhK+ub{%|BlR^Bl2i%Pxh1d zsY{Yc*p5^=f{@k?!?pu_n|8JV_)h#wqnB>!J3b@bVqrHQ)cZ_2@_1r)ECNurW4cwO zKHcCQ8H2h~bZ)npq_k>r=71^9R7%FaA=%Oh9;)ic#iJ#1M`geS(5h%h(D1QF~5rDH4prtB3Bq$kz=H@y@k zt5fGS((KOUH~g5`do(BK94tWNjkOerqB7<(4-M3x%%y4);jNAm2;D>Qedp8Bj|k#L zspjsr1#?z+&TSALS6FK*W?ILOJw3oDw2S-s5v7F&!hL@$hk#AYB)Epg& zUX6`;(u8lA(4DxaXlWJV<2%kSe=Z33v_S$mS#xbU@+As}q!h0Wo9up2`>KX4wQ{lS zi#AWHr8zQrZc)*t`kjZMX>NIS!v6iLojSdSnuZzmBhJyUQJ27_yrF{r(I%(}vb=SC z2Q~ZU0-B{?t&`iWDcO419zn*qca++|g;J1|zq%= z`jj71(!1i5`Cu#O8He}n7&3&QPgy7VECP-TN2@<4Y#%brG1hh|Gb*b+TaT+4O2|Ci zyNuDW4zoL!CitO?^El$c-AkD#@!N^;8Ficl-4{>A{9@r+A=eoXR#^MdK06^6R`xQ| zj4B$Ek&`u#vj)Gp!GGF?SqdbdmVzC!(LWPE-BneFIkbqKcww{fZINE=h8qt(%H!UJ z_+ba6b%&xRdtQ+*!yvkH4yc3Gbu?`}AF;A8G0IX|GGb-d|3%$X|sTf-;{B50Ie z1XMtfD$=CJMiUW1dW(pFNRu8RBq{>Z6$BInL_k2mNUsv<3M$ei2q8gwOF+Vg6u-yc z_}*{aan5<~J@<_7-f^#gWJn-8JK6hL&suZMHRlQPZ6EiP8PlFI!4SF2N5+%28qIi^ zgccrhQ5!baSvK_|C1R8=r^JJ4lek+jZuem)r@XicUs!GJ|NMb5H8tF4MgX}kWih_j zrYdd7r-_+CEYTRu+F{lhHo4ymko!c}q+LhabTSnooBV}4i};5)0Uh07Qnv8yqzry? 
zvTah%SlA(Y<8)KYxNqsZGxCt+L9^16pUOrWU)9(v5B;0jc)n?PEpGW3=pWG_GwCSw z(OVUWeHZj{i#v8BJWoGKJ}sn`Fy)3`x_*)d&9lq$#x$Q!U&)BxlGCl?Q*Bv(ERbTt2G$#H>wxyez`K`=;&8m_-n;%4) zaJ~z)bP)a;$PxGbH!_&de#i#|esGC~UqR-!;r$YWILoDNE2@T7o8~{;sAt?M`}Q^F zP30*76{GKM6{7!-6ybU)tH6$BVc$d{s{%+Ut2CjymT2uQ+ zGgY8E)?W<{0W){m9~*{IfPOjqau2R1W4Vr`wEqKWwRv|%n)gpstUF&w*Z;K8ERc}X zX5j^L`Y9Zp*KAsPc`vD-2p3+!C#ACG)<_NnJ9uSft6ZNFe45c%#h(3|$A6c!ww2c} z2`U(icu+xChUN zSs$flLMGe<&y0&2@v6O;Od(RL$eeZn*Xetl?`SD|Q(t7`ZlFJ7s<-}dFU+E-Ei&Xn za;6ZANt5xuKirGilQvZcyYmm8J37<@_uMrPZiv6QtL!`!#FTp~CB6gAK2)QfcwUGp zzBJuoABO)Jm+HuAQfgWmZJ;2Y5qhjGO*;+ky&EP_fNt00>4y`=)`MY({uniHDm@x0 zoO`&P3Ho8<&GWg3xyF-MIBJXA6DwxM*+XlB^VNI2{Gohd@s;rh(h z5&jz*_u;@o^8i}R8F}1+6UQqKouywZM!xM7oBbj6dDP|n_!WM>2_N6%HHTiUxba&P zv)i&K4BH6b!@8%oI@VUQ#YqAkAL$ByC5R<{F?QxaZE`zRAfsF|gBzwep>!hQlj^#{ z`_7?SN_B%#MsG~-7TcM8o&FOuIK&2KmbpH?y0WmX)Gy{h!i>RMTeK)e9oY!Sy5^B% zP&u(zY5x-5=W{7lQ@$KM{?35U`eKN{2Aj{^YL|`Yp4>xF6ipFz3{oZ%4x564BKeV; z!nFD60-^f3>$b&kJoZITb)HN*~q} zL*i^9bF$WsNW=w`$M~fd`O8KlRdOyXE4zAD9S^OG#YD|XqF8BgU}TQ*RDxtCI&w}A z>cHRJ9tj2Jy(!4o^wtAk!?pu1n|#ErD)!?Wr(>QD&jPlr+m2ELXL;ZFdpRe@Gtel> zcJ0nY$5Zkz3@f!rAoC3fLXaX9Qlbs(ujgUPMex!LSbss#CXE~TL96^;is9FkB;ed z*N=0rK8;9HC{&mEv3>1H0$~ix1XL1Y?wDEEj&lVO!Gj$9_>LTsJyn``dfiL2Y||v1 zJ$ps*b?3XMj@{Jfqsqodyd}5uZ}29cWxT-L!t7@bBiMey#gHoI$(Cd$&iBNFvj+3y z04auU8tkV;n@F&`8)T#UlK5XbDWYSWZf;yZQQ@3 zbT4hX8l*HAEH|Q+E;iM@vVVWjVcLf7jNs0V0vA?2E8=DyT1|S(yHTNM;(}mhVbj ziQtXLT~9i)Tod`i;xhd)ymgpi+}k?sDtUd4-l+Db!J=^ZtnZJ(5dvSzrBB7hMQ?nb zn(N=X>+)}##{bp-|KFn56^xZ*)S*R}#07A!X7OS?+uQzKD0u@|rC`csAsG zwIwBoSH4;eF;Bmquk%=LSmt<64u`jj|V!CMg7)c2Pi#8Ioo2-{A zO_vm}`cl;12V3@%sUvg+-x%RY7qh(n#XVsB2Cd%j@ybSSGN!U`Wfw(dn$>n35_hcn z=7YjaftI>zrcMrTEt&b=waBTjKFZdI@4>(|vb!A0*`bj{silP? zB27Nl4s$fqf=dISTXoBnkMh7RWpd9|Uw{AO4|xLM0K2-`myolYlF6&D- z4`D^Ibh|R!c|Gy|DX;i(+9#d*m*<93m*&|efARTVxiYf0K5Nea-iNXWS|_#feP2M9 z3oF+Py+OOC81u3;p9KN2Kd> zR}S@tj8+{}aWv{>eZa+wn|kKVLh*=0A3}d_k*bEyg%TrRMDYi~4u!+lTLw&^ucik3 zdd6)HQezN$XMGigf7##nKXO(zFVlQuZlw^cMZ+bY#SFaJ2atz|XgLV1#rqSgdb8g>NHol*7Rxag~at{0^+?{|AZH3qL zwUxqzQSecP;FW{4nGK0;x7x?M8c*iKRRa|7S_{z+ddbH5r@UyA+HycR?LKKIHG%7z z>%oZ9_fnRKejTuc!zgtKL+ULTC(R`-`>v_2?O)X54XaHpFnnq2;N&=_U+g^Ip(Is* z|D|Dup7rRnf-F}%y#hu`vxdi1-#sz0{vbjO^C;x%TF^G~4vBGWsB9ck7-*pYW`u5%- zBMEq+r*P%q!?RTExoZKD@nhA@8Mki|idMXh2+I54EGj?YV@)3j{B%2#Qf%&_W)1at z@<2DqBgu8dr}{UGAarT%Dz3S|{8u^LEY7;tWa*^5qO_N>6z3f=qiJNik;U1MdUwd( z%bOID3xg;PVmN#kGM@&U#>S$IiMgVec2G~c3l?gb%D5p?=7KFuk@Jh}+_n2sI@*r* z_a85MZOR+$6|+5Zb-~i#KV+EDUy7g1psR7GHK58jD(ij%3(&7s5lQ5-Jk=Wawp+d>j>GQeWdYo0?Heq^7hhl3&l6pk-dN#dXYH26j-q*2i1)gp? 
zs^om1b{W%ZttW8{Ui-C*%s!R;VGMl&IoC!F3#lk`x~(TsHxM-Lqqo9)xAl=Hhq2T{ zjUKeq&)sL)i?gPaS8Dbs638#ndoTACKJ-UjYa*P`R$P(i${9Nv_T)mI#^+(IbOKdJfo$6dSwB%wwa*-QzqRc9t`!j;|VA8-#Z@z-ZGF=t8SQr9`C;SXARyn_nDrZ@;`+w#TK0A%z;b z(;3I^+{j_mar+i^UF5*meaBmB53_joJY73cUS2-qc>=fBWkcF@Fv6Em3fc>C4co0G zTr7sKxT0H&2f3pA+ zl#ft5lVg|+??A(#rRfmLiPSf?fU(KBB1wilENZSy$7{dWtT7p%Nns1N%v7rQRB)k1 zr#Rf$Msp9W^1;;LPp9}!)gzfRr2@h!ej#hyauFZ(dJw}qQU54|^#@C+Wj9iT8cA-t z#Z)0rW|24#hA0f#rR)x?4&c)9VGf+q+a#NH;~Y_(CrD#D!lW&%uq}H;qp~jT zj@F~eTistHE6sTGb4<~xHtZA_H4pgbzNmy!yPbLZ-L7zH*cY5s*6UxL#O$&+uyEM5 zEf5$^a{Q$yb&0FlqAK6n6YKiy>r8 zu@)CXX$&i(xsk8^>&(fsSPzAk=Otog>2oS)Z%D(NGTp`#@*`^mHru;^Cjw>+WGDJh zP@d}Lr-M!5ml`wRhRScmTT5q^@Br?RqMe}MwOgr4e$ZGHzv|m!T_15`q{nk({A-en za(VuVt@A^hYv1RXoIX?H$)ChhNgrLweUFG&L6-Ab%~YAta`Rr(+3>W~sMVjNV=clI z_vh0J3VziQm%6PgQ&;;u?o@RV3@r4~dr&TnM=daBe`p+&i;>l$;^~$9!?bWQz_Gl3 zr4nADofIzL_6DT|kcqRv>yfu9X$~NjFuWP@M?=rK(uL4d_|AC^!w?bHq4wO{qGxpd zTyA5|iYZWhGp1ns7wxGH0M8dzO!QJ%VQl_*B@%w-`1y4ciQ3(-iX~I-u&B-8D!^1W zFDuBjRwyr*>@(UBqPLi*Biri|T}z}&f-=8seSFj>@}@q?>?B^JYmC>;7z^y>D(uSFtL+*wNT>JwhltKFFsgT`R$cL{%6-Ko%BSorv;P8_6|5!ztC8dto!lv zI*bc^5CaC5DXuaFgIo~M6&&#}*q=j9!2^xSgNn#no^%Ad;hyWr^{tilCI(gvu}U4i zc!%t78n?_WC=qQt1{5StlpvEkGBJ_Aw98GbTdIH|ugvE<0h>ViQy(HrwJUZRHmGf< zf`a@Ucsn|924`e7yAfdzoliH7Hs3A7M?C1eIf7Q$_Evp9 zVq(3=;;ay6XL!34K*4+&D@65dpT0-!X|t-b=L`oEFODxT6=M z-+z0(Lf2~21eRKh@F5tOp*rA~E0mrWv zPvD_zHfDR9P8O`3$RpN%av~XQ|9o`B*HE%}&x2F?0XFRodbbi)cJ{m~U1)r&)s?+> zrmR%0=Chal>Aaqg38%|och0eu*}Sm*$Hnr$OXT?z|}pRPK_r5ymZu z9wb7YDHn?iWt3witw;HH{1hx?WBbtMzAC>!&({Jzg6}hfF=J;3I z@c-&-{#VgLXVHpu7joYRzZd}aHM^8AFvY*ARucll@5&^pipDgFeF*R>NYRfK%Gzal z6YLGxIp8e;cx-$3jKY0j_}LBOT+G{5X|l$HVA%3|^|k>sWwnU2qR9d0$v+#$1ebT| z+BeD!2BOb1C5XQ72VgivV`yBTS*U(;9KJdS1Y;;IS>nEx+Fky7= z&+enb+`mZy((opQ^O;JqKB~Bb^IDf*-y8@!r;&!aW5glZH1>Q9xnFB{;Aqi;^dAk8 z165>LWQ*7WeyiKWnC?JYG2pAuI&V@Gd3ivCYx}b6vDc4Tqi>v_J9_Xmx_kgW6$h_U ziq^UN5}9fYU3;~lRTQJ-imUeWoI5P!s{QzrP(UTi*)tNweDek`Br1BXSGr`7~3vIiTVrp!^YkL}ep{KyE#70+PylLhs@xs7@oK!x* zxnCKikND^*vt_^exT{0s6t+i0Y)>)kNeN9MY!42S#ICfHa-kC*E43|R6xrMSdP2jS zO3N^TTLZtUpBc|6d=M>&+L?aCGaARt@Q4Z9_=3^GkptL2#G?8Cq@guthZlCNzPx=t ztt-}N6JG2y{?V&&VoyiCNa(Djqz7Ex-?(7-nRx#ZmGaNB^F5S$uCk(I_vg=*$dN&^ z3A9Amm7y47?>t%n`UGfm&U|%oBD|wPk93-r1i#7OUO#_uEX|T@W&5kjhWlGgs~>!- zc2XOfp`C>^sFsZ9o84r9xpbTuwAkvlsgpJ~`MxPQU}C$xD(IUumY*iNdetugRllRo zguUsZx&?~Y2iX9>s>28-Cx*fa>`f=8v11<8$IU}}5?EzD z@g$c>dF|tYq8$%UYpl+68D{M86o60dueWFM`rXkSnZb?UEO6lqM=*ri+g}UW57dbE zQxEWMI7fnu5zNTh+;qYnI$o1{W-PdHvOT$@y>9bu??rHtB2kA%Kdb{;A}N)@6|I60 z!p^C3*N-$VzzB%%hu`XpX_1yyC5S>AS0}d6U&l9gvJCR*OCkiN&VCnapxwJv`? 
zv>$Vvsn!fm3iO0pFX<@IyO{#G@pC-`KrOUiYFcnObYqFFxKwoF1_5IugoeR(WmzEj|XV0F2;qBsHL$o zJdMTMOnX7B=O`K-uzj|WH{{y3;W*Ll`cubr)>yTm`1-qIeB$wnfKbIB8k4;455B*I z_2H5TmFz1VG$7P!&>T!NMDY!@h|&#ep!)^$J10ts5nfkwjfajt80x5GWTLkBK0p3L z%s|9sx;Fx+u2%)_l8t&{HK0)g!2n9Yc&!#&zEYhW9X8YagmXjr@`Tim!436(36&-z zKq|19S|%?23Mj6P@lebYoyL#X{E%@?a^-%0*v#rc!oBeOoh&E~njUk4AdGQaOVEP1 zBk(k1a!7_i!dv}}hkD16o?}6x&f8Cof@m)xntb=?b)sUi)TuK+(?V&TR~AW_TjKXx z4l|1M4lU)dQNG4{kYHTM=LwbVRXoALhf647rQ_NB~DL?wSY4TU;e4{cL= z1cv0*gaU~;*q0}_uUl&i=7AzPc>WP)MD!~y5_G-t@V;4o`3qz`{@j7`^2U9nXkLyZ zm)Lf{Nn&ArSFW0r^-gbBB$LC2(louK*4-tZw3B~9b3!qr&{(ZwkJ!tT%I9Mk-=En; zkb%#;7a86rTmD9$l+0VX-hOqMOUCS7o$VuWvcx8~Ql^BO023jDvEcEkoFVNmNZQXJnggv}(-+5_MY3?9fb?MW6`Y*7k z!f{gtnR6?iw9B{Q`%%gf0BqjJ{$NvlnNWdOadjB#PY5s~nyu}w66}vScjoJXFpXt^ zxUtnj?WEB5cwIk|M#nxn=bObtvD5u!)x)@)&!_gtudD2ZCB12Aoz}7GB#_Na;$eRP zVRN)VT_UPDuZHs3vqYp5UvlRJ5Kj(bP_?$Oiw_9<^J#?6T<6Wj^RluZKW~RKIn2N; z$RC4|2kt1r(Pr5kNl+nHE;)~6S!G>SN8HrbDTq7Mr8Ad3ym|D4>psvtJ^Qz9#Q*5$|A}oz;5*;}d2=w57pxJIhWOp(wI`$ic2mzjnd_$HkrZePiA}A<-@( zy^=}G+-LF=cinYMF}P^d4-5z_t=zA}l*rdw!(L!KDYFP0KO7G|IyOyrr*z8(xjdpEi zfp5(OQzhUVG_utO#+pMA98B>AXVJl##>PmKdj_L6-q>KwNvXod`d}+=M^g!%%1`&4 zjYX5?y~5(;j5?)m-rX#{Qe$lWX6>Z<`08XiC}b{w{8x*QfAw|$Yl;?fE+pfzGD^fk z(zxZhV&oFyLU@3q-GbIlUw)@6E6imt=0_oS-85rt2(}FeX^oN$O7GIlQ5xiMM^i@3 zYshvQ)L9f*>_cql+d`h zb8JRBn#4YJQEjt_-0Z90qV}IYmttY!@%D@u$RYQDtNuWf9z+J>wi3kXDvAgZjeP}O zJ3sUl8y*ovHuFy8xx@4aHoTZn=k(;&J%N{+x~35w?}}HPtAqLvNqW_=4{nx7?Afs}QXlBr=4IKDQHb+p z-r8jsy1#FjuIYgtGxm${^117$M2I8tJ`rCm+ozUqnsN^=iJ58y5+=E91eC~Vi!w^) zTY0k@$&AY&_&}iXVE$pdMQS9&yW~(?_i0$x%!gohi8E>>eldOqfVie`m?~TwVvOp5 z5QGG#i`o&p#=hU0RLM1|_VVzOJL6hkv+a91ga31Dfn{)F#@?Yk#c~=C%^TTnLKa_+ zrD#}>yiO8TA8i+o%`WH~5FQ&+((OHcPuCRvhsoEdC)zR$pLzotgo+CJPg@HX8p}IZ z`_tkiicI(Yz$_J1Bo~*YRy&+Uw*Va(+*B;+G=g(VC9u~VP%&MnyG|$bw*G`tJGMqV zk7Q7;V_xa=+v?wjozSTW*?v*Wt2Ch1y_`>O>ab&egZ!pMmN!)(^uUGfXG^$O%?%?@ zH{E06mJsACnERRQxp1Q2LmT+Uin{{tm!19eCJvGqUEV1B2|!`C5noK)xLu1GJ?OK| zFYN2$vhBhju!PMu=$P3DhNE~DF5wq2Z&G}dSjS{`Y<`@?w6db43x5bTYf|L8@T$1B zbwjQ)xL}9X)-Z3l;SX3JaBB)8bYXV~62S)2F<-jnl8sex-8q!OeeyR;3uZr@n1l1h zM0??k0Zqw|r{n#x9HZHkbF02S25@$|*RS1i*jqdIw|59NMJst^Co{TKE?ZC^zrB91 zY2gR=?CxUqDkFBr&KF;lF1SAzkWw{%%eY55TojLcYEmDbsv^9j#hu!{17U3vh(H@{ z3MWx1iYdQCpggItfX+{ruZ!xecvJ{lbA(45K&1(93Py5i}PE-X&nz^r=EKXwmB!!Eq zfph&Z={3$@v^dI+*F=5n{wBjWGl*&3U*0TrXAmhcEs&_ApcpEry4$+1>4GFYTidX{ zMvw8TWfumLj#)9C0{eNJ8qLV*_BlP$;#S5zwAtMKKS(hvb(^el2*R~8r4zMIj4Nd^ z_ewRJZf82i=L_xZ(nn*kD|=}nNI{<=Js!x+L4R?+;?ieb!?86s_*!mT4vuUgj`sp< zd0m*1sTTeLhg_hBey0yh@rJ9j6RK@KfK|JwtA>v(u)+b#<->G7NP>0}>LtnIk#VLb z?JIt+g|`I0b@~Ki5kJbla4A>n+;=tKlPn*zdOU?y*$U2r22d;6AG30driK0{ws#cg zEz05vf)3`EoNFoMWfgfTZaByU)yeJT@Tk{_8Xx6(JVV7h+ry$Y?Iu2l-@>N8A$fj3 zyj~6%9dNd~17Afov#|}L$HVC?6R2}K`ABylfcg0^_#De^y)(8aow+TgKfUG0>vK4W zn}h8Nf1?s~61&n*@%VrMV{-J9jGUX~%ab}#i<;=A$&HQ??ulC~MR@#n;lut*QKGR< zI61aGL2FYjy3jG2D@q2=i@WU?Yfcv!X^{sb;(m&0Q(iFGxU=bvt4<DvcdpTjq7;tguTZKdATC!hK>)0`sOW;AON>bgo(Y6ohx&yz3c<7gcBr0X{70c@UI{qXa7_V!?okqwm`K` zZS@a6_)&*3=@NVF(`naPX|Ze!T?7YCdW>R2=i{-XjNWgNU0@3JqmLn7B!=aAF4d)f zQR8GgCdv4mVsxSN*yYDR%6dO^naTzPyx^x0$nRsH4m`ZATUbYMgK|s8^7F901W0y` z&M!hLT6O{lc4cI@?1RqtwQ)m+q_mM_{wl@7qF&v_`1g)upMHL5+4h;Lm?HiU#-t&B1GIh&fZ(vvD zoof#2qhGXD^1mDRW|U*sYx=$=<3-v)Cn}lA3iVzfzisd*VtBlS6=u>?%g$#N46Oc) zN)a0Jb$l97rspzPj7^765mqjk0D-@Qk$IhYc2HoaGi-QMl(j4U$w#+_RHwQZ@mX0V z`yXt4eSNZ*d(7Cioo^h$zksQQ)A^O`2VEK?siBaARLzi=yic}{pS2J>yDi`V+i#g7 zs23U4Eo{twF;ys*tit4V&JgbkKfq&4anB|_xhwH~>8jmbn;TL|=kKZZN%-1aKc~<7 zv)6B6R(b(&0z&e!ZMo1%5Bd>Nni;wc!@=aNvZE_!bUDk!chsU~tOlghKTLXRZGO$H zba21klk}MFX!<8jLAGS*_#FL69;K9u2VM+j2k4enWGCDMI(%xvwNoJXyVk`g&Y!3< 
z>85X--Nl3Ij@-Qe@vYYPP|d&-s(ll;<%qhjOwKKGrNG#0q#0>1y(o&?!M?35qC#7I zpg0#&u!%(ht6dvv1f{kX@zAf@q>s*~?a?>qDE3u9H^LmTt8Qh6=75~XbZXMH-=@t* z43^nXoJ7#JC0RF$3u+lHOlj0>vwaZv$*`&K>iwJIxBQ_%tB*VH;Ag!OpA;xGHwW8iz`v(p5&G|{>8*30|!R4}c+ zbj||jL;+$_6c%Mn;Tybb{fg65q5Njxw*$R%Mwl_%?e0lYug8!pjT2?dXx5YQoP;y- zS4B+7t2vXtSyK=^I89PCQBeBhh5Jo`LeIFk>)BdHpmQ`$B+ryo%otsSs0(Xf?5+7( zU1ODF&iqlN2tw!ywQ-AC6XNccY!|jCMeSZ*Tp_@xxa?{>_gO!3f?NlnJzHPUjN4j1Efp?m z3Mj)^yJioGe<{1;e#yna&b1Qa)YxL19& z`)-(Uf8yDrDNU=Hx9F5CKYiEiK@43qjdT|CGhrSV^T_Qt%d{nq!jFiB&x0qRt0us3 zpmYhK32X#(4~YIB%SmQ*nYTFT4FZZ`r`iH|#GxY>GIzpItgs^`@oBl@0H?J>gKGPg z$}8~x=ahc42>PYu+yI;iu|7gM*aN(AA<`~N2qN0&GP*JoTFt+ z&1FqHGJ7`$(o!L9btrK-dQKiBhepvk{qIDUzt6YVu)Z90d-^O zBoKs^rn~7I~0^e3v@Sju3slz1NP`H7oPEw7_)pqYYp)GdI(Z%{&)W(JHgBf_y0X?QnkP zxL8cd-BG-A-^u~?#YqQIv&$PAcM+ad*%l>`L7Sv4`4v4f7XH&G4){=j8*Shj5VZY!16zQb{lA8`HZM?jMwObKLfHY? zbkfOgaZ>VrS4D~KrTz&d!UQ>cEi7Rlhri^LZT_F{TqJaQQl$zOu4x98W|mvQYA8<1@#L_x9qMO$=^sqv*!BcIo_#>pD5pbC*x@x3=HGAEJK z+&_))Tn2fnTG{$eqSd!w+DcrvYQuc9JiwrKBZ|Yv%gcwm{Xn%*o`tIZE-S0EyUzZJ zKUS*KZlO&-%0iT_Q=+79-x-j5WAlD|>BMI0+N(>l%KE(K_D2Bf`k(YE{{P|c|F^J} zZD#!FN5*OUZexU)JP>ADAbPp5WeBy4^Sjrbw!*>)Wdica*(2K){L+k!+YF0hCGWhU zvYER2IPLhmkGmx;WNtn&YPDv6uy65z=annmv#exS=DxpjwgHSQEc!8B0V?q*bwa9l z*<|UDcps--#9J7Js&aR}Xm*HobDIe;*oW;m!b}m@#;#bhGx_-g8P^X5KL|fLwYrk12c#Lh)4^6u*ar#9K zn#^yO{tJ!ZMEO7)Rs;lj5yn%@eBK0RD(=sFjJD_g{p;Z*o= zS$#m&DtJ@&gXkVt=55hY=DsxyG;wtlK1ITfAubL6Ooxjs)~&uIiTJQBf1LZ~c9#AH zWr3L*OcvIlk0Ab(`AHb2wv7&`U)i#dQ8j$bVQ8?RJdgcu^*2dRm2r{T-N|LwR*#gC zlOee}x`2c=_=xxBi$0+D4*)q1l)6Jy=Dbrq4c#(NXxgwcwtS?%4`^t#`rfmd#F2e5 zbe>fqqsvlRyURu-sWLtRjmU`KBmY8i`ln?Dep<(3XogXn!!G@aI6pp-4PoO z@bG_Mu~#R)w-;ZS>EpU~j45>J6g|;Yp9Pt*0=B+AG{}bL$L!OSf&8Y;xIA%VVkh+7 z8!Ky*ILAevZ3!w8pDJ44isvr)dFXT5e(jU=KR{F{ruB_hcbgJE^%6y6g_Xi-29-|1 zqgH-GBJFvrH*4#!F6sE`sBk}aC>XdrJ8=4wBu+*7JSe4Z7r%W(k`i3c3&{V8iC?H< za=MaruO5mg=XSB~KgGiQ_HBpSZ8p$SJzPB>oTyG3nH8L^t51BDuHn>s{8al%(bheY z2#_bVUV*#953@H9r$~& zLL%$+SQZIY9xJgxIXQ=1`Dt%QRcq12mwmGRw}nv;W=GCXoTj9MJH&Ywi)pQGbS?qK z*imp!4Jag|T*suarYt0Udhlh)27k{i?Prpd_kl^%0{Q3N24Xd@VOwu9;Pl4#Ya`=` zVcbV#@62#Ex`sUzpL9p!tw`ZME z9Q(_w>_mhldO+{s2z=@>22`eAw40E@Kp+tzj#N2YML{X2)_OfDY~}mmZN~D$9!&`Z zvE-Qh3c?lBy2#n|XE2J~=ZpCnk$exn(6-u=Mv-2YehnI!O3`7W zlTkqcBbhE~FKU;nzDE`5>e@6u68b9J_HNTyf`;pE7 zr%pjOnf`HJ+xv?VT{9uJsLhbGPi?EC+9<1Akc3d@S~RZBTivI71u|g~+Nc9G9n9!# z3!L&r52uD^MWHE%ov`sG>M1b5EWf6pV@!s3g=svH7T}Drb)j(g^7_p8-^CtO9+1-1 zoc>kCgoRl!_c3zLz$sg#gOTv{KC`#>Pt5c8ioWgGuYay@b}3c=yN(i++#5HoCx8^VAby}3 zE=|>YDne5qi|3SS+?EO4@*@6biO1Ax09kr{^f;Vfhn1#^ZG{7Mu7fSlU*8@TQ!pX= z>oPvWRNo{-`mK%$?Z4kO?9+{AH*dQOr}Ke^;{h05fR!{t*iW~pB!8_SE7B~;lei+s zTch0djm_!027KJg7bTXf#_MGq(OY|>EnXGT4|g*U6od46DpS0s8pBgVc!bx8FU!4v z>sM4oEJt5B{^OxY{Bt3r?NgT6V~G*@AhNt+Bs5>69Rs7MR^1)!pk91|Y@;FC9#;2Prc{!!;@EU)kSB$7O2)$QDzuV`tZe?S}z^Tu5B%`N)={jG9* zc<>|4^h>K98LZCBSO0nOX_mJLl36Q;i|#<<&794eLwJJBv}#k%A=W!sSLO5gEF*KB z%_JV9Y;Hv_5eRd02uOjM>tB9VP%ahZ6Vox$K4evF+-2gPWg6699`bE{4Qjyb!z`A~ zdx7=Xh=|Pt8n2(}vM#1xBqd%whvu{XQ2j47BQMwQ`=% z`mVPOdT14P;+_?M&hhqof2y~^NV@nxe~t%HsmUMK0CwDmX#oPuLp!rPJg-k5NnwV0 zDz)S`vA;47&Fy-<7mFURjHh35U8mKByEz>cK*NPw|KH?Qv!Fc1V1 zGWY0mx+4t=fBBkg-DXPFSG)h^Yc43{(`xm{*Ib|LY;oWbnE`>cf+u7>0Zb=_Qi?j) zz*vj%t{a2&MiAKspA?Vf+Md@lpgDS%vL>tW$4MIVBFqkL3U`Uo<`xF&Kw7SMh+2VL zwwz>xi_}%4y`1%DL^U()-o82P{oiL5@Q+pTI53|UQD=dtjdVMF2SM8Zr&9ad=emkI zVTk^m5gX~dsf)U0ELy(#!i`nt`@q=O&mSS%kjH$_ zGkyvGLC*WA3uN~i+F?wqVY4>Po?wqa;)v&we$2ygLH(aCg%MLmV_&~me?5)~_S(|# zeyl+(xFM)HzH9*r47ueOx;c6#mymqhLl4iPBc)imFN#JdHy7KY4BR9&%ywbxGhdb1_ ztT(>gB54ksw8~`DWxW?r^nJK{;(g=V9K%+u7H68oNzGzBMhh{=oM9E@txnk9dA&3J 
zK})R|ACEWV+x@L~4nH=^mpD-KJwkBQa|+Ami8)9YFN81`23M-z3tX&?dQK0foF-zF~@2`^5S7Fv!Ixq*22e{B#Kwip)<5y}q42dm~WEYihw<99dOy}^V z@tGgKU%BGUjl4Z%Va{H8jAD!n?jf$WJU!Wuat-XQ(ez+g%%ok=%y2nst)e>bVwC0{ z-+j+F@w##&{x3Ex#~rHT&6B>oT^U)udD?K-lZz=w7h3~U*o#T&>E`pb7nXnM#}3vy`8EQPIs$k;zSZP2e^WH_XF?0y3Or^jdId)}OZUqC2fT5&re>7Y?7 z%I+BNTtyLxqq~$Y0=zp>aXO(hP^`&cQ$u~!JY7oq{>7_&$L>5ajN+_qDP+6^4G=@> z4M_hja;{rzA~%MvOdNL&3=a=LTZ-~o87fIA?|<~Ns%kGwDnbGz%_o?U# z`8z)eo#GTvvaucnWV;-+n*HG|OF+f4Faw;d z*&8jVUx82_$on5rKK{Mm^Yzk=AthpNJmyBK;~;8jMF~Si7kc?CBo)2Bn}s4&IiLOH z?>8$ei%UDccDdBo;@qN4$y3X;g13q9pv>=KaWx!gK6wv%zRgs7ix+cZG)*TzKr{}Q z2b;w@3ntHL4Zo}tJ6gp)i>bD4=T&y>6tp?E!j2bIHCHHjTdqRI9AwHMF{G||#XRDK z^{C~8kwDL&ij*o_N7+){nvTnR9y=ZlCUCu}Ja)$ITBjc%ittRqW0=R6%)QJ+M-oY; zwtL@W9|grG=9KseS7}{zbC#~N6Zxz@%Bj#)zn6r%Q8n`dEf75RnQxsP`WIFBvPQsp^gtE4=PtdLQ(ZYNYb)AWL;V@tDik`Um$MpK!e~?Q3 z5A*D3ZXzYq5#qww_9eebD5Teokf8W#d&oOw@|J2S`%k}`uFr#c8_Ks{Yb5Ib;s*F$ zlK5ezW)7mQLeQCV-)0w*C&7q3{2pIX1DISJSIX~PIO%|#eqdL4EbMS6VCRd)DivvDrWYc6wlZr5WlJ5e^t>sIdnIFKm2_r0>CC{H{ELnrYb{r@ zF}-y9dowRqW~Zs^45f*tHd{T1HXlvf%v{Wq+$#RB+Ijxz9jm#i4Y3}Udk8?B06oco zz`(=bJylUcxiI?6qXzV4nq1*ac#szkC7b`l1VX0?VC%rS7nE95ZO7j%i<;$sa8wlL z4=hWD16VefVW`#!Qk2|IIHF%{#FM!K>TE3NZkt&F&D>fY;9>MH8_E1b*}zwEQ%o&j zO+UCSuOmmy%B_I9yK-+;4l3>OIz4~uaaC_gFAp1m54ZZkqDitoXGGvu2lK|~pReA2 zEfeEMX7(h=VQ+`~?vjqeRBp|DrZ7(2bJ+SW811YQ z@engir5Z^we&jX}MT-f&dcu%1A0!TYzpky1)?AspaHPWMeu}=tUR$w!M<1VCJYY^1 zkF3T;PqO#ZwO`>G$%fy&^9IWl=-t5jO>pGQU|`CZsH>+tQj?5zM(Q$a7F-%!OapbD znYfYok7d0A`!~xC7GDU+^GxV#c>S97X}E+B)bNs%*vPj7+g%v22C49fRnujs+Cx!9 zWD74uT3`MI_A)-aoOle+$=6^=m&r_eQ zChI_OCdU%ZALTQG@e>zAf5C|0h|Wus$H?rFWjcnxSvZuUR#v@7Hj7f!PRd)*%PxJ5 zQ7$-l`KFK0Zx-oED{|2asKfTWqO2vsy-X3)F&~nXw31ub9y!Yol%wDz_J)3+&3GXD zAH1!T_SpTThNRG9k?WD0qlVHS2;;X+09#R*bIM*tk)Wx^H?bY(pV+ST+-YLl(WCZE zKHjjAwU4RA9K}={t{nISmQdD~=g0_K$d5eRM`R}Fd)a5iZoWSq;b5LAmoIYJhuAl^ zULs?ar2jKd@AlgEu_4UVy+-R?AC06SLIRlNGQ*$l=nj8{G8_$#^godu@}%oEyrTF} zmgc2XDg0cQE=~LEj256<3FOO&L(E=6wMVSpDTp@B)Tsj2v%mp|oyrY;p_$Wf4(LfTN5im@n=&YofVvzQ zFg&U8_!bmDV}5u@F+P%75|c4|Ui8I95A;J>=}Flp{XYk}8^2^&ogBpKL8}opmh(Ll zHWWNZvJY(hi%G5d@4|%t9)$RBf9L4eVRnxx4#k)2m+OSuYs8gb=cl$D8#tjJR@i*| z^TOdYWX{A*zfJ4#@E{le;CCC;r;;Sv+lgzdxt6~o%>Iiv!pXN9 zBfhU5iOI_n=2T&1Sxp8qa**@;ijy;p~7qJ$ld3 z6_={O7(@dk4WqV8v#g=&M4P7M5MPDHG*H8k}i2@|U=<*uu!6jkm_xY2) zG+kzPsOWGt%{JdV<+Rnio}N2MS;MQh^DRz?ERAC~XR9^+@4AH@qJABCJ@J&Z#=mum zA?0&d%CoT9yadp3V{-m)$=3fA-mTR?JL^~D>{Np3I#0itahb;4Lk`|7|1o-sG2v%l z)99)HF7-i$?H9j2DT0-f^~ZURGcp%B0!!rN6ehbvSof70e=V&!<0U;ThyN6uikE9C zFD@~*hs5&;$i`L7BlOX##;W0FOFvBbtI%T*xlZ#QdV zB=%k`PBu@>>W2UK?$|erYHpP#w>od7{Bm+GpQs)81|?*E!r#`U|D8YouVK*sBQ)Tj zd`@;Lb0#BN)!~bYMRU#IuL(W&hQ`K*G?!?+q(z@Z)1fzSUZ!;1W04%)OE$aELUFDs zMQ$Og8taB?VrOf=mHXYeHQOKLcGEV0U#q=(<1PK4Zzu&l?$I2f5h=PgXcbK76s0`5 z-N7%XQ}0tbvOTm1H)dc=k3aV!7CjbtyWixnVPd(&dmH8I$9Hq`ZGi%${x%RWqtp9W zOc=@RJ=G;vUK7{)3xCcsR_Sr-sH#wK1N_$j3xAj1)x(9+Uw4#ZbxZLt$%uF* zbw<7unf_$_$K;B+Fyqb1!zPA~rf#m*Y%r z8BJ~2*hi+xi$62snobjwKPC~)OV2q&UZYFe+y7?y+&;C$v8w)?Wi&!AsLp@pxrABe z`_&b!WUl+U`;)*kOIWER`GA9$v*?KG`*6eN32Hb)YgN0{lUiXgCV8{eCGF`H*)r+n zNZ}o{DkJ+Bf*YAW*YW?b_uf%W_UpDNRun}<=?GD2(nRS!qS6JVOO4VY0@6ECklwq1 zz(+3uLJLTZbO8bB0zxR#2_%#dAc^O7?|t^YXMJnkvClYrpR?{6-yaVAgM{~2-{+an zeCCwam@HjDF_=4^Dm5=2b2K!z&g6-m>gjOIns0|QEuWf-_}uy4y;6Nu1W2IR46}bx zz#ut;*_Dk%)sM(nzxTmJbri? zjA1NdYE=3RI{A6q7fRU_%3@UdW)V)Psf=$c-76{UPiAG*GCg&pT1@8n4^ognQ;Je? 
zN`*kBoZb6xnzH{X8m{SsaZi@G(mG&aCx~$5rOL`bMi+zyEArrTkbadH6%`|q5%;+h z67S`I5#kKbPo*p5Oqs5r5~_&_PKT{9^VENBz~GMpFnN)G>AGJYh4wUVMw5(!1A43}1{M1z+_#)iGGJ z2i}MdKL06^&i^~=`Jes&|6k|~f3t!A!Hrwuw-~E?PlrQ!5k-s}p z?pV|}DDiQA%1~0ti@V*DcsKTi!J&@_ifA4;b9RUZKA5S+VB~ z?2qKjr>D1aU8>gY(ob>7Y@6k(QFT`};;d8UtOK@<{;kFT*|q*(`F&GRjQknMNUpo>=nJc$KH{d2QW$N@}ChRUc*u z_WR$Gq8x^6&Gb@7qw1^eizh?|P>Vpz$`zO!j$eU13@C3B)avvT_Z2;E`J`zt{IH+a zPc#%RPmNci6Wglp_LUE-H1f_-tR1hL9WZ;MF@9S;(*$mBVCQ7vaW#>nl&`;E;p*k{ z*A=HQn)OIT6j3|)DXzc8J^JpC z9%K%R4GD=m26v7hNMV@}F@DTPhHnDlM#_V|>F&=}4t)9liyl_bLCIL~)I$=N?&vS9 zo6*8Psv(I|6%?9tYAP@WK#eEw|9jl_cc^3LX4rE;L7|uCl&u1Fcx!|os3(AE2Fb7S zwYx`vz`#@!_{4_t&%Cj{-S2+nXNW~gQiQYwIsS=xz7mL_eE^Af;UUIULk{;~C%S)z zKM?n^2qFX>;mIxtNbW7fU0GW0NJQ-XqCg^kMy}2;YqLE|&6|O7&()L)WqRE2vlMrm zKtxNg97PlLi;1$|cJJ4n&gop35NlPNG*BL``?SMh7%pn1O5eS{|1zZWX@B+H;H_-y z66IZYxoZQREP~mYMbk4ICr&wMO^jz|pH_7`-nge1L;FY{AP_zW7-Ia4yZv*F05j(t z%*4|csYQyq0uXxmiqei=9=IN6S3~Aj9sRx~IQ-ob&3~WP|D8Yo3&LWQy_}Fa`3J$i z(k_r?Le>x@TF_l@uKUKogKw@?Zr2FYEq z04)xajYojo6ab5J4xW&K=URY;fsdi)`HJjAkRhN8YL5o~ssZHwGL)7@?LN z4(kRHX|TYm3)=mi#{otXXDhS@3p=L77qODxZjcU9$M!2|EQ$y~P*1^2ab(Kh6z$Zy zKR?V=$%t}o+CHk27<{S=05Ol;$tP%H9Z*DvkOcs`F9-)9Mdyf8Kqb={{I?GS9kW`{Z*1VU24ILl)cfb@jr%*#1NgV&UCCYqSUk+(6>9xj4jL#hrDp%m{r`4;mH*BY z`Nq%z$bk$I`~{4BfIgtq+N}Ua`5W{2Yr+3o@IPXAL(*%8iRm2I?C=B`X=<;SE<_?XtR754g4Ln-jEvQ>%vE z$-!zZ&5g5|o)vFu9`_eLGTcivHi3tkX+KT7sRHrz{C--OC{IW13Jsj=wxu2n_=u!4UA$0%Ilu|H3|=WaiLuoDSU@Tk*_(B4MyyzV z|5__6&H$dqb?A-``A!hF56D8_HG0s4L%TK=|KXd?Jw|8_jmnX&LEOR)J|n*!-?=-l@7tLP{=cFT>-E}HxB`pFcrKcxoG14pl;&Gux!k% zkWs%>h&w~x1%MexaBCgt3jxhbhM&>w8)UO1K=r83-6ZSoUf&|^&Z)<@Qy=nBI3^)y zFj3{@^X5v347~Om;E*!t*L5P8U06h9CB&83H@&~?2K8_(^LF?*Or^41KP5Sc>Bgno zk%vwDF>VW-GuMzlNm3hOD zW1Ol?akd6BssP^Ygx;$qUi)N|uF1XWt>#`dx9dBUQ<$PGc~a>X@c zuC0u-^Oyv+YE1q&Nkb4?nn*^`%dWsEC%Nu#0d{DViXpI0uoIz-x&I2yHADmJk}dz z<>H88UJHJ;R#PqsKwPKQ%*4xCtsY$sj6ap=0Hn7tY%}oDQ-GtYtNr)C%lzNc@`rzu zqTWEqKn}FXjKCrS)S7+=@S-4L0?4+3mK85D;}O8YDf)+b`G4?l|D*KueZ#UL=_Zh- zq6rW*z`4z+lE3TkLTJRI6(d)Hj<3$Bt$~g1-$)Vxf4D0KEi$lfcxR%bFQCsBB_iO` zdn-`|*Uj2=8sg*C4k5I(ae?S6gl?OOfS*Px36^-wix6O z1lcqi3>DZGSW0+p?EAr<9n=sFsMNe83DSW>3~fjRugqR|;88_pKwiPbjOrfx(t%J6 z1FQnW5pAd(AYp=H zL9OQyffT%h{EOljMe@X^sJ>EEWK9x~r7w5cCc`IE96qTz>3I20K!V^_&0KlLg1;e} zKTSy-K|N3mHUcI9Xg~SmX9x7>V`2CY0ir)maLM0Uhku+j|Lqu+ z|L*GlBZTO02l;>U`2Drq|9%twwcLLLg#LB9|Mdp@>vaEhng2t$|DWnI4<=v5A&B=8 z4BnkREVdyB&X|gusv{Ud(Ex`4U9#-jc##jMkE3Ay&Z#RPld|)W(+cREx`)5N(J0}p zr5Voi@yYk`d}W$TeovF^$DR7MQ_{p#1(%utHHv?cr~m(S1o#iv_x}+uz%b1}04Pl% zVW5sob4wgoLNgAi;`qvF&}rOqGm(U(TcdQvT9hBgNB9V$q*g+znW0AwcS4pX@)6icb$hTh7C2H_%D``=lqI^F%+ zug15%-mz#K4rdWP3c#y-&aNjmJFX`>u1C{=VBPzKR5E4U8Wt7ipE=Cvbc{KSljd#7 zx`p{gQ7>V5>1$(11;8f^#cKdmK=4&E+qAy`)?ZpKy20>X?GJNP(;qc;Qw@eU`&``K zoa#RptsM{uvDAxEj}H&0S31;?MDD74kxW(qfr`?J4@ zMs7;&VXXQ-jn%+!sob>}ydC|}gvT-?YDi`i1=@p!<<9oK?e?enK3v-zeBj$JuVy*` z&rmKIF?Y!&kJK9nB5iXCXpJT;O}J38x{Us~mEq{2&s@-5BM4&0#vTG}7M>LE4Q9{! 
z80g5B%6N3!%`nDyh){dO7)0%y*u1czj=6muu(sMg)`aEXy3Mc)Q1-i%m4Uta>*Bf& z5=ZPNeLoehI&#J#&)9U7NU#H!K59a@V5Wtm-E8OxC*&kjHbJ zfh#_hd*Fzi|DqS2CZX5>aEa4~zu>Du+b>{n#L}R7z%nDYbE0@qj{Mc#!9H1A@wN9q z)n@$9-rWEGeTEYrCT3UE8{66%8)|E6BZ@zL%2v4bTCUj zr4~n|PNLfOXbSUq8kgwocFKGPs#Dh@@?O#JcM-)u<3If=Q5m6|>kS!Bg|^V&STl{I z2h{DxAoW+xLURegGTKP>g39e61%xC3Qn6L|3y_XF&HPT~d*oq@^phFtgY@HWD>Hpb zF2&XclJQSRjTM+I>NR`CHUYt*VrGY<-SM3c8-46X$2F{9H@y3#dn0hzV30A}52F#ip@Z`e9CMMs{PBV?+cs`DR`z7F#5?p6HUL$Gn&pB$+$Av7*)Xzq~F z?c^Jcdql%hpPO^01<<<$n1+4*pwFE*Uux>?rH$bQK_(&Rd$66ZBY5N0FzclNhJB6k za*PMH#PfHU$_gh7quj=bad^-AXE;Ca?b7qtRm2R{0N!yPA-2n@1#@aKjS`;o6%EbK@ZUS{(Z9#gFYqW@S#ZFzAL43frQN=FV*i}ej%Yz^ z4C0TF@Uw>R**4$TyzP4V({1pzj*gb4TC%NXf1bmJm_@1vF{{e^8KeY_} zGk&Ix4j^{C2b_A!Ky~P@5?PZZ zr)tNyh%2cz1)MY$ldHb0=Sj}Ku^CqU zaNzWK)eT6rFHQh%7o`8nU8UOE5B>GZ)W-Blsj=0*T7#t zb2>0IvmFqs@z_-R?SOUHjTGaQhfay5+;ggpj){vOYYU9g=EwQp3H~=Iz{}u zoe-L#ruW&m2)W?5^V>#<*hTEun8jkpS>{yN4rTp#VYcc-BUZatK|Z?#0UnHDDe*oj z*-4NHqKV6G!O<}^)SKVOE=g}Ry|!4r8R2Y^@Jz{yOV{biZqFO&)GrFhMp+3);?1L_ zCqj5FB1aB3bS{d9>-o&fSz73d~)tw+{Bo4v+_E zT?wc`_y3}Jd+hfR6FF0;w$bFIKL36tZ#FH>(L+OR2LI`exnaUo?@D^%Re`)z;jGJ# z-PUwg>+&Ubdgr6ei8?DOOBM8fq=(pHeEKNN9{es_Jgf^%9mwbH{A5&=!OTF2emzV> zdZn&@|8nYd%czwM$ID*V<*%AbuA*Se-3ahiQONg91)OrwwhdOfla-YpKHx28B;Hrt zk@VHZ=SAOrx7!IdRu*2qI5V$qBL?SbldkCU@`^?Eyz*-bAWJ93Kws~D3>rtn7!o+V z77&bi4k)AfK>W7RGkavvmTYBeN6H#whPQeP*Hzb1JXG|eXfI~!qp9l3#jVp;qU9^+)K>Ts4@P4V&;TFw%3-f7@!s5Qs=TeOiIg0Mwa;60+nj^0KSp2BLBIwU=4>un-s(F^*M{f z5p7)9l+su^t!`;>nDO$HkrkWgBU-LzdUbx^8j{&G4`)n28a8!h<&?!7E^vXaWc9t-rlb;ScYir3WVgso7~~&w0r< zyrtV_yj56uD)&SG_l#OcS8!0njR*8RNe+!S*aBH;~8F|Hj{PJ8t!j8XSq`|w9{b(kC%?DFW@!$s(O9y2&wvYG3Z7E}L7oH;F;Rfns}$p-*nmB_-g;LGWH z-TFOTTuN!OmV62MmsQ8#-^emDeqfN9*lap@Z_e~+cp9(|X zuf?q?2b4gi0lzeH-5x!$x|)LFnn3TEx3cUXDlN@{8Eqo)JR98+YX*hESjBeF&jRWH z;{7Tt0ExWnyDnq?O0-6)owk9W;gM_1TkoN`Z4)e{8${vQcHdu*kbW&QZ!70I0=0O% zb;P|da6Ch>qD4ozYxG`M62%ZxkzUIAo1fmT0x(To8ix7wW#F*!QLHS-A}pAlnP6GI zaa27!Lnzug+0}2XQ<;}46m$-MWKlyA`$tRx+d!C;v z@a*IMCJUKbtlp7RJ-*ZMmVx;RZ;UlFi?qR!In)o8pBY|_V@K=~op)pJMtqtlm{t=DpaZFrvG z3$w}uGA=F{o}VbxLZAWVU` z$vx(*dXK~vMYO~Whs+=&STE?1QK%eB9(Ge=M-UkS>OyyqQUyxCcS+B6va zFmj6lZupmROG|w|H1B60J70g0D#rNmQ8hD*aBk#r{sqs_+f|==f+1NsmjiE`lSd%H zWdyV{OruOMAY+721S%XcHRZrUzCh2V@%#P{Wzi36C%2mdb{DZaYEa9 zX*VPySeK&BuU64MwC5PTe?M?M>G1S@e)P?lN`=oCt3qAwxoXxN=|RAO?gcZDdth@b zVD@m7*o0pcmdWV)7g2p^tx~rp^9i3)#eR~-$dM+%>|6Fxib@ zs4*w_bsC$8IwPA27T>L1ZaG;zPxz9^HX5D#rSCi-VQC8?+~auZ?r5=5aZtf9G@<{j_Wbe6S<$4QF)5*6V|p*mbJ#L=zXKl7p1=pRIZQ3a;hXJ@ zo2Dg#yI0338ZsluWBXPQ9h<#ZnkQ4LQ#O=jhxX#`CTfrFxoGYdB{nsrOg0guoGDkl zzcwL#M(~m&)G7 z!MAYlbI$Z+o4wZkAAKm;$Nt5R?b$ks!FWagWt@;%Jt-WBN=hf^`C7b1fp!mnXDspC zW)dh>CO};)`cpifHUCn6v*?h@u9*<=hGB=q{B45$GB}#$hrD?MF275HWfUGjc+KGz zJIS%#n2d0&L^gMi>jzfg8v%NXx*G)d9JMcV%2+wW@!^iU#M}XH>+5~I# z5?Wtl6j4kNf^Kd6qG$l*Sc&Mmwe7HF=S#Eg_!ynd&vW)sM5Vp6Tp5-belY+Sa;p^hC=c55; zIbvIJg*RxM8OyImDg;z?VX^F#lk|bTOEe&8{gPR8Ox+)!N1o-M_T2`=F3io;glMVU zQ2eD5oTq*@Z%_oCNN7d+2{)wc`&HR}V|~NA&z+kDL#H~+VT~d1yrc98Y}m3Gxafk} z&6$YA0-8o%8k#LZDgs;Yh6Rcnge}puqvWE$|112x(Zt|cQza9^jaabX5I+wAwCH( z!X_71jKX{~zQ;K}p%pJbwl8WH(;4*74l4}qhyL{5ZlE-1;4FPvjnR)eFzoK9e`OnB zF}^g`no-*bFl^2M+y5ivBG?8JPD?2MOr|DxP2~ltTQ`v>+Zwx^9&}obNxQ{<`hsvq zFpzR!mLS4&cBv#lT`8m*SKKpioj(J!B_O(fQOp?dHYT*RGbVqEcl`Vs%iIc`f&S>w zjlMf>UjnVEZxVDrZByRgrL&=T`GAI+u*#aOsLyESWFne(>$ef}GeEJyOnKJ28EEwtLO!8%X6JO?~e8y&^W=^=Xrg zp{W`CC?L{8aQ8EnQNIsQ*DlV~=m-uxHEe=#ai?HB^SM)IG^T0EL%@N*7I&&ZRA|AL zXI~~{E`Y9^Y^>=84fn8v93LXDSSMH}s;GEfG8Y|%5rOSWMs z`{v&gQY>{KD&?gZU9|vP#VLOw7k>MbXxb8L*3Jj}Hy-spyzg1c|Mu;x%xtVJ#tpZH 
zZ^BVVAlj%1&Wq-83y3S5wRRSOz~GoL@i*Jo}9#<0_NcCzUpUvo0B>AoM&=HfHRbcs$rmRcEF8V&1a z=9RLm0C^F7PK~`KmU_2mU2wXc4PM5@p}^kBdTg!YX{L;Kx5;3L>0i?S-gzD)d%au5 za;X7=H3Ek=u%C}V153q%FG-wp({yGTlipE zgN&3@S7}b%ungy!FmP4!WnuhmDp&`-)wy&1-tOE=mUXJ#;a%;wWT zg(a&VZhp_uMX*SI&{CNj;;PCr)zQ^jIs;cJ z3tD{zsn5@)sc_)wY{AE4fNJ!qlF53cW#X(c?!0#Qb-rqoOMmRAJX$|*ZhJ2Bg{-xY zAM*sV=!da-jT7*;nnh@V1w94{j9jTa%qa_W8^c#33YBLx9ZYehQtge3N_?`Fyl}b7 zthtO6ej{Kq>%AFf9M$%fuv*=_jNV|?l>A0qXxO4><2Q~MpG8#hp$S@Pj53)v@;ZjD zDLm^rp^%%Rpy=!~qb=HtF`DQX-#khfR)=z0Jnj!iT#pniK}J z_CZ;~-g>rCW_E>jgREN{n@xtR0z&kjo%RbnRz3T+E1UIJ_5}k<`pAkt1MMR##AyHK z=KQ{MZPWI_1Uu-K)yIeLa&=-&%w!e$K7K5nOiDHsPAbLR(}}iazPE8%vdx~ePU8s7 zNPg&TyTH>q_oUlgXaaK}cTWBJl!V*GmyLJy_4OD$Ip5MNSZGm91$jfsRILk{70!xb z-RzVAuXR=xM~4HtqK~5dgd9kXsj{6qt3ntft9+l zE}M1CiV$K`>o7XQ^+VdaPZ#eOaVI|frygmG-YtkOxCg4d3?klaUJk>Y!LDgz(}48P z{>xzw&1U$gH#|!-$sEp_8ilUTpTRhS!9N z`3~D+Aakg8NblPB@Xjsa49BMa=h2*bKj@|DEqOP8PN*u1^(Pv-25#Y=;9Id%?WnkH zYAj`VxyPXVa&Kn-$YENj(02NcLhtmmY@5RmiEq!bUrwQTofO;+G^nkG`3nK=5=0>+ zfM+g&==<_D5N6#4v)!V;`sJ1@BB==~_N)$hcWZBK#OEgG0DRz}Dm!y#fa3lt0IV=o zh?;1zN@HM9NI5Yoz5=5juS>8U`?H&NJ2~H7(O2oeT zL&OdeM5nI5KKs3Mun_N zVw87=A>KLkUC{x%E?dwMP}^~U(-5CB=4ob{o8sagqinpukk%$x zdOt2U@Ct4!4z*B|#^M#)#=+AmXQ35VEgn-c@W<>H5F{}&r_M`OGVxU>4F(J()l0Bl zF>pXSJgkt}mk-o0HoOPlXNxr`+lp)UAKyaHId?KW*9n}!VWg8@YZLsfOvIoHW0m0X&JZ&lAP-&*F#l^T5! zZOYqbZ97Pp*)U@-Q_|2-)>MPm?TA(GaO<*o$mAnEzVRx{=<&xg&a$#^L6KR;daH}~ z`lQUMdoaA>Npk$XfSrIg%?08etk7Gc!m=Dkk34k^Zu{uEB11z_Mb=R4O9NYp1kuc- zJAv9v37M`7w<6oI#YB4DMRF^S=kYf&t!qS8Y(QKT_XBTO7dr=bf8#{HrK==*w*RYX z`MZ&nEINvTX8YTQK%@Buk#E~A>ZgwbzlyV@WL4%`dMt;kR_s_9S)_O>ck-XTgue^4 zp$9EKjyYwuucg-&Lqxgvy>gYe%Mrf6A{Tp^nJwb5w9BxoU4JE!^g{}1Qde-g4wxbB z7FJYFgQ&51aU&a+ip!3PxvkwgBplz1GZBC%Tqpy}l_l zX$dfq3hoMhRbXe%GRirQ6jV{D8u}9T{;XJnQ9~24v1gmsbZUF+H%ftx!%*XtPCsM* z@ZS0yC|p}d{-(j$ee!V3X$MntCiMYZ`#FxFC$x!YsJd*(Q()v&WE&IF5Np_h4kfdW zdhCX0%Y{NSjSk`U4WkEeZxtcgcCgRAnVPJGs#gshE*%o@rn>=0)OO=QDpWU;U`eW+ zVW_OvSit$zeHL6@g4GeHubMc`OF13B`0BoJT#5?m*qrej;x}^lOqpn@L{25$UiiVl z&z4d1$26bK!Z=hg@i>`7T{>!`@0mqas=Mq0Z%uY>g?-EpWMD$PLWtH%w9yM7-Z>{O+fb(qfE1na}*W9RJk#`|D1Nd4OR_J{o=SJIo zoJ4rwU4Q+lIvyiYAEFDQzN|?mNOS!;-+fg+wB#FR+BE3ZDIQ>xmDI-`;T1=7GbC3- zof$Xot#2}$J&V3{8>_$u-zwuzExMSQYY+qW7`iX9Rc@>`G&KU*k&hEvj_%3!2pM{~ zpW+pk=n|F2rz*48(^tZM=oyQt&wPDsb40#;6Fa3n?&#lr@ zo3s;{-#nnYc4?{Ugx|s2SnucBr2_OyYpd9-0qJfcHB+E%~P5U*jaftLR1Hiu(jkBS(kI&exOhwkYh@5Z$o^EC!8zK2YL(4 zzr2&McYFPz8D)p_v_uCOz>IACAn)PSQ{l28XM4Hm1c&V;bq~5x*;vkd;jLDZd}T_M zE*S^?yt*Bu#RFd|2K;=?r-*4ktfQA+kI}919TxmpV~Ml&X_xFVHXh_2g|;5aYt;bP z%_Hw?W18enXLf(v%+G;-D*g;)L6<+6qKhE4K72u1_Z|+P3?4JzHxV>P1JQvE76oLF z(^Jn0#xFA!uvv#e_V8;7E1&W4r8;nipVp?1Y6 zUNLY@U}AXHJ~l3U)gAcbrw0^$Wcp?e+#dMzjbX^wc-~DyhqseQLPNRkqwMz1vnyd{ zJ$Gk^JeTFFyM5*}PtOs-VfaV&1dfs?Kz*JX8Ij1-`bpbA*4BPfTR`otw954Pd*`Xk znHVT%O3Gr{GqhFxc?*ALRkpIF{dn_gf4e6LNi`a`%VDT&i{S_~Dnja*4h?$AtB<1= zVo@|i{l$@DLS`?BvBhc86r%t}E9+mIK^DeV%6hz%@B^?^$}1yW_m6lJW|f6i~2^uya^W7 zo-^GvyJ%K}hO*xb>R;1L5R;-`Z3IC-Bob%NZ=<4hA#SxaZOe1@TFWTv_-wOf)WY0OuQ)HXz~9t9%WPTID|9xQ!b zW(#>CYkpU^s3PBQZ-btvCE2|u$+M>Z3wAdQjnx`HLlh%bwmiY*Yb2TcmWJ<&AGn22 z)S?|*@bO0pzxbxad2Jq0D2o>1zt$vjrFNwV=DN~>}@X)tjW%b>qB^t) z+;Q&$Y5lE$=&2Fg$eg87GcM?(D45A}#V)PNieHUGfL+?V&Y|&>3>a)}B8C*=+}%9JL}~3-!hq zGjV>4s7~ilPkhH#&%6ZAiq`JwnM_edUKbJ2@hXG2ru%Ncj1eXOtnMFt^iKbKf zsSTS@C;q|4WVmm5-lM3G9+KKgKPD&W+REE%a5RDX^gkspC_;6#_(6*qi4&MOaDp+s-rq))$eWHu~xZ>W0OoXb*pvoh;xhE7y% zU6O|Xm>nhk8m!iNd+-nPKsNJ~D1v5g^C*>kd+I*&G<2LRR#|mNd{^&Y_<75hpKoV3 za<|@y1SnaeGqd_D`H&=AIRVeRDO4YfT^;D9Ow*JH0!>cLX2-E;c3^X~fe5E~b3RI|@ps@1MIxZp!+c3J_pRAk3WwSR`XZ##i&( 
zSnAff`W)x;)fFEH+-`QQ>OV}gpzmWfd*kx)0HKywo63;1+G5xWJ)U)L>Mg#1Z?JIt z7~Xd(?V8Wpyf>QYlFWEyoMf=Q9R!f>htL043W4zh1wnh*^phu~V#hiuA>=TVx1lli z$LL_;mO0ZzrslXn%lKts{dg}5*^^>MV zt+{BXm)R^T zYP5ZO`dnM}<3I%PDJ?-_!M2@-$zKjj%;-ji$=bTXTwGWOSye6WFVV`rp=F{9e|!Gg zt!P){Cd69+cXo$OuPp>dEwC}H=`@WT$k^5eL8obV%#5ZZY@X`#-1ohQF+e8e(M8MX=;g=?-m)a!l)2-gq{yV5QAht_DN+i`29TS7Opvl& zh@&=#GR|nkbWL5pUo7Bi%8)WpCCGAj;b!l^*d&}h2&;jG!mQC0TY;=2JQHFX5?w6F zNrY!pZMTeQ+8G0to-ZT*Ew|68@FXQd$AHVz*JVW;a;0g{=@-Rn=gpi|$q2&~hf2+s zBts@3JPCVD{C%rWUUgc zyO}Dv{vcP+Z>?P`h|EsPCNpoe(PEJWlg8{Rt4tiJtLhO~R5=DX>JmjnL{xTqd6-{Q zT%_16DJ>4kzwFk*D^e^D+}$F1RRrSGGDXqtc?@@Q!BKLeBd=3T6jW7TtOF%gu z)H%CLMHoI@#*mpEca??}DDHWY z!#>f5AJuhT7aqMXUP?t3#DQbn4qL47!WWGMJ}^N;Gr&=zS#k&Pkg?TB84a81+V{;$ zG0~Z>51#O{-b|`?*-UkA!z|b@4%2S?O*nXJ}M9Hd=~Hs-DRy!H|#T`&CN zRaNKYdhVjV1ou)ahDb7)yY>O+>BjtO?bfs8s;$(_w(6LQfq);c8-2{Cgqopt-aPF& z1fJfoHah=jW3pzYgykhvmzkTwzGfUxw5%3)K70IV&&V@uPgzux%6GW))dy!NXfrTG zo>P%^d1_KV(Fpf=a@Q~kf{O{M%<^*25|`yJkCc#NA~r;NmBG;HKXCE5PSA z@kcq=kj|JGF@cqsyV3df zV_dOPEnigmL^z$AHGNp!cKAo_;3-V%nd$y&!-LS`CGvg_b&d^_&*G+b*%J0-47zQu zX$+ekMCk(HiG$Ynv<@&bU@Gj8+I+vOjSa^WVr4Iy1?y~zs2bmXvg}))Y4m1RS)a>i z#?Z|+ji3r-ha*^_4x#955FzAR<1*s9_tX={#{srx6WyeF9u>&1(;{@%+9J+v{Z4fqL!=Nc=x&rd?0_|dbwXyIENX3AE?+^=`>SDhyPat^GoA;VS0_Dc7^xI})?%e$ zFCfWCq1t(wUiA@G2`A=6roD-$%lSrNCxi-cD-HEUhsjEmo7+i0HEgOx*c^pv)r#|R zrq0mrsk&CIq!__GAHAG|55)?*cHNWwRB0q5imZ*PusNK7o5^ok?mi@f-sUDe@6zoW zEtUQdp)eU;=6bKp#WUIZQlGI#^QlaqSpU!RfHS6X(QPt-dNL4k4^t7qh7&*`9?Z)6 zE^j6v!(M*o<)b~~X2?y4_&`rnd>hl_ zj_xKE+$#V}0ThpZz!{{@z^dQ#O|5DX?1XrazKIkf@Jh7njgbWb1Y=-{KkHS?LmuK? zd=jkVZiHQ^ey79<2O3QAgK5rS%Wmw4JaCOl-5Rb&=T=$nEOc#=ePlUom!4d4Y zCS=`@Dj2GlsUe+PXJY$iEUfEm-ha^es*tO1M3cJ6RDZ=VVheV?O**g#R2SAUC(#SK zQMVfvc$JV2hjqxw|Dw1$v)*8=F|gd92)@(yQuTbQfH*wnkCaydKZE3z*1hejBH_l86?MvEjZ1ztBufRO_V(J8z-iuA62)wQwyC zm$pb5fgAuuvOoA1c-#9i>$#F??`*;Qa@^#3VB?x-Inq8ne&r=CeKyeGhmSt}VZmn8 z^Qe>zjlw*qNt3ztF;C5@$w|*1)MfqLx1qwVzAjlRr9DZm_{3mO9fRP|CM2Vnc*?l3 z19?#~(8pT~KZf(dI6L|*UjfZHFFJ1n(y1D<^)o`D&Vuf(PxMkRtt2RR@6N0kgc^SC zDAKhVr8Iz9kP=~zuc&1aJu)#RmJ zFF*eNyq!^6DufSdsRpvI%T9HK43Sth3bN|f{frtyrRpJJDGsB-?kh~=k@$!)H-C6~ z|HoJS>T^!Xn?}fh;j)c}n-%3WoMEqBN^K<9DY@HjlG2emGYI?;DWeX;WK7h=qC4Nb z{$O9;#*Uo0-=clHzwYRD=KwP9t42vx*9%_P{_kdUN84Mtu_Zhk%)h8s$omw$NHFG+T5G75*lZ143LWVCor%;1f8CbrQ{ z7!Q62+jN2tgksb0wHY&?n59|+f2okw&!0&trKcnQ2twdUccl@RAWg3PW#Stw3PX?G z9PZAho&eX&f=gtp+dc61j>Mjr83|S{eV0&!TO~u1Un*RBqP3n+q;qt*xit?+$3Y5olfcVpq&th9d4&A{3D+oXcF2i_gMohVQnD|vYS z238&}ANKmBPC5XOSW;d7&Y6|ll3*b7(v`!i{dN6z!utW5bn_s%vAE;3BYeuyeZ*ld zZNWDk73$wemJsO2(PrHeEgTdtL)kR`;J92evYMhnPfC&MEK5nwI30~Vwj^KPJ$(ty zVe*G94~LTZa6<1}yvn`lW`DdO)ONZmO$Oen`QTrDq(sL6#&>gS6;qUMz zLyXX=vp2hjzC5RW;JI%^DY%)Dc-cHl-8e#4{q9%Q=&q+Oo#(abqW@ca=M~me+U;?4 z#z7bmR1ifWN-qLR6_Oze1`G}$p-D?rL?BFv)PRIUMMU~YRa%r@gF^^4A<~CF^d>C` zQY8=!!4N$AocnWgp7ULN&+}dE+xOz#Yrkvl|NC3(uPwU4>w2`3LG-@SY;hG+Gn!f% zSc%!Jb(!w(Ot)>t2MIBo8j-1I@AH#WmcKkIGeZ=q6c48?^``yx**2E2YhTo}H!2~_ z4QmETF}$!-P|h1}*E~o#q}G&G^b#-6{OKq+XA54-pbg1Qi8!Q<)xlD;%C?gHCrloj z3W!n1?78=dU~IF-s_oJTNC7PL@WU+yy+j}Clqq85;9He}-;r@IuF3FIk+ ze;CZ*;o(AVlOsRw5PERy1Hb4XoMp;Pt0?3)I+GBJt(#T)QRG{yOFhwu}V-@F2Oo2a(r&JS>~X{cOz z@W;<)UC$>mlk!3ly{g3laNYfSOu?4VUd_}jr`Ba9Yy@x6Tk4L!pL#3onu(;0{idR2 zORiO>g~XCQ8`*6<@Yac~Qe4xtOEzDCb`pj0kBQ=yk>608W-M}repIO##Zqh%E7$o} zYU?Dn2jP?1s*6ms^*atURX$6bc$8U&aD7QgHr>5s7*Yt-IlNL53 z&O*2o_XqDPq5kKslekJ!&7@fA^hDsC@CG;+C?zmkkQx3NDF?V2o}^?Ro{D}DlEFmj zWd>E|Ff=E2vtLCjSA%AEZI~A%)lrD^_ znsXDh<8NQj7qYh8`;|c65~wTOx$KHU=zN1eN%-P;C`Vj zywv|N{b7iF5 zIwDpDyMXod4pppvX=ra>F!!`G^nr+Fd>(mJNB9o6a(f5A^^h+cZ`|iS-ARfH%`J+l 
z5EL>Mvq*h39Gs%oZUIS;XVzcKLKL5hhy@ktkSwWlfpVnWg#nUe-n+yE+-eHLrV6vMOUL66&L1O z5)u}Z*9Rder5-n*oV5*-{!((x#ipWeJ_8i+@ofq?kW_{l(LG{Y{+^1r7W0N*e4iUn?CXS};JeK_FDZ)S!ZwS&)q?W(gE zsmgp)lIMu7t=W?ve?=uq-JJ-*wT_vFa~OE$g8NGafts1##y3AzpM2W;cRWXI0|yjT zG2d~m@F1-?bV) zZ>1`^bb0o_zytCB+f(u1cfHCK;(R$u6b~pbU@nl*L{<%u7inB;CQs|Mn|JWwTct*7 zG)1W$dAH>FEl?{EOrqg#Fb`y$4+pus#0Qvy-Ux}K-*x@ zm1joBO81~Zd5PKLs_X%fA9`-0wFCGiwk_huc8sTI+^8E@bTjr*#|lrYoYqaZ{r$Q2 zmmUwhcv6_Wz$eiGe$}_FQ(h>Rwe?dCERNX+UYKbwOcYGiJkWQ;z8jkA(8E_fcR2Oy z%fF|r+6_M);UCGZ^LSRKzvNIzt4fF&TdrxO2kuUFIn8Z;Kwk8ie9~&u!xBa0Ioo55 zXgkR%64L%0zZk!&$4OBQLl}4&qw6=@R-p8tF{%eXR@idFS>$?twqT;}x|>T|x!$G1 zYBDdW-E#e_PNenK=kJa<8M`M54wG1jt+S>qWe%C2O;Gh2+v}Mp39m^PR~4_RPScXZ3?Gct$gfMYuG@E-=Np6D?UHLJ<8s$CQ^ z7F}<;xC8Mh3wbiOJjCOvU}J2fV6X2fSd=>jU!6E%Dli9G%K`DTm-vN*V`vg(D>7(c zV+9ggY`H8;o;v_w5dQ|>|xDLub-!D zh2!XHW&S&25~?0&6`!@-Uo*QJgs@;-_LPn;*j$vpJ=HHXrA>1$`j`li>|TJmCY&cu zY`*o5+sD+<~ z+jy%t(}B@0L=VzA{87MjTZY&|0m{k?T$k24afp_mQDI3L_agc>^R{{2oAv;a?diXV zc=llEbHT=N4Aw3iUc8Yuw!e5N1pOhPvdu;P5V$u<+$}`28C-c71$o#@I(f;} zH?y;$MP#?@*0Jz(*kw(@iJ{hE1OqSw-;|R5jwR3&wLl#pr-VoJ)TTxUJ%ajX)fqDVZv&R~71KU5-u(6E zwaFO}xTIUQ9nRi*c?Km&ybp?*I|-5D-8_K)1Izh~3GTzTATN*7~*70l1)jT&)Y^lHqfAOd;6#Pfp!_wwkPS$a{^NW8eS2kQK(7x) zn-pw%hV3|yJJ7cZNS(xA8B6+%)6fkP0H&@0ysOy%eLs6_wGh6JnmSqI5rj5Wb!Q~eBXP=hQ5g{U zL9S?H*=^6*Lq*p$BhkjybRBY~Kbg=yTdB4aD+U(jO*dz+f~!sp3h_V^wJ`8;?y0ljvqL37 zh=b4n5Qvr)9KJZHg$Mh+pMi&lT^5(?_~*^y)k7{uot~Vwk?G3sf=RGW6$umikq5Sx z)F4eC{iy~V24z+ow#!!c8w1I1+p%dnWmZ&AHx)a(C7~X(lx(Bu=8o=9CA^Xn4O?y$ zCy)zeVnqmDsOrQvEh(?Prt@RQ8br+cpx0h>)w(4`e|TRA00Bha1b6>=R>U*w*#?94 zyV(8}u}C84XKTLb`m9%l%6zI}3vN2+&^JF%>40ht;Me#AUOala4!x_6unAIY$@OkD z(_L+_d2Xpc6pLi1?v8>#9dxp`Hn3Qp^Wh+fYU-VSa)l@k3^3uz*tR@b>Fz3;HB$~k zth0~Lyf$?Q-*Oa45#WdNjuOUeLDgSc8~Ix8XEfdciU(xTVjKBd?u8dw>Y}UM4}k&0 z;3Tm6FL&-9d$iZB<~{#gwxjc@(QeDthGc*EI?ZM9dzsVfTw4~NO_OhVu;P6Tj=+xx tW~sJh4dI)1)pi1|dG~;I=jgq_q^T89Ho(GN|IhQEJ^0^Z0Q6(>zW|$kMa}>K literal 0 HcmV?d00001 diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/TID-MGMTGW.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/TID-MGMTGW.yaml new file mode 100644 index 0000000..406ed71 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/TID-MGMTGW.yaml @@ -0,0 +1,61 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+---
+ vnf:
+   VNFC:
+   -
+     bridge-ifaces:
+     - vpci: "0000:00:10.0"
+       bandwidth: "1 Gbps"
+       name: "mgmt0"
+     - vpci: "0000:00:11.0"
+       bandwidth: "1 Gbps"
+       name: "pub0"
+     numas:
+     - interfaces: []
+       paired-threads-id:
+       - - 0
+         - 1
+       paired-threads: 1
+       memory: 4
+     hypervisor:
+       version: "10002|12001|2.6.32-358.el6.x86_64"
+       type: "QEMU-kvm"
+     VNFC image: "/mnt/powervault/virtualization/vnfs/tid/TID-MGMTGW-VM.qcow2"
+     processor:
+       model: "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz"
+       features:
+       - "64b"
+       - "iommu"
+       - "lps"
+       - "tlbps"
+       - "hwsv"
+       - "dioc"
+       - "ht"
+     name: "VM"
+   name: "TID-MGMTGW"
+   external-connections:
+   - local_iface_name: "mgmt0"
+     VNFC: "VM"
+     type: "mgmt"
+     name: "mgmt0"
+     description: "Management interface"
+   - local_iface_name: "pub0"
+     VNFC: "VM"
+     type: "bridge"
+     name: "pub0"
+     description: "Interface to the Reference Lab"
+
diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-gen.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-gen.yaml
new file mode 100644
index 0000000..604513a
--- /dev/null
+++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-gen.yaml
@@ -0,0 +1,54 @@
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+name: "mwc16-gen"
+description: "mwc16 generator/sinc for testing a corporate network"
+topology:
+  nodes:
+    tidgen4pLarge1:
+      type: VNF
+      VNF model: tidgen4pLarge
+    tidgen4pLarge2:
+      type: VNF
+      VNF model: tidgen4pLarge
+
+#external datacenter networks must be declared here
+    mwc16data1:
+      type: external_network
+      model: mwc16data1
+    mwc16data2:
+      type: external_network
+      model: mwc16data2
+    mwc16mgmt:
+      type: external_network
+      model: mwc16mgmt
+  connections:
+    connection 2:
+      type: link
+      nodes:
+      - mwc16data1: null
+      - tidgen4pLarge1: xe0
+    connection 3:
+      type: link
+      nodes:
+      - mwc16data2: null
+      - tidgen4pLarge2: xe0
+    connection 4:
+      type: link
+      nodes:
+      - mwc16mgmt: null
+      - tidgen4pLarge1: eth0
+      - tidgen4pLarge2: eth0
+
diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-pe.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-pe.yaml
new file mode 100644
index 0000000..ca53092
--- /dev/null
+++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-pe.yaml
@@ -0,0 +1,189 @@
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+name: "mwc16-pe"
+description: "mwc16 3 PEs, plus a gateway VNF for access control "
+topology:
+  nodes:
+    PE1:
+      graph:
+        x: 298
+        y: 149
+        ifaces:
+          left:
+          - - xe2
+            - d
+          - - xe3
+            - d
+          right:
+          - - xe0
+            - d
+          - - xe1
+            - d
+          bottom:
+          - - eth0
+            - v
+      type: VNF
+      VNF model: 6WindTR1.1.2
+    PE2:
+      graph:   #all graph sections are optional, for web displaying
+        x: 745
+        y: 148
+        ifaces:
+          left:
+          - - xe0
+            - d
+          - - xe1
+            - d
+          right:
+          - - xe2
+            - d
+          - - xe3
+            - d
+          bottom:
+          - - eth0
+            - v
+      type: VNF
+      VNF model: 6WindTR1.1.2
+    PE3:
+      graph:
+        x: 536
+        y: 320
+        ifaces:
+          left:
+          - - xe0
+            - d
+          - - xe2
+            - d
+          right:
+          - - xe1
+            - d
+          - - xe3
+            - d
+          bottom:
+          - - eth0
+            - v
+      type: VNF
+      VNF model: 6WindTR1.1.2
+    "TID-MGMTGW":   #this is gateway VM
+      graph:
+        x: 465
+        y: 591
+        ifaces:
+          left:
+          - - pub0
+            - v
+          right:
+          - - mgmt0
+            - m
+      type: VNF
+      VNF model: "TID-MGMTGW"
+
+    #external datacenter networks that this scenario use must be declared here
+    "macvtap:em2":
+      graph:
+        x: 169
+        y: 589
+        ifaces:
+          right:
+          - - "0"
+            - v
+      type: external_network
+      model: "macvtap:em2"
+    MAN:
+      graph:
+        x: 872
+        y: 324
+        ifaces:
+          left:
+          - - "0"
+            - d
+      type: external_network
+      model: MAN
+    mwc16data1:
+      graph:
+        x: 51
+        y: 149
+        ifaces:
+          right:
+          - - "0"
+            - d
+      type: external_network
+      model: mwc16data1
+    mwc16data2:
+      graph:
+        x: 989
+        y: 149
+        ifaces:
+          left:
+          - - "0"
+            - d
+      type: external_network
+      model: mwc16data2
+    mwc16mgmt:
+      graph:
+        x: 751
+        y: 567
+        ifaces:
+          left:
+          - - "0"
+            - v
+      type: external_network
+      model: mwc16mgmt
+  connections:
+    connection 0:
+      type: link
+      nodes:
+      - "macvtap:em2": null   #connect external network "macvtap:em2" (null because it does not have interfaces)
+      - "TID-MGMTGW": pub0    #connect interface "pub0" from VNF "TID-MGMTGW"
+    connection 1:
+      type: link
+      nodes:
+      - MAN: null
+      - PE3: xe3
+    connection 2:
+      type: link
+      nodes:
+      - mwc16data1: null
+      - PE1: xe2
+    connection 3:
+      type: link
+      nodes:
+      - mwc16data2: null
+      - PE2: xe2
+    connection 4:
+      type: link
+      nodes:
+      - mwc16mgmt: null
+      - "TID-MGMTGW": mgmt0
+      - PE1: eth0
+      - PE2: eth0
+      - PE3: eth0
+    connection 8:
+      type: link
+      nodes:
+      - PE2: xe1
+      - PE3: xe1
+    connection 9:
+      type: link
+      nodes:
+      - PE1: xe1
+      - PE3: xe0
+    connection 10:
+      type: link
+      nodes:
+      - PE1: xe0
+      - PE2: xe0
+
diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openmano-openvim.txt b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openmano-openvim.txt
new file mode 100644
index 0000000..93874ad
--- /dev/null
+++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openmano-openvim.txt
@@ -0,0 +1,38 @@
+
+#1 Create an openmano tenant (MANUAL)
+openmano tenant-create SP --description="Tenant for service provider"
+
+openmano tenant-list
+  # 5b774582-b5e6-11e5-8b84-5254006be016   SP
+
+#2 Create openvim datacenter and attach to this tenant (MANUAL)
+openmano datacenter-create openvim1 http://localhost:9080/openvim
+
+openmano datacenter-list --all
+  # 03edb122-b544-11e5-8b84-5254006be016   OSDC
+
+#3 Attach the datacenter to the tenant (MANUAL)
+export OPENMANO_TENANT=5b774582-b5e6-11e5-8b84-5254006be016   #USE YOUR UUID
+openmano datacenter-attach openvim1 --vim-tenant-id 21b586fa-b5e2-11e5-a97e-5254006be016   #USE YOUR UUID "openvim tenant-list"
+
+
+#4 Update/Get the datacenter external networks
+openmano datacenter-net-update -f openvim1
+
+#5 Create VNFs (AUTO)
+openmano vnf-create tidgen4pLarge.yaml
vnf-create 6WindTR1.1.2.yaml +openmano vnf-create TID-MGMTGW.yaml + +#6 Create PEs (SP) scenario (monosite) (AUTO) +openmano scenario-create mwc16-pe.yaml + +#7 Deploy PEs (SP) scenario (AUTO) +openmano scenario-deploy mwc16-pe mwc16-pe + +#8 Create gen/sync traffic generators scenario (AUTO) +openmano scenario-create mwc16-gen.yaml + +#9 Deploy gen/sync traffic generators scenario (AUTO) +openmano scenario-deploy mwc16-gen mwc16-gen + diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openvim.txt b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openvim.txt new file mode 100644 index 0000000..32b877f --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openvim.txt @@ -0,0 +1,42 @@ +#1 add compute hosts to openvim (MANUAL) +#descriptors must be created with the ./script/host-add.sh +openvim host-add nfv102.yaml +openvim host-add nfv103.yaml +openvim host-add nfv104.yaml +openvim host-add nfv105.yaml + +openvim host-list + # 60b31d5a-b5e1-11e5-8492-5254006be016 nfv102 + # c3c1d9be-b5e0-11e5-8492-5254006be016 nfv104 + # cf0b5d22-b5e0-11e5-8492-5254006be016 nfv103 + # f6ce6b0c-b5df-11e5-8492-5254006be016 nfv105 + +#2 create external networks +openvim net-create openmano/openvim/test/networks/net-example0.yaml + # 0bcdd112-b5e2-11e5-a97e-5254006be016 default Created +openvim net-create openmano/openvim/test/networks/net-example1.yaml + # 0f019648-b5e2-11e5-a97e-5254006be016 macvtap:em1 Created +openvim net-create '{network: {name: "macvtap:em2", type: bridge_man, shared: true, "provider:physical":"macvtap:em2"}}' + #USE an appropriate value at provider:physical depending on your environment + #in our case this is a physical compute node interface that can be used for access + #this field can be omitted, so that openvim will create the net using one of the preprovisioned compute node bridge interfaces + #also a specific bridge can be forced: e.g.: "provider:physical: bridge:virbrMan1" + + # 1f4e7d6c-b5ed-11e5-a97e-5254006be016 macvtap:em2 Created +openvim net-create '{network: {name: "mwc16data1", type: data, shared: true, "provider:physical":"openflow:port1/5"}}' + #USE an appropriate value at provider:physical depending on your environment + #in our case this is a valid openflow port at the openflow switch + #this field can be omitted, so that openvim will create the net but not attached to a concrete switch physical port ... + #... 
but it will connect all the VNF of all scenarios attached to this network + #also it can be a fake name if "host only" mode is used for openvim + #the reason for forcing a concrete switch port is to make easier the connection of other scenarios deployed with a DIFFERENT VIM + +openvim net-create '{network: {name: "mwc16data2", type: data, shared: true, "provider:physical":openflow:port1/6"}}' +openvim net-create '{network: {name: "MAN", type: data, shared: true, "provider:physical":openflow:port1/7"}}' + + +#3 create a tenant +openvim tenant-create --name admin --description admin + # 21b586fa-b5e2-11e5-a97e-5254006be016 admin Created + +export OPENVIM_TENANT=21b586fa-b5e2-11e5-a97e-5254006be016 #USE YOUR UUID diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/tidgen4pLarge.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/tidgen4pLarge.yaml new file mode 100644 index 0000000..111b5dc --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/tidgen4pLarge.yaml @@ -0,0 +1,89 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: tidgen4pLarge + description: tidgen 4x10Gbps 28GB 11cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: tidgen4pLarge-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: tidgen4pLarge-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: tidgen4pLarge-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: tidgen4pLarge-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: tidgen4pLarge-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: tidgen4pLarge-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: tidgen4pLarge-VM # name of Virtual Machine + description: tidgen with 4x10Gbps 28GB + VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenLarge.qcow2 + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 11 # "cores", "paired-threads", "threads" + memory: 28 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "yes" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe3 + vpci: 
"0000:00:13.0" + dedicated: "yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/mwc16-gen.xml b/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/mwc16-gen.xml new file mode 100644 index 0000000..00f6d20 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/mwc16-gen.xml @@ -0,0 +1,84 @@ + + + + + + 11af9f00-baf8-11e5-99ee-001b21b98a9d + mwc16-gen + mwc16-gen + mwc16 generator/sinc for testing a corporate network + + 11be9258-baf8-11e5-99ee-001b21b98a9d + connection 4 + connection 4 + ELAN + + 1 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + eth0 + + + 2 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + eth0 + + + mwc16mgmt + VLAN + + + + 11beec62-baf8-11e5-99ee-001b21b98a9d + connection 3 + connection 3 + ELAN + + 2 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + xe0 + + + mwc16data2 + VLAN + + + + 11bf0634-baf8-11e5-99ee-001b21b98a9d + connection 2 + connection 2 + ELAN + + 1 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + xe0 + + + mwc16data1 + VLAN + + + + 1 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + + + 2 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/tidgen4pLarge.xml b/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/tidgen4pLarge.xml new file mode 100644 index 0000000..d02d58a --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/tidgen4pLarge.xml @@ -0,0 +1,139 @@ + + + + + + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + tidgen4pLarge + tidgen 4x10Gbps 28GB 11cores + + 11aaab30-baf8-11e5-99ee-001b21b98a9d + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 11aaab30-baf8-11e5-99ee-001b21b98a9d + tidgen4pLarge-VM + tidgen with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/tid/tidgenLarge.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/checksums.txt b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/checksums.txt new file mode 100644 index 0000000..e89b164 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/checksums.txt @@ -0,0 +1,18 @@ +3767a8dfb15ce1f3ee9633f0e8c0a36f88adc0987b0f87dc8b04263770a89eed gen_pkgs.sh +566bc06c6ec1846d1bc003e59cd4a5e57e2c3d984c58987e5f78a710cb379616 mwc16-gen_test.py +c8e178ab93b4661721b441a3ae41e54f06ca34e498d8accf4acda80d69f97b73 openmano_scenarios/mwc16-gen.yaml +4ca3804ef29123f905702d989c671eaf619540a9534197e6196c07789b3e0718 openmano_scenarios/IMS-allin1-corpA.yaml +dd17551cd01c683014908724796df89af99dab25ef9c2930c1b24625ed78b4d0 openmano_scenarios/mwc16-pe.yaml 
+c20765d8cefb94d550267532677fd9e6aab64078d8d9c154ee0dcba1e2dcf175 openmano_scenarios/IMS-allin1-corpB.yaml +65d77b657ec52ed9e435fc87d12c2751526a37a6393fbe8a015f2fa0b1af310c openmano_vnfs/mwc16-gen2.yaml +ce37404f05e46ac8e24daf325f621402adef1322cfc287c1009f94fb86e1d676 openmano_vnfs/6WindTR1.1.2.yaml +01d994ed8d36ab844098f9dc3597a124bacc646ef596ed9c83faa4757eab30b9 openmano_vnfs/mwc16-gen1.yaml +55a6aae2549fffbe6ddf0cc187b4f38be14ed14f4e06be2fd63a9697124a779d openmano_vnfs/IMS-ALLin1.yaml +b24bfc8e468ca7b0665de98788b751c59416a5608de87ad28bf9f9b3467bfbdd rift_scenarios/IMS-corpB.xml +c065322e40cf7e4413e0ecebd70eaf2512ac80dac0bf31d7e986706801970d7b rift_scenarios/mwc16-gen.xml +a5c57ef25bb366aad3f548217d4b1e2d4bc60591168cf6173ee1853544c05651 rift_scenarios/IMS-corpA.xml +a3565ca6040654b72fb91acf0281f92dfda704c6dad12042d1f7de09e62ee8ed rift_scenarios/mwc16-pe.xml +5f03711d62432fcfe35038e2ed4f4adcacf5ab7b06f13969fac5bc9928cdb2ba rift_vnfs/IMS-ALLIN1.xml +9d9e1dec89b5cea0cd3a4cf69bd606a7f25f4607086f43fe2b3b1b16e7cdeba7 rift_vnfs/mwc16gen2.xml +f8bf47bc904f0b71dc766e27093ca22ddd2d36d28a0d22c48d210c5ddc9119fd rift_vnfs/6WindTR1.1.2.xml +6af440ccd412e95b6e7dd1638e30acffe0143a565fb7f208b052b74788b5dc64 rift_vnfs/mwc16gen1.xml diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/gen_pkgs.sh b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/gen_pkgs.sh new file mode 100755 index 0000000..762314a --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/gen_pkgs.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +tmp_dir=$(mktemp -d) +echo "Generating packages in temporary directory: ${tmp_dir}" + +#6WindTR1.1.2 VNF +mkdir -p ${tmp_dir}/6wind_vnf/vnfd +cp -f rift_vnfs/6WindTR1.1.2.xml ${tmp_dir}/6wind_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} 6wind_vnf + + +# mwc16-pe.yaml +mkdir -p ${tmp_dir}/mwc16_pe_ns/nsd +cp -f rift_scenarios/mwc16-pe.xml ${tmp_dir}/mwc16_pe_ns/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_pe_ns + +# mwc16-pe-onevnf.yaml +mkdir -p ${tmp_dir}/mwc16_pe_onevnf_ns/nsd +cp -f rift_scenarios/mwc16-pe-onevnf.xml ${tmp_dir}/mwc16_pe_onevnf_ns/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_pe_onevnf_ns + + +# mwc16-gen1.yaml +mkdir -p ${tmp_dir}/mwc16_gen1_vnf/vnfd +cp -f rift_vnfs/mwc16gen1.xml ${tmp_dir}/mwc16_gen1_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_gen1_vnf + +# mwc16-gen2.yaml +mkdir -p ${tmp_dir}/mwc16_gen2_vnf/vnfd +cp -f rift_vnfs/mwc16gen2.xml ${tmp_dir}/mwc16_gen2_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_gen2_vnf + +# mwc16-gen.yaml +mkdir -p ${tmp_dir}/mwc16_gen_ns/nsd +cp -f rift_scenarios/mwc16-gen.xml ${tmp_dir}/mwc16_gen_ns/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_gen_ns + + +# IMS-ALLin1.yaml +mkdir -p ${tmp_dir}/ims_allin1_vnf/vnfd +cp -f rift_vnfs/IMS-ALLIN1.xml ${tmp_dir}/ims_allin1_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} ims_allin1_vnf + +# IMS-allin1-corpa.yaml +mkdir -p ${tmp_dir}/ims_allin1_corpa/nsd +cp -f rift_scenarios/IMS-corpA.xml ${tmp_dir}/ims_allin1_corpa/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} ims_allin1_corpa + +# IMS-allin1-corpb.yaml +mkdir -p ${tmp_dir}/ims_allin1_corpb/nsd +cp -f rift_scenarios/IMS-corpB.xml ${tmp_dir}/ims_allin1_corpb/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} ims_allin1_corpb \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/mwc16-gen_test.py b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/mwc16-gen_test.py new file mode 100755 index 0000000..02149ad --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/mwc16-gen_test.py @@ -0,0 +1,258 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import argparse +import dictdiffer +import logging +import os +import sys +import unittest +import xmlrunner +import yaml + +import rift.openmano.rift2openmano as rift2openmano +import rift.openmano.openmano_client as openmano_client + +logger = logging.getLogger() + +THIS_DIR = os.path.dirname(os.path.realpath(__file__)) + +def delete_list_dict_keys(source_list, lst_keys): + for l in source_list: + if isinstance(l, dict): + delete_keys_from_dict(l, lst_keys) + elif isinstance(l, list): + delete_list_dict_keys(l, lst_keys) + +def delete_keys_from_dict(source_dict, lst_keys): + for k in lst_keys: + try: + del source_dict[k] + except KeyError: + pass + for v in source_dict.values(): + if isinstance(v, dict): + delete_keys_from_dict(v, lst_keys) + if isinstance(v, list): + delete_list_dict_keys(v, lst_keys) + + +class Rift2OpenmanoTest(unittest.TestCase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.maxDiff = None + + def load_openmano_vnf(self, openmano_vnf_path): + with open(openmano_vnf_path, 'rb') as hdl: + openmano_vnf = yaml.load(hdl) + + return openmano_vnf + + def load_openmano_ns(self, openmano_ns_path): + with open(openmano_ns_path, 'rb') as hdl: + openmano_ns = yaml.load(hdl) + + return openmano_ns + + def rift_vnf(self, rift_vnf_path): + with open(rift_vnf_path, 'r') as xml_hdl: + rift_vnf = rift2openmano.RiftVNFD.from_xml_file_hdl(xml_hdl) + return rift_vnf + + def rift2openmano_vnf(self, rift_vnf_path): + rift_vnf = self.rift_vnf(rift_vnf_path) + openmano_vnfd = rift2openmano.rift2openmano_vnfd(rift_vnf) + + logger.debug( + "Converted vnf: %s", + yaml.safe_dump(openmano_vnfd, indent=4, default_flow_style=False)) + + return openmano_vnfd + + def rift2openmano_ns(self, rift_ns_path, rift_vnf_paths): + rift_vnf_hdls = [open(path, 'r') for path in rift_vnf_paths] + vnf_dict = rift2openmano.create_vnfd_from_xml_files(rift_vnf_hdls) + + with open(rift_ns_path, 'r') as xml_hdl: + rift_ns = rift2openmano.RiftNSD.from_xml_file_hdl(xml_hdl) + + openmano_nsd = rift2openmano.rift2openmano_nsd(rift_ns, vnf_dict) + logger.debug( + "Converted ns: %s", + yaml.safe_dump(openmano_nsd, indent=4, default_flow_style=False)) + + return openmano_nsd + + def generate_vnf_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(source_dict, ["description"]) + delete_keys_from_dict(dest_dict, ["description", "image metadata", "class"]) + + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + def generate_ns_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(dest_dict, ["graph"]) + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + +class Mwc16GenTest(Rift2OpenmanoTest): + OPENMANO_6WIND_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/6WindTR1.1.2.yaml" + ) + RIFT_6WIND_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/6WindTR1.1.2.xml" + ) + + OPENMANO_IMS_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/IMS-ALLin1.yaml" + ) + RIFT_IMS_VNF_PATH = os.path.join(THIS_DIR, + "rift_vnfs/IMS-ALLIN1.xml" + ) + + OPENMANO_GEN1_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/mwc16-gen1.yaml" + ) + RIFT_GEN1_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/mwc16gen1.xml" + ) + + OPENMANO_GEN2_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/mwc16-gen2.yaml" + ) + RIFT_GEN2_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/mwc16gen2.xml" + ) + + OPENMANO_MWC16_GEN_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/mwc16-gen.yaml" + ) + RIFT_MWC16_GEN_NS_PATH = os.path.join( + THIS_DIR, 
"rift_scenarios/mwc16-gen.xml" + ) + + OPENMANO_MWC16_PE_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/mwc16-pe.yaml" + ) + RIFT_MWC16_PE_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/mwc16-pe.xml" + ) + + OPENMANO_IMS_CORPA_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/IMS-allin1-corpA.yaml" + ) + RIFT_IMS_CORPA_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/IMS-corpA.xml" + ) + + OPENMANO_IMS_CORPB_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/IMS-allin1-corpB.yaml" + ) + RIFT_IMS_CORPB_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/IMS-corpB.xml" + ) + + def test_6wind_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_6WIND_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_6WIND_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_ims_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_IMS_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_IMS_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_gen1_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_GEN1_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_GEN1_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_gen2_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_GEN2_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_GEN2_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_ims_corpa_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_IMS_CORPA_NS_PATH, + [Mwc16GenTest.RIFT_IMS_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_IMS_CORPA_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + def test_ims_corpb_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_IMS_CORPB_NS_PATH, + [Mwc16GenTest.RIFT_IMS_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_IMS_CORPB_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + def test_mwc16_gen_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_MWC16_GEN_NS_PATH, + [Mwc16GenTest.RIFT_GEN1_VNF_PATH, Mwc16GenTest.RIFT_GEN2_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_MWC16_GEN_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + def test_mwc16_pe_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_MWC16_PE_NS_PATH, + [Mwc16GenTest.RIFT_6WIND_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_MWC16_PE_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + +def main(): + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + parser.add_argument('-n', '--no-runner', action='store_true') + args, unittest_args = parser.parse_known_args() + if args.no_runner: + runner = None + + logger.setLevel(logging.DEBUG if args.verbose else logging.WARN) + + unittest.main(testRunner=runner, argv=[sys.argv[0]]+unittest_args) + +if __name__ == '__main__': + main() \ No newline at end 
of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpA.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpA.yaml new file mode 100644 index 0000000..2152313 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpA.yaml @@ -0,0 +1,33 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: IMS-corpA +description: All in one Clearwater IMS for corporation A in MWC16 +topology: + nodes: + IMS-ALLIN1__1: # vnf/net name in the scenario + type: VNF # VNF, network, external_network (if it is a datacenter network) + VNF model: IMS-ALLIN1 # VNF name as introduced in OPENMANO DB + net-corpA: + type: external_network # Datacenter net + model: net-corpA + connections: + conn1: # provide a name for this net or connection + type: link + nodes: + - net-corpA: "0" # Datacenter net + - IMS-ALLIN1__1: eth0 # Node and its interface + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpB.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpB.yaml new file mode 100644 index 0000000..f6d5f7f --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpB.yaml @@ -0,0 +1,33 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +name: IMS-corpB +description: All in one Clearwater IMS for corporation B in MWC16 +topology: + nodes: + IMS-ALLIN1__1: # vnf/net name in the scenario + type: VNF # VNF, network, external_network (if it is a datacenter network) + VNF model: IMS-ALLIN1 # VNF name as introduced in OPENMANO DB + net-corpB: + type: external_network # Datacenter net + model: net-corpB + connections: + conn1: # provide a name for this net or connection + type: link + nodes: + - net-corpB: "0" # Datacenter net + - IMS-ALLIN1__1: eth0 # Node and its interface + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-gen.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-gen.yaml new file mode 100644 index 0000000..11755a7 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-gen.yaml @@ -0,0 +1,61 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: "mwc16-gen" +description: "mwc16-gen" +topology: + nodes: + mwc16gen1__1: + type: VNF + VNF model: mwc16gen1 + mwc16gen2__2: + type: VNF + VNF model: mwc16gen2 + "direct_vlan146": + type: external_network + model: "direct_vlan146" + mwc16data1: + type: external_network + model: mwc16data1 + mwc16data2: + type: external_network + model: mwc16data2 + mgmt: + type: external_network + model: mgmt + connections: + mgmt TEF: + type: link + nodes: + - "direct_vlan146": "0" + - mwc16gen1__1: eth0 + - mwc16gen2__2: eth0 + mwc16gen1__1-PE1: + type: link + nodes: + - mwc16data1: "0" + - mwc16gen1__1: xe0 + mwc16gen2__2-PE2: + type: link + nodes: + - mwc16data2: "0" + - mwc16gen2__2: xe0 + management: + type: link + nodes: + - mgmt: "0" + - mwc16gen1__1: eth1 + - mwc16gen2__2: eth1 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-pe.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-pe.yaml new file mode 100644 index 0000000..ae7a6a1 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-pe.yaml @@ -0,0 +1,79 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +name: "mwc16-pe" +description: "mwc16-pe" +topology: + nodes: + 6WindTR1.1.2__1: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__2: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__3: + type: VNF + VNF model: 6WindTR1.1.2 + interDC: + type: external_network + model: interDC + mwc16data1: + type: external_network + model: mwc16data1 + mwc16data2: + type: external_network + model: mwc16data2 + mgmt: + type: external_network + model: mgmt + connections: + 6WindTR1.1.2__1 enty point: + type: link + nodes: + - mwc16data1: "0" + - 6WindTR1.1.2__1: xe2 + 6WindTR1.1.2__3 to OpenStack: + type: link + nodes: + - interDC: "0" + - 6WindTR1.1.2__3: xe3 + 6WindTR1.1.2__2 entry point: + type: link + nodes: + - mwc16data2: "0" + - 6WindTR1.1.2__2: xe2 + management: + type: link + nodes: + - mgmt: "0" + - 6WindTR1.1.2__1: eth0 + - 6WindTR1.1.2__2: eth0 + - 6WindTR1.1.2__3: eth0 + 6WindTR1.1.2__2-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__2: xe1 + - 6WindTR1.1.2__3: xe1 + 6WindTR1.1.2__1-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__1: xe1 + - 6WindTR1.1.2__3: xe0 + 6WindTR1.1.2__1-6WindTR1.1.2__2: + type: link + nodes: + - 6WindTR1.1.2__1: xe0 + - 6WindTR1.1.2__2: xe0 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/6WindTR1.1.2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/6WindTR1.1.2.yaml new file mode 100644 index 0000000..a67797d --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/6WindTR1.1.2.yaml @@ -0,0 +1,81 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +vnf: + name: "6WindTR1.1.2" + VNFC: + - bridge-ifaces: + - vpci: "0000:00:03.0" + bandwidth: "1 Gbps" + name: "eth0" + numas: + - interfaces: + - vpci: "0000:00:05.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + - vpci: "0000:00:06.0" + bandwidth: "10 Gbps" + name: "xe1" + dedicated: "yes" + - vpci: "0000:00:07.0" + bandwidth: "10 Gbps" + name: "xe2" + dedicated: "yes" + - vpci: "0000:00:08.0" + bandwidth: "10 Gbps" + name: "xe3" + dedicated: "yes" + paired-threads-id: [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]] + paired-threads: 6 + memory: 8 + hypervisor: + version: "10002|12001|2.6.32-358.el6.x86_64" + type: "QEMU-kvm" + VNFC image: "/mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2" + image metadata: + use_incremental: "no" + processor: + model: "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + name: "VM" + external-connections: + - local_iface_name: eth0 + VNFC: VM + type: mgmt + name: eth0 + description: management + - local_iface_name: xe0 + VNFC: VM + type: data + name: xe0 + description: Data plane + - local_iface_name: xe1 + VNFC: VM + type: data + name: xe1 + description: Data plane + - local_iface_name: xe2 + VNFC: VM + type: data + name: xe2 + description: Data plane + - local_iface_name: xe3 + VNFC: VM + type: data + name: xe3 + description: Data plane + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/IMS-ALLin1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/IMS-ALLin1.yaml new file mode 100644 index 0000000..6a03ba9 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/IMS-ALLin1.yaml @@ -0,0 +1,39 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: IMS-ALLIN1 + description: IMS-ALLIN1 + external-connections: + - name: eth0 + type: mgmt + VNFC: IMS-ALLIN1-VM + local_iface_name: eth0 + description: General purpose interface + VNFC: + - name: IMS-ALLIN1-VM + description: IMS-ALLIN1-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/datatronics/allin1.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: [] + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen1.yaml new file mode 100644 index 0000000..f874ff8 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen1.yaml @@ -0,0 +1,89 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: mwc16gen1 + description: tidgen 4x10Gbps 28GB 11cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: mwc16gen1-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: mwc16gen1-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: mwc16gen1-VM # name of Virtual Machine + description: tidgen with 4x10Gbps 28GB + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen1.qcow2 + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 11 # "cores", "paired-threads", "threads" + memory: 28 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "yes" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen2.yaml new file mode 100644 index 0000000..db1685a --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen2.yaml @@ -0,0 +1,89 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +vnf: + name: mwc16gen2 + description: tidgen 4x10Gbps 28GB 11cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: mwc16gen2-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: mwc16gen2-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: mwc16gen2-VM # name of Virtual Machine + description: tidgen with 4x10Gbps 28GB + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen2.qcow2 + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 11 # "cores", "paired-threads", "threads" + memory: 28 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "yes" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpA.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpA.xml new file mode 100644 index 0000000..89a1c4f --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpA.xml @@ -0,0 +1,45 @@ + + + + + + 965dc8ea-c475-11e5-8040-fa163eb18cb8 + IMS-corpA + IMS-corpA + All in one Clearwater IMS for corporation A in MWC16 + + 9670b946-c475-11e5-8040-fa163eb18cb8 + conn1 + conn1 + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth0 + + + net-corpA + VLAN + + + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpB.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpB.xml new file mode 100644 index 0000000..c2dadf7 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpB.xml @@ -0,0 +1,45 @@ + + + + + + 123dc8ea-c475-11e5-8040-fa163eb18123 + IMS-corpB + IMS-corpB + All in one Clearwater IMS for corporation B in MWC16 + + 9670b946-c475-11e5-8040-fa163eb18cb8 + conn1 + conn1 + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth0 + + + net-corpB + VLAN + + + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + + + + \ No newline at end of file diff --git 
a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-gen.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-gen.xml new file mode 100644 index 0000000..911a971 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-gen.xml @@ -0,0 +1,104 @@ + + + + + + 091e3932-c46c-11e5-8576-fa163eb18cb8 + mwc16-gen + mwc16-gen + mwc16-gen + + 094740d4-c46c-11e5-8576-fa163eb18cb8 + mwc16gen1__1-PE1 + mwc16gen1__1-PE1 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + xe0 + + + mwc16data1 + VLAN + + + + 0947754a-c46c-11e5-8576-fa163eb18cb8 + mwc16gen2__2-PE2 + mwc16gen2__2-PE2 + ELAN + + 2 + eecfd632-bef1-11e5-b5b8-0800273ab84b + xe0 + + + mwc16data2 + VLAN + + + + 0947888c-c46c-11e5-8576-fa163eb18cb8 + mgmt TEF + mgmt TEF + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + eth0 + + + 2 + eecfd632-bef1-11e5-b5b8-0800273ab84b + eth0 + + + direct_vlan146 + VLAN + + + + 0947bb90-c46c-11e5-8576-fa163eb18cb8 + management + management + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + eth1 + + + 2 + eecfd632-bef1-11e5-b5b8-0800273ab84b + eth1 + + + mgmt + VLAN + + + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + + + 2 + eecfd632-bef1-11e5-b5b8-0800273ab84b + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe-onevnf.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe-onevnf.xml new file mode 100644 index 0000000..a00d4ff --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe-onevnf.xml @@ -0,0 +1,60 @@ + + + + + + 764c375c-c44e-11e5-b325-fa163eb18cb8 + mwc16-pe-onevnf + mwc16-pe-onevnf + mwc16-pe-onevnf + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 7660f714-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1 enty point + 6WindTR1.1.2__1 enty point + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data1 + VLAN + + + + 7660d040-c44e-11e5-b325-fa163eb18cb8 + management + management + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + mgmt + VLAN + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe.xml new file mode 100644 index 0000000..8e8b3e4 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe.xml @@ -0,0 +1,156 @@ + + + + + + 764c375c-c44e-11e5-b325-fa163eb18cb8 + mwc16-pe + mwc16-pe + mwc16-pe + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 76610cb8-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__3 to OpenStack + 6WindTR1.1.2__3 to OpenStack + ELAN + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe3 + + + interDC + VLAN + + + + 7660f714-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1 enty point + 6WindTR1.1.2__1 enty point + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data1 + VLAN + + + + 76611fc8-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__2 entry point + 6WindTR1.1.2__2 entry point + ELAN + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data2 + VLAN + + + + 7660d040-c44e-11e5-b325-fa163eb18cb8 + management + management + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + 3 + 
b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + mgmt + VLAN + + + + 7660b376-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__2-6WindTR1.1.2__3 + 6WindTR1.1.2__2-6WindTR1.1.2__3 + ELAN + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + + 76604f80-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1-6WindTR1.1.2__3 + 6WindTR1.1.2__1-6WindTR1.1.2__3 + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + + 766091de-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1-6WindTR1.1.2__2 + 6WindTR1.1.2__1-6WindTR1.1.2__2 + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/6WindTR1.1.2.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/6WindTR1.1.2.xml new file mode 100644 index 0000000..fcb6eee --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/6WindTR1.1.2.xml @@ -0,0 +1,151 @@ + + + + + + b7a3d170-c448-11e5-8795-fa163eb18cb8 + 6WindTR1.1.2 + + b7bbc9b0-c448-11e5-8795-fa163eb18cb8 + + + eth0 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + b7bbc9b0-c448-11e5-8795-fa163eb18cb8 + VM + 0000:00:0a.0 + + 12 + 8192 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 6 + + 0 + 1 + + + 2 + 3 + + + 4 + 5 + + + 6 + 7 + + + 8 + 9 + + + 10 + 11 + + + + + + /mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2 + + eth0 + eth0 + + OM-MGMT + 0000:00:03.0 + 1000000000 + + + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:05.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:06.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:07.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:08.0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/IMS-ALLIN1.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/IMS-ALLIN1.xml new file mode 100644 index 0000000..0a66d67 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/IMS-ALLIN1.xml @@ -0,0 +1,68 @@ + + + + + + 47914a30-c474-11e5-990a-fa163eb18cb8 + IMS-ALLIN1 + IMS-ALLIN1 + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + + + eth0 + VPORT + + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + IMS-ALLIN1-VM + IMS-ALLIN1-VM + 0000:00:0a.0 + + 2 + 4096 + 10 + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 1 + + + + + /mnt/powervault/virtualization/vnfs/datatronics/allin1.qcow2 + + eth0 + eth0 + + OM-MGMT + 0000:00:0a.0 + 0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen1.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen1.xml new file mode 100644 index 0000000..15c3ad2 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen1.xml @@ -0,0 +1,141 @@ + + + + + + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + mwc16gen1 + tidgen 4x10Gbps 28GB 11cores + + 09163412-c46c-11e5-8576-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 09163412-c46c-11e5-8576-fa163eb18cb8 + mwc16gen1-VM + tidgen 
with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen1.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 1000000 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 1000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen2.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen2.xml new file mode 100644 index 0000000..c30312b --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen2.xml @@ -0,0 +1,141 @@ + + + + + + eecfd632-bef1-11e5-b5b8-0800273ab84b + mwc16gen2 + tidgen 4x10Gbps 28GB 11cores + + 09163412-c46c-11e5-8576-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 09163412-c46c-11e5-8576-fa163eb18cb8 + mwc16gen2-VM + tidgen with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen2.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 1000000 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 1000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/20160212_openmano_RO_descriptors.zip b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/20160212_openmano_RO_descriptors.zip new file mode 100644 index 0000000000000000000000000000000000000000..3f9f1b12b7d2aa80a6c2f0eb22cdf2a55860f398 GIT binary patch literal 6904 zcmai&1z3~a+rUS6r+|cXhjht^0gf0TH9|T^cY}bmGy(z&f^@e?NT&iyhbXO-(t`3$ zcw_j!@Au!XYxj0t&+gyOe$P4QJ~sr6f{G3J_0VxwQM!Ed#|s)fdOBLU@k0K(MB}$h zetpr++|tq1)z<0iI#K{K026R#uA)>+TDNbE1OU842LQ;gU1#lO?&RtWRcgw5b1a6k2jGO~*R9Aj5bCxVV12r#^mD zJ=aFyh5KP0Ux}tHSsy7mKy&q#bU?@jqhC;OwuORfX}J>;jb@~7zla0bdDGE|t6WCq zl=K596?%Z=^y4So*z&Bq7|}d2+XW#QfdN@!I>MWMbKt3l`&G4A3RGcd8EL~x)X!If z3YZHQjk-Wng0F9<9Z4K_(;Q?WopQ8DhnXO;kt6>%8 z43?BLFiNSZ6n%wakZ@Gj&&z;r<}LM|L~5_&wp4#QM)MYMZ4J|N%_je6#f_#Ok>*5Y zk@1%^*eZ`xYwvzcrCJ;{#{(f*dQwlw*o($FCn=zExH*f=exm+m?#hTf+}ws=YqAsE z$S&Ln?SC5Kv9@%ybhR~~xHse&%18KWol~c1>aGcgY?Ffa95@vuwGz#>On9#kwDeG3 z`=WJib%`y6u($-9VwZAvxN33L8;lYelMu<3t)8!hM)H**7z;o_G*U{o!P~i-{7wdY z*!krcQHM0Z%kB%r(uLMaTxBwS!~d{r14;$xX!R|gpGMc~Y{5@r{<=2*TB_AwjaGBj zz24i=to}i{Yd><=e6fLGXSw~^#SnLiXCvBXdGT&6+a_~$ngd>BfuHj2NBaH6T=GX_ zFyRt#VmZ{kbv7jFJa2Zn{qPo7HOUU;T0m!O#*BE5TbHFct0Wb}yf`-gPMc2UzQW1L zQCk^wc}{>k*A(35M2ez%@t#<`K8yn_@2nQ3g6ecgLsV6tml?zSYUD$9e0|x=DyDmQ5hr5B;KwfIWvKiCZ`5Mnq*(dnebJgkcwj;!xqK++XKUTUoiJ_FSBZb{* zfdS){^q663%%D+;{Z(V04~5E4Hs@{OX+} z0YYr=z71p&mFL2qnyuOBR=yn&P}AY_vwN@i(*B6}=L09yTBKCJKL z#)5-!@tT+f=fhFlTwBB&~OAEx}FqT6%Tp9{|)1tU0&B4o@Uzpq3@@hlU{%hHrCoxSX 
zT$=5xE}t!t(&2Hz#Ga0P>2n@%(>ap6V{jkRG~Fd25&PH(VTE! zoos&Ajix2Ex85~zCV-A6+)^C?FM(b;ASsD7#tNe<%0xUz(BM>n3S&e#JGm*hxH9%l zI1&JNwKcaQWP4O!k+cSsYFo2m)u&kPH(BJwgZfrtqgbzQqLTDhL-@hP@}|c2Dz@vU zBYDV0M$OC_Csi(yj+&ovA!|;h#J1@%y4tZ{<=7Hslxdd;)aqR+#(y&O(}uQ zb|~;HR4kkL5b~0WSvqvw0mlm?^v-qhs~%zyN4R)Z__jxOO}x9U1$?u+)LyPV;2O$@ z+qzFiwUf=y;y@++tS?8r)yj}?=snJIiT=+v{1_ChTvg2`y-r_WU!?|eY!b+vpf5C; z%*VLk3$hRt*o$sq$EaGZP7fGm>8i9sa|oWB$^e9ud2DhA8zIM+7Kq%GC3SJqX;zZb zIa3<8T>*GI@8Zw&d+k}F#Ft0Ait}W~o$<+oR8eoGt|!{r&52d3uIC19%LfDnY{D9m zSKlW`QB>%ewIKDZWYWq`w9%PVqISz7Dui zJVvEe61&4rBh?`pMQ*I~GJ=LyY=Mau1PTI+9eG3~J{?5zcPI0Ix9WB>#xC8C9+bz; z%km|@4Onr~V0cc@?7j3!%U~ix9_v-T7t%?~v%-)Y2}m470{74%P8{}ivZ}y6l8*Pb|$kxa)<#MH)V;9=G6WG#dd{uXzS+0FRf+Xsk$cICNH|f0M zufo`t1KL9ubgU>OYD0=!JQU2k$WP=q4c2Pm>+n`XqlyEyQO7Q5Y=GD#94Rcf7N2c3 z(?QUS@OzhUn|^01)Ro?&O_9#XFG1!)OO?D!{(Z?o>6xOmREz#A?JQH{hd`J(P%R7?G6 zrj*5IE0B2??K;p7KXqFd_W59;%kLX?UCHF!Zv09GSi)t4F*+veH$9AIA-4SGK!0HqhD7#Wg z)~cTy-vkzz^^h_PCRtiMQ6&Eq7o*&2WW{h6rQ;!Wge3p1+Qa$;71N5+dpW?KnFA^t z?IM)XfJ*c=);ebWLDH-qT%JzcicVbKPTb1QSf0*U;~vQEYgF1LO%>BEd?aLh;+yrN z&f|Src%e%3y5H`C5vf8Kut2)zW3Ue!8^5_k*=Odd$$9SkbmJ9Vqb{6y%H=gAmh*&A zG==VZ&3=Q-IiW}2MD`ZV>&EZuAe|hCFN8EbTij--3m5 zY~S$>CJqqm80ecAKDJWr$A!F+!Fnj0#H!r*gG}wY-^P#bg1qSsDhISL4?f+6Lgg6R za}Np63_PkkG+Ua=PF0f@d;2O_?i)^tC>|A@4eR*|R_x^EZA%7K?sqG7f^9W!@ObrT zd`hgRQ%M>Nc`?a+;764}t{~s6K={TFt0hf3+Cf0rQYv=lyp6$~!RV*sq!%`4In>%^ zmq!)Z>kR9}=)ERnr}$^v`fhKwiAOEvsg{HWX<#Z1X1KNBv@{)l;BO^Z_-*Oc$p1&) zA8UrDl=_vtJ9^v5a!Q)oxHEu1<+4iRjBYw#f z3=+GP*AJIBZFec}?H}@PBFa1M9Ri{fo*Trp572xEN&$G2E6BLt&=GR?cBsSko1a+sQp9c^qx_+`f*i3@4h?))p`PJY-UC%W0wq z8NB_eW-XbiH7?$0!QaJ5{m~ZqIHUT&#&AJF;fc>q#b5FoujI`$Jx5V32X7n;(sM^g zy2j9F9TR(U2awKLzy^IOYi9!FHe_GUJN%bCYw)sK)l2=@^Wtna?Do6(VV$?BU{47! z+OMzo(V}N$ws3c7@QL@U61!aQf1PW9Dk|`E&3ysqKW5(0ubKBsaS$$VMpcsmzZu93 zDQKX`Hzq7;IBKYlq+?RM3!i!{5SsdMl;?(@ZoYj}_E?h&y?(a9RECCBNkuvS^jp*- zH~GGHO-5$Ln`(=2J{_CXoYLi%9$BDTh7Pv=5T~E5c*<_DaEyd3&RjZ!sR3qNABknF zZ#o`Z4$({Foc?C31gCC%O2_YOfpWZ%4>F`XkDaSz!d!i3=h`imDy$OxY%hd>u0pSg zNw+leYPtY@lN2?EN6x1cQnTJKROMg~<&~zM>Zu0vajRM;QdZqS>cMg~y{Vki#~8%1 zps>!CGzbFGRTbq9I^1g!DW<2O8_kTI?@S|7EFMy#ofcGcWbAj#O&Xwn7qLmOp69Qs z{B8Mli$`8xNMQL|hRcb;c%HDzL662(LIa)3r!k?p6#^;)bvbR~hi$l#$YS;SAsyhy zeY&ZRgbK+?)B+vpl8Ykb;HN)ZiPbhwnQ=RpNUqyKKlYO`f*)*haS@iqt4itqWu<&6 zXGCYtaZZ#lfFL1=l8qOOa3(K{DkO*8w#Alie8`NNl#LvnJC56oS(BMvY-ja@L`sU1 zEAbrC%h$x{D#~3MZur&@4+1Xso+ZU>>;Y_)a?DXrIjr6>53U(KV42LTk{!S-Q`$fY z6X9ygqCoH5AHp%=(xJi=JaXxci(1-ktjx52Rb9mV_FPXns(Rmi*7>JV|7)2i=y4&` zo<=aL;rh_A48!)b{ndv9+!$;3?|VGVi1raC!J%C)!;{1nrk{(A5vRmd7M$ zN+Z=mK5di<2J8q{HKS!xd$X#`I`pPAnr9Qr(aMa8 zdJb}nP$2!MwGC`hC!BQqk*Ph*UEo)O&)VEaT<;n-E5lSp#4jKmgkq`AsLO`4F1b2I zoW@q3hC&+MB0J}Cn@#-7sBV_Ms64pS?(_2L6MBmG&Feoq_-vE9*3CPpp?;A1jCz~F z+{g zyTHU5dsaL1DkGm$YhuBus41{v@lJMQih!=}Cw^K9TfvNruZI(?7aut20!txJeCMRd zQGq9+;lPf-u;-gL#NU%C))|LiJb$vk3GS4!7@Tl-K(7B8dK{tA?(bD@q|4t3zLp!Y7mUc^vk^b}^_xcn#ID7wG5l9g6Opq3-(mh910v_o zeheayzDF4M*SLaJeLiNu<1;CROD-bspN%<0UL<_Y{G0c$Z8}7tbHDs0U@u`UU(paP>BBk{i6&) + + + 965dc8ea-c475-11e5-8040-fa163eb18cb8 + IMS-corpA + IMS-corpA + All in one Clearwater IMS for corporation A in MWC16 + + 9670b946-c475-11e5-8040-fa163eb18cb8 + data + data + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth0 + + + net-corp + 108 + VLAN + + + + 9670b946-c475-11e5-8040-fa163eb18cb9 + management + management + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth1 + + + net-mgmtOS + VLAN + + + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + + juju + + clearwater-aio-proxy + + + 1 + 0 + + + config + + home_domain + string + true + ims.com + + + password + string + cw-aio + 
true + + + + create-update-user + + number + string + true + + + password + string + true + + + + delete-user + + number + string + true + + + + 1 + config + + proxied_ip + <rw_mgmt_ip> + + + + + + Update Domain + + 1 + 37838e08-d04c-11e5-8e5b-001b21b98a9d + cwims_vnfd + + 1 + config + + + + + Add User + + 1 + 37838e08-d04c-11e5-8e5b-001b21b98a9d + cwims_vnfd + + 1 + create-update-user + + + + + Delete User + + 1 + 37838e08-d04c-11e5-8e5b-001b21b98a9d + cwims_vnfd + + 1 + delete-user + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpB.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpB.xml new file mode 100644 index 0000000..c2dadf7 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpB.xml @@ -0,0 +1,45 @@ + + + + + + 123dc8ea-c475-11e5-8040-fa163eb18123 + IMS-corpB + IMS-corpB + All in one Clearwater IMS for corporation B in MWC16 + + 9670b946-c475-11e5-8040-fa163eb18cb8 + conn1 + conn1 + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth0 + + + net-corpB + VLAN + + + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/add_corpA_input.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/add_corpA_input.yaml new file mode 100644 index 0000000..9a2dabd --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/add_corpA_input.yaml @@ -0,0 +1,53 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +config_agent: {host: 1.1.1.1, name: agent, port: 9090, secret: secret, user: user} +init_config: {} +rpc_ip: + name: Add Corporation + nsr_id_ref: 359d76ab-6618-4894-93f7-b37b2ecbc711 + parameter: + - {name: Corporation Name, value: CorpA} + - {name: Tunnel Key, value: '10'} + parameter_group: + - name: PE1 + parameter: + - {name: Vlan ID, value: 101} + - {name: Interface Name, value: eth3} + - {name: Corp. Network, value: 10.0.1.0/24} + - {name: Corp. Gateway, value: 10.0.1.1} + - {name: Local Network, value: 10.255.255.0/24} + - {name: Local Network Area, value: '0'} + - name: PE2 + parameter: + - {name: Vlan ID, value: 102} + - {name: Interface Name, value: eth3} + - {name: Corp. Network, value: 10.0.2.0/24} + - {name: Corp. Gateway, value: 10.0.2.1} + - {name: Local Network, value: 10.255.255.0/24} + - {name: Local Network Area, value: '0'} + - name: PE3 + parameter: + - {name: Vlan ID, value: 108} + - {name: Interface Name, value: eth4} + - {name: Corp. Network, value: 10.0.4.0/24} + - {name: Corp. 
Gateway, value: 10.0.4.1} + - {name: Local Network, value: 10.255.255.0/24} + - {name: Local Network Area, value: '0'} +unit_names: {159d76ab-6618-4894-93f7-b37b2ecbc711: pe1, 259d76ab-6618-4894-93f7-b37b2ecbc711: pe2, + 359d76ab-6618-4894-93f7-b37b2ecbc711: pe3} +vnfr_index_map: {159d76ab-6618-4894-93f7-b37b2ecbc711: 1, 259d76ab-6618-4894-93f7-b37b2ecbc711: 2, + 359d76ab-6618-4894-93f7-b37b2ecbc711: 3} + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/gwcorpA.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/gwcorpA.xml new file mode 100644 index 0000000..d447420 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/gwcorpA.xml @@ -0,0 +1,123 @@ + + + + 091e3932-c46c-11e5-8576-fa163eb18cb8 + gw_corpA + gw_corpA + Gateways to access as corpA to PE1 and PE2 + + 194740d4-c46c-11e5-8576-fa163eb18cb8 + connection 0 + connection 0 + ELAN + + 1 + 11115d5e-c474-11e5-990a-fa163eb18cb8 + eth1 + + + mgmt + VLAN + + + + 0947754a-c46c-11e5-8576-fa163eb18cb8 + connection 1 + connection 1 + ELAN + + 2 + 22215d5e-c474-11e5-990a-fa163eb18cb8 + eth1 + + + mgmt + VLAN + + + + 294740d4-c46c-11e5-8576-fa163eb18cb8 + connection 2 + connection 2 + ELAN + + 1 + 11115d5e-c474-11e5-990a-fa163eb18cb8 + eth0 + + + mwc1 + VLAN + + + + 4947754a-c46c-11e5-8576-fa163eb18cb8 + connection 3 + connection 3 + ELAN + + 2 + 22215d5e-c474-11e5-990a-fa163eb18cb8 + eth0 + + + mwc2 + VLAN + + + + 5947888c-c46c-11e5-8576-fa163eb18cb8 + connection 4 + connection 4 + ELAN + + 1 + 11115d5e-c474-11e5-990a-fa163eb18cb8 + xe0 + + + mwc16data1 + VLAN + 101 + + + + 7947bb90-c46c-11e5-8576-fa163eb18cb8 + connection 5 + connection 5 + ELAN + + 2 + 22215d5e-c474-11e5-990a-fa163eb18cb8 + xe0 + + + mwc16data2 + VLAN + 102 + + + + 1 + 11115d5e-c474-11e5-990a-fa163eb18cb8 + + + 2 + 22215d5e-c474-11e5-990a-fa163eb18cb8 + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-gen.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-gen.xml new file mode 100644 index 0000000..b5fc178 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-gen.xml @@ -0,0 +1,91 @@ + + + + + + 091e3932-c46c-11e5-8576-fa163eb18cb7 + mwc16_traffic_generator + mwc16_traffic_generator + Traffic generator connected to the demo environment + + 094740d4-c46c-11e5-8576-fa163eb18cb8 + connection 0 + connection 0 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + eth0 + + + mwc + VLAN + + + + 0947754a-c46c-11e5-8576-fa163eb18cb8 + connection 1 + connection 1 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + eth1 + + + mgmt + VLAN + + + + 0947888c-c46c-11e5-8576-fa163eb18cb8 + connection 2 + connection 2 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + xe0 + + + mwc16data1 + VLAN + 3000 + + + + 0947bb90-c46c-11e5-8576-fa163eb18cb8 + connection 3 + connection 3 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + xe2 + + + mwc16data2 + VLAN + 3000 + + + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe-onevnf.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe-onevnf.xml new file mode 100644 index 0000000..a00d4ff --- /dev/null +++ 
b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe-onevnf.xml @@ -0,0 +1,60 @@ + + + + + + 764c375c-c44e-11e5-b325-fa163eb18cb8 + mwc16-pe-onevnf + mwc16-pe-onevnf + mwc16-pe-onevnf + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 7660f714-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1 enty point + 6WindTR1.1.2__1 enty point + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data1 + VLAN + + + + 7660d040-c44e-11e5-b325-fa163eb18cb8 + management + management + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + mgmt + VLAN + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe.xml new file mode 100644 index 0000000..0bc3f31 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe.xml @@ -0,0 +1,566 @@ + + + + + + 764c375c-c44e-11e5-b325-fa163eb18cb8 + mwc16-pe + mwc16-pe + mwc16-pe + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + juju + + vpe-router + + + 0 + 0 + + + 1 + config + + vpe-router + <rw_mgmt_ip> + + + user + root + + + pass + 6windos + + + hostname + pe2 + + + + 2 + configure-interface + + iface-name + eth1 + + + cidr + 10.10.10.10/30 + + + + 3 + configure-interface + + iface-name + eth2 + + + cidr + 10.10.10.6/30 + + + + 4 + configure-interface + + iface-name + eth3 + + + + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + juju + + vpe-router + + + 0 + 0 + + + 1 + config + + vpe-router + <rw_mgmt_ip> + + + user + root + + + pass + 6windos + + + hostname + pe3 + + + + 2 + configure-interface + + iface-name + eth1 + + + cidr + 10.10.10.2/30 + + + + 3 + configure-interface + + iface-name + eth2 + + + cidr + 10.10.10.5/30 + + + + 4 + configure-interface + + iface-name + eth3 + + + + 5 + configure-interface + + iface-name + eth4 + + + + + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + juju + + vpe-router + + + 0 + 0 + + + 1 + config + + vpe-router + <rw_mgmt_ip> + + + user + root + + + pass + 6windos + + + hostname + pe1 + + + + 2 + configure-interface + + iface-name + eth1 + + + cidr + 10.10.10.9/30 + + + + 3 + configure-interface + + iface-name + eth2 + + + cidr + 10.10.10.1/30 + + + + 4 + configure-interface + + iface-name + eth3 + + + + + + 76610cb8-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__3 to OpenStack + 6WindTR1.1.2__3 to OpenStack + ELAN + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe3 + + + interDC + VLAN + + + + 7660f714-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1 enty point + 6WindTR1.1.2__1 enty point + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data1 + VLAN + + + + 76611fc8-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__2 entry point + 6WindTR1.1.2__2 entry point + ELAN + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data2 + VLAN + + + + 7660d040-c44e-11e5-b325-fa163eb18cb8 + management + management + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + mgmt + VLAN + + + + 7660b376-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__2-6WindTR1.1.2__3 + 6WindTR1.1.2__2-6WindTR1.1.2__3 + ELAN + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + + 76604f80-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1-6WindTR1.1.2__3 + 6WindTR1.1.2__1-6WindTR1.1.2__3 + ELAN + + 1 + 
b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + + 766091de-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1-6WindTR1.1.2__2 + 6WindTR1.1.2__1-6WindTR1.1.2__2 + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + + Add SP Test Corporation + + Corporation Name + string + true + SP + + + Tunnel Key + integer + true + 10 + true + + + PE1 + + Vlan ID + integer + true + 3000 + true + + + Interface Name + string + true + eth3 + true + + + Corp. Network + string + true + 10.0.1.0/24 + true + + + Corp. Gateway + string + true + 10.0.1.1 + true + + false + + + PE2 + + Vlan ID + integer + true + 3000 + true + + + Interface Name + string + true + eth3 + true + + + Corp. Network + string + true + 10.0.2.0/24 + true + + + Corp. Gateway + string + true + 10.0.2.1 + true + + false + + + PE3 + + Vlan ID + integer + true + 3000 + true + + + Interface Name + string + true + eth3 + true + + + Corp. Network + string + true + 10.0.3.0/24 + true + + + Corp. Gateway + string + true + 10.0.3.1 + true + + false + + /home/rift/.install/usr/bin/add_corporation.py + + + Add Corporation + + Corporation Name + string + true + CorpA + + + Tunnel Key + integer + true + 1 + true + + + PE1 + + Vlan ID + integer + true + 101 + true + + + Interface Name + string + true + eth3 + + + Corp. Network + string + true + 10.0.1.0/24 + + + Corp. Gateway + string + true + 10.0.1.1 + + false + + + PE2 + + Vlan ID + integer + true + 102 + true + + + Interface Name + string + true + eth3 + + + Corp. Network + string + true + 10.0.2.0/24 + + + Corp. Gateway + string + true + 10.0.2.1 + + false + + + PE3 + + Vlan ID + integer + true + 108 + true + + + Interface Name + string + true + eth4 + + + Corp. Network + string + true + 10.0.4.0/24 + + + Corp. Gateway + string + true + 10.0.4.1 + + false + + /home/rift/.install/usr/bin/add_corporation.py + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/pe_config.py b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/pe_config.py new file mode 100755 index 0000000..63426cd --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/pe_config.py @@ -0,0 +1,383 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
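+#
+# This generator mirrors the hand-written mwc16-pe.xml descriptor above: it
+# uses the generated NsdYang/RwYang bindings to build an NSD with three
+# constituent "vpe-router" VNFs (PE1-PE3), each configured through the juju
+# charm with a "config" primitive followed by per-interface
+# "configure-interface" primitives, adds the "Add SP Test Corporation" and
+# "Add Corporation" NS-level config primitives, and prints the resulting
+# descriptor as XML and as JSON.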
+ +import os +import json +from gi.repository import NsdYang, RwYang + +def add_pe_vnf(nsd, vnf_index, intf_ip_pairs): + const_vnfd = nsd.constituent_vnfd.add() + const_vnfd.vnfd_id_ref = "b7a3d170-c448-11e5-8795-fa163eb18cb8" + const_vnfd.member_vnf_index = vnf_index + + vnf_config = const_vnfd.vnf_configuration + vnf_config.input_params.config_priority = 0 + vnf_config.input_params.config_delay = 0 + + # Select "script" configuration + vnf_config.config_type = 'juju' + vnf_config.juju.charm = 'vpe-router' + + # Set the initital-config + init_config = NsdYang.InitialConfigPrimitive.from_dict({ + "seq": 1, + "name": "config", + "parameter": [ + {"name": "vpe-router", "value": ""}, + {"name": "user", "value": "root"}, + {"name": "pass", "value": "6windos"} + ] + }) + vnf_config.initial_config_primitive.append(init_config) + + for seq, (intf, cidr) in enumerate(intf_ip_pairs, start=2): + params = [{"name": "iface-name", "value": intf}] + if cidr is not None: + params.append( + {"name": "cidr", "value": cidr} + ) + + vnf_config.initial_config_primitive.add().from_dict({ + "seq": seq, + "name": "configure-interface", + "parameter": params + }) + + +nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() +add_pe_vnf(nsd, 1, + [ + ("eth1", "10.10.10.9/30"), + ("eth2", "10.10.10.1/30"), + ("eth3", None), + ] +) + +add_pe_vnf(nsd, 2, + [ + ("eth1", "10.10.10.10/30"), + ("eth2", "10.10.10.6/30"), + ("eth3", None), + ] +) + +add_pe_vnf(nsd, 3, + [ + ("eth1", "10.10.10.2/30"), + ("eth2", "10.10.10.5/30"), + ("eth3", None), + ("eth4", None), + ] +) + +ns_cfg_prim = nsd.config_primitive.add() +ns_cfg_prim.name = "Add SP Test Corporation" +ns_cfg_prim.user_defined_script = "/home/rift/.install/usr/bin/add_corporation.py" + +ns_cfg_prim.parameter.add().from_dict({ + "name": "Corporation Name", + "data_type": "string", + "mandatory": True, + }) + +ns_cfg_prim.parameter.add().from_dict({ + "name": 'Tunnel Key', + "data_type": "integer", + "mandatory": True, + "default_value": "10", + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE1", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "3000", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.1.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.1.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE2", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "3000", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.2.0/24", + }, + { + "name": 'Corp. 
Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.2.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE3", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "3000", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.3.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.3.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim = nsd.config_primitive.add() +ns_cfg_prim.name = "Add Corporation" +ns_cfg_prim.user_defined_script = "/home/rift/.install/usr/bin/add_corporation.py" + +ns_cfg_prim.parameter.add().from_dict({ + "name": "Corporation Name", + "data_type": "string", + "mandatory": True, + }) + +ns_cfg_prim.parameter.add().from_dict({ + "name": 'Tunnel Key', + "data_type": "integer", + "mandatory": True, + "default_value": "1", + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE1", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "101", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.1.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.1.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE2", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "102", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.2.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.2.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE3", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "108", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth4", + }, + { + "name": 'Corp. 
Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.4.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.4.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +model = RwYang.Model.create_libncx() +model.load_module("nsd") +print(nsd.to_xml_v2(model, pretty_print=True)) + +print("\n\n") +print(json.dumps(nsd.as_dict(), indent=4, separators=(',', ': '))) \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/6WindTR1.1.2.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/6WindTR1.1.2.xml new file mode 100644 index 0000000..fcb6eee --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/6WindTR1.1.2.xml @@ -0,0 +1,151 @@ + + + + + + b7a3d170-c448-11e5-8795-fa163eb18cb8 + 6WindTR1.1.2 + + b7bbc9b0-c448-11e5-8795-fa163eb18cb8 + + + eth0 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + b7bbc9b0-c448-11e5-8795-fa163eb18cb8 + VM + 0000:00:0a.0 + + 12 + 8192 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 6 + + 0 + 1 + + + 2 + 3 + + + 4 + 5 + + + 6 + 7 + + + 8 + 9 + + + 10 + 11 + + + + + + /mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2 + + eth0 + eth0 + + OM-MGMT + 0000:00:03.0 + 1000000000 + + + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:05.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:06.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:07.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:08.0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/IMS-ALLIN1.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/IMS-ALLIN1.xml new file mode 100644 index 0000000..cb0d1ff --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/IMS-ALLIN1.xml @@ -0,0 +1,81 @@ + + + + + + 47914a30-c474-11e5-990a-fa163eb18cb8 + IMS-ALLIN1_2p + IMS-ALLIN1_2p + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + IMS-ALLIN1_2p-VM + IMS-ALLIN1_2p-VM + 0000:00:0a.0 + + 2 + 4096 + 10 + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 1 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/allin1.qcow2 + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 0 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe1.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe1.xml new file mode 100644 index 0000000..5f69514 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe1.xml @@ -0,0 +1,94 @@ + + + + + + 11115d5e-c474-11e5-990a-fa163eb18cb8 + gw_corpA_PE1 + gw_corpA_PE1 + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + gw_corpA_PE1-VM + gw_corpA_PE1-VM + 
0000:00:0a.0 + + 2 + 4096 + 10 + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 1 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE1.qcow2 + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 0 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 0 + + + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + 10000000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe2.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe2.xml new file mode 100644 index 0000000..2c00246 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe2.xml @@ -0,0 +1,94 @@ + + + + + + 22215d5e-c474-11e5-990a-fa163eb18cb8 + gw_corpA_PE2 + gw_corpA_PE2 + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + gw_corpA_PE2-VM + gw_corpA_PE2-VM + 0000:00:0a.0 + + 2 + 4096 + 10 + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 1 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE2.qcow2 + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 0 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 0 + + + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + 10000000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen1.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen1.xml new file mode 100644 index 0000000..0de39b4 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen1.xml @@ -0,0 +1,141 @@ + + + + + + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + mwc16gen + tidgen 4x10Gbps 28GB 11cores + + 09163412-c46c-11e5-8576-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 09163412-c46c-11e5-8576-fa163eb18cb8 + mwc16gen1-VM + tidgen with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/tidgen_mwc16.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 1000000 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 1000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen2.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen2.xml new file mode 100644 index 0000000..c30312b --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen2.xml @@ -0,0 +1,141 @@ + + + + + + eecfd632-bef1-11e5-b5b8-0800273ab84b + mwc16gen2 + tidgen 4x10Gbps 28GB 11cores + + 09163412-c46c-11e5-8576-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 09163412-c46c-11e5-8576-fa163eb18cb8 + mwc16gen2-VM + tidgen with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) 
Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen2.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 1000000 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 1000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/IMS-allin1-corpA.yaml.generic b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/IMS-allin1-corpA.yaml.generic new file mode 100644 index 0000000..87cdfd5 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/IMS-allin1-corpA.yaml.generic @@ -0,0 +1,24 @@ +--- +name: IMS-corpA +description: All in one Clearwater IMS for corporation A in MWC16 +topology: + nodes: + ims-corpA: # vnf/net name in the scenario + type: VNF # VNF, network, external_network (if it is a datacenter network) + VNF model: IMS-ALLIN1_2p # VNF name as introduced in OPENMANO DB + net-corpA: + type: external_network # Datacenter net + model: net-corp:${VLAN_CORPA_PE3} + net-mgmtOS: + type: external_network # Datacenter net + model: net-mgmtOS + connections: + data: # provide a name for this net or connection + nodes: + - net-corpA: null # Datacenter net + - ims-corpA: eth0 # Node and its interface + management: # provide a name for this net or connection + nodes: + - net-mgmtOS: null # Datacenter net + - ims-corpA: eth1 # Node and its interface + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml new file mode 100644 index 0000000..4736acf --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml @@ -0,0 +1,59 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
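+#
+# Scenario: two corporation-A gateway VNFs (gw_corpA_PE1 and gw_corpA_PE2).
+# Each gateway attaches its eth0 bridge port to an external network
+# (mwc1 / mwc2) and its xe0 data port to a VLAN-tagged external network
+# (mwc16data1:101 / mwc16data2:102) facing the corresponding PE router.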
+# +name: gw_corpA +topology: + nodes: + gw_corpA_PE1: + type: VNF + VNF model: gw_corpA_PE1 + gw_corpA_PE2: + type: VNF + VNF model: gw_corpA_PE2 + mwc1: + type: external_network + model: mwc1 + mwc2: + type: external_network + model: mwc2 + mwc16data1_vlan: + type: external_network + model: "mwc16data1:101" + mwc16data2_vlan: + type: external_network + model: "mwc16data2:102" + connections: + connection 0: + type: link + nodes: + - mwc1: null + - gw_corpA_PE1: eth0 + connection 1: + type: link + nodes: + - mwc2: null + - gw_corpA_PE2: eth0 + connection 2: + type: link + nodes: + - mwc16data1_vlan: null + - gw_corpA_PE1: xe0 + connection 3: + type: link + nodes: + - mwc16data2_vlan: null + - gw_corpA_PE2: xe0 +description: Gateways to access as corpA to PE1 and PE2 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml.generic b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml.generic new file mode 100644 index 0000000..fb953fc --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml.generic @@ -0,0 +1,92 @@ +name: gw_corpA +topology: + nodes: + gw_corpA_PE1: + graph: + x: 370 + y: 149 + ifaces: + left: + - - eth0 + - v + right: + - - xe0 + - v + type: VNF + VNF model: gw_corpA_PE1 + gw_corpA_PE2: + graph: + x: 370 + y: 308 + ifaces: + left: + - - eth0 + - v + right: + - - xe0 + - v + type: VNF + VNF model: gw_corpA_PE2 + mwc1: + graph: + x: 81 + y: 149 + ifaces: + right: + - - "0" + - v + type: external_network + model: mwc1 + mwc2: + graph: + x: 81 + y: 308 + ifaces: + right: + - - "0" + - v + type: external_network + model: mwc2 + mwc16data1_vlan: + graph: + x: 690 + y: 161 + ifaces: + left: + - - "0" + - d + type: external_network + model: "mwc16data1:${VLAN_CORPA_PE1}" + mwc16data2_vlan: + graph: + x: 688 + y: 327 + ifaces: + left: + - - "0" + - d + type: external_network + model: "mwc16data2:${VLAN_CORPA_PE2}" + connections: + connection 0: + type: link + nodes: + - mwc1: null + - gw_corpA_PE1: eth0 + connection 1: + type: link + nodes: + - mwc2: null + - gw_corpA_PE2: eth0 + connection 2: + type: link + nodes: + - mwc16data1_vlan: null + - gw_corpA_PE1: xe0 + connection 3: + type: link + nodes: + - mwc16data2_vlan: null + - gw_corpA_PE2: xe0 +description: Gateways to access as corpA to PE1 and PE2 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/mwc16-pe.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/mwc16-pe.yaml new file mode 100644 index 0000000..ae7a6a1 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/mwc16-pe.yaml @@ -0,0 +1,79 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
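+#
+# Scenario: three instances of the 6WindTR1.1.2 VNF meshed over their
+# xe0/xe1 data ports, with xe2 of instances 1 and 2 as the entry points to
+# mwc16data1 / mwc16data2, xe3 of instance 3 connected to the interDC
+# network, and all eth0 ports on the shared mgmt network.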
+# +name: "mwc16-pe" +description: "mwc16-pe" +topology: + nodes: + 6WindTR1.1.2__1: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__2: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__3: + type: VNF + VNF model: 6WindTR1.1.2 + interDC: + type: external_network + model: interDC + mwc16data1: + type: external_network + model: mwc16data1 + mwc16data2: + type: external_network + model: mwc16data2 + mgmt: + type: external_network + model: mgmt + connections: + 6WindTR1.1.2__1 enty point: + type: link + nodes: + - mwc16data1: "0" + - 6WindTR1.1.2__1: xe2 + 6WindTR1.1.2__3 to OpenStack: + type: link + nodes: + - interDC: "0" + - 6WindTR1.1.2__3: xe3 + 6WindTR1.1.2__2 entry point: + type: link + nodes: + - mwc16data2: "0" + - 6WindTR1.1.2__2: xe2 + management: + type: link + nodes: + - mgmt: "0" + - 6WindTR1.1.2__1: eth0 + - 6WindTR1.1.2__2: eth0 + - 6WindTR1.1.2__3: eth0 + 6WindTR1.1.2__2-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__2: xe1 + - 6WindTR1.1.2__3: xe1 + 6WindTR1.1.2__1-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__1: xe1 + - 6WindTR1.1.2__3: xe0 + 6WindTR1.1.2__1-6WindTR1.1.2__2: + type: link + nodes: + - 6WindTR1.1.2__1: xe0 + - 6WindTR1.1.2__2: xe0 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/tidgen.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/tidgen.yaml new file mode 100644 index 0000000..d1b2b45 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/tidgen.yaml @@ -0,0 +1,56 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: mwc16_traffic_generator +description: Traffic generator connected to the demo environment +topology: + nodes: + mwc16gen__1: + type: VNF + VNF model: mwc16gen + mwc: + type: external_network + model: mwc + mgmt: + type: external_network + model: mgmt + mwc16data1: + type: external_network + model: "mwc16data1:3000" + mwc16data2: + type: external_network + model: "mwc16data2:3000" + connections: + connection 0: + type: link + nodes: + - mwc: "0" + - mwc16gen__1: eth0 + connection 1: + type: link + nodes: + - mgmt: "0" + - mwc16gen__1: eth1 + connection 2: + type: link + nodes: + - "mwc16data1:3000": "0" + - mwc16gen__1: xe0 + connection 3: + type: link + nodes: + - "mwc16data2:3000": "0" + - mwc16gen__1: xe2 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/6WindTR1.1.2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/6WindTR1.1.2.yaml new file mode 100644 index 0000000..a67797d --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/6WindTR1.1.2.yaml @@ -0,0 +1,81 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: "6WindTR1.1.2" + VNFC: + - bridge-ifaces: + - vpci: "0000:00:03.0" + bandwidth: "1 Gbps" + name: "eth0" + numas: + - interfaces: + - vpci: "0000:00:05.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + - vpci: "0000:00:06.0" + bandwidth: "10 Gbps" + name: "xe1" + dedicated: "yes" + - vpci: "0000:00:07.0" + bandwidth: "10 Gbps" + name: "xe2" + dedicated: "yes" + - vpci: "0000:00:08.0" + bandwidth: "10 Gbps" + name: "xe3" + dedicated: "yes" + paired-threads-id: [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]] + paired-threads: 6 + memory: 8 + hypervisor: + version: "10002|12001|2.6.32-358.el6.x86_64" + type: "QEMU-kvm" + VNFC image: "/mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2" + image metadata: + use_incremental: "no" + processor: + model: "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + name: "VM" + external-connections: + - local_iface_name: eth0 + VNFC: VM + type: mgmt + name: eth0 + description: management + - local_iface_name: xe0 + VNFC: VM + type: data + name: xe0 + description: Data plane + - local_iface_name: xe1 + VNFC: VM + type: data + name: xe1 + description: Data plane + - local_iface_name: xe2 + VNFC: VM + type: data + name: xe2 + description: Data plane + - local_iface_name: xe3 + VNFC: VM + type: data + name: xe3 + description: Data plane + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/IMS-ALLin1_2p.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/IMS-ALLin1_2p.yaml new file mode 100644 index 0000000..ee58e17 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/IMS-ALLin1_2p.yaml @@ -0,0 +1,46 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
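+#
+# Single-VM "all in one" Clearwater IMS VNF (used by the IMS-corpA and
+# IMS-corpB scenarios above): one VNFC with a virtio data interface (eth0),
+# a management interface (eth1), one paired thread, 4 GB of RAM and a
+# 10 GB disk.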
+# +--- +vnf: + name: IMS-ALLIN1_2p + description: IMS-ALLIN1_2p + external-connections: + - name: eth0 + type: bridge + VNFC: IMS-ALLIN1_2p-VM + local_iface_name: eth0 + description: Virtio data interface + - name: eth1 + type: mgmt + VNFC: IMS-ALLIN1_2p-VM + local_iface_name: eth1 + description: Management interface + VNFC: + - name: IMS-ALLIN1_2p-VM + description: IMS-ALLIN1_2p-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/allin1.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + - name: eth1 + vpci: "0000:00:0b.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: [] + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE1.yaml new file mode 100644 index 0000000..c05e54c --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE1.yaml @@ -0,0 +1,48 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: gw_corpA_PE1 + description: gw_corpA_PE1 + external-connections: + - name: eth0 + type: bridge + VNFC: gw_corpA_PE1-VM + local_iface_name: eth0 + description: Interface to Red10 (vlan146) + - name: xe0 + type: data + VNFC: gw_corpA_PE1-VM + local_iface_name: xe0 + description: Interface to PE1 + VNFC: + - name: gw_corpA_PE1-VM + description: gw_corpA_PE1-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE1.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: + - vpci: "0000:00:10.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE2.yaml new file mode 100644 index 0000000..ca64c6e --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE2.yaml @@ -0,0 +1,48 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +vnf: + name: gw_corpA_PE2 + description: gw_corpA_PE2 + external-connections: + - name: eth0 + type: bridge + VNFC: gw_corpA_PE2-VM + local_iface_name: eth0 + description: Interface to Red10 (vlan146) + - name: xe0 + type: data + VNFC: gw_corpA_PE2-VM + local_iface_name: xe0 + description: Interface to PE2 + VNFC: + - name: gw_corpA_PE2-VM + description: gw_corpA_PE2-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE2.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: + - vpci: "0000:00:10.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/tidgen_mwc16.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/tidgen_mwc16.yaml new file mode 100644 index 0000000..d818d15 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/tidgen_mwc16.yaml @@ -0,0 +1,89 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: mwc16gen + description: tidgen 2x10Gbps 28GB 11cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: mwc16gen1-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: mwc16gen1-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe0 + description: Data interface 0 + - name: xe1 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe1 + description: Data interface 1 + - name: xe2 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe2 + description: Data interface 2 + - name: xe3 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe3 + description: Data interface 3 + VNFC: # Virtual machine array + - name: mwc16gen1-VM # name of Virtual Machine + description: tidgen with 2x10Gbps 28GB + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/tidgen_mwc16.qcow2 + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 11 # "cores", "paired-threads", "threads" + memory: 28 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "yes" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: 
"yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/tidgen_ns_2sriov.yaml b/modules/core/mano/models/openmano/test/tidgen_ns_2sriov.yaml new file mode 100644 index 0000000..f9487bb --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_ns_2sriov.yaml @@ -0,0 +1,49 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: 2tidgenMWC_2sriov +description: scenario with 2 tidgenMWC VNFs +topology: + nodes: + tidgen1: #VNF name + type: VNF + VNF model: tidgenMWC_2sriov #VNF type + tidgen2: + type: VNF + VNF model: tidgenMWC_2sriov + default: #Name of external network + type: external_network + model: default + connections: + mgmtnet: + nodes: + - tidgen1: eth0 + - tidgen2: eth0 + datanet0: + nodes: + - tidgen1: xe0 + - tidgen2: xe0 + datanet1: + nodes: + - tidgen1: xe1 + - tidgen2: xe1 + control-net: + nodes: + - default: null + - tidgen1: eth1 + - tidgen2: eth1 + + diff --git a/modules/core/mano/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml b/modules/core/mano/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml new file mode 100644 index 0000000..501e307 --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml @@ -0,0 +1,44 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: 2tidgenMWC_2sriov_no_ctrlnet +description: scenario with 2 tidgenMWC VNFs +topology: + nodes: + tidgen1: #VNF name + type: VNF + VNF model: tidgenMWC_2sriov_no_ctrlnet #VNF type + tidgen2: + type: VNF + VNF model: tidgenMWC_2sriov_no_ctrlnet + default: #Name of external network + type: external_network + model: default + connections: + mgmtnet: + nodes: + - tidgen1: eth0 + - tidgen2: eth0 + datanet0: + nodes: + - tidgen1: xe0 + - tidgen2: xe0 + datanet1: + nodes: + - tidgen1: xe1 + - tidgen2: xe1 + + diff --git a/modules/core/mano/models/openmano/test/tidgen_ns_4sriov.yaml b/modules/core/mano/models/openmano/test/tidgen_ns_4sriov.yaml new file mode 100644 index 0000000..35cb543 --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_ns_4sriov.yaml @@ -0,0 +1,57 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: 2tidgenMWC_4sriov +description: scenario with 2 tidgenMWC VNFs +topology: + nodes: + tidgen1: #VNF name + type: VNF + VNF model: tidgenMWC_4sriov #VNF type + tidgen2: + type: VNF + VNF model: tidgenMWC_4sriov + default: #Name of external network + type: external_network + model: default + connections: + mgmtnet: + nodes: + - tidgen1: eth0 + - tidgen2: eth0 + datanet0: + nodes: + - tidgen1: xe0 + - tidgen2: xe0 + datanet1: + nodes: + - tidgen1: xe1 + - tidgen2: xe1 + datanet2: + nodes: + - tidgen1: xe2 + - tidgen2: xe2 + datanet3: + nodes: + - tidgen1: xe3 + - tidgen2: xe3 + control-net: + nodes: + - default: null + - tidgen1: eth1 + - tidgen2: eth1 + + diff --git a/modules/core/mano/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml b/modules/core/mano/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml new file mode 100644 index 0000000..de2b2b8 --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml @@ -0,0 +1,48 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: 2tidgenMWC_4sriov_no_ctrlnet +description: scenario with 2 tidgenMWC VNFs +topology: + nodes: + tidgen1: #VNF name + type: VNF + VNF model: tidgenMWC_4sriov_no_ctrlnet #VNF type + tidgen2: + type: VNF + VNF model: tidgenMWC_4sriov_no_ctrlnet + default: #Name of external network + type: external_network + model: default + connections: + datanet0: + nodes: + - tidgen1: xe0 + - tidgen2: xe0 + datanet1: + nodes: + - tidgen1: xe1 + - tidgen2: xe1 + datanet2: + nodes: + - tidgen1: xe2 + - tidgen2: xe2 + datanet3: + nodes: + - tidgen1: xe3 + - tidgen2: xe3 + + diff --git a/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov.yaml b/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov.yaml new file mode 100644 index 0000000..a1fe8ab --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov.yaml @@ -0,0 +1,73 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
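+#
+# Two-data-port variant of the tidgen traffic-generator VNF used by the
+# tidgen_ns_2sriov* scenarios: xe0 and xe1 are attached as SR-IOV virtual
+# functions (dedicated: "no") rather than PCI passthrough, eth0 is a bridge
+# port and eth1 the management port, on one NUMA node with 5 paired threads
+# and 12 GB of RAM.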
+# +--- +vnf: + name: tidgenMWC_2sriov + description: tidgen for MWC2016; 12G 10 cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: tidgenMWC-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: tidgenMWC-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe1 + description: Data interface 2 + VNFC: # Virtual machine array + - name: tidgenMWC-VM # name of Virtual Machine + disk: 10 + description: tidgen for MWC 12G 10 cores + # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2 + VNFC image: tidgenMWC + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 5 # "cores", "paired-threads", "threads" + memory: 12 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "no" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml b/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml new file mode 100644 index 0000000..9a9c9e3 --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml @@ -0,0 +1,65 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +vnf: + name: tidgenMWC_2sriov_no_ctrlnet + description: tidgen for MWC2016; 12G 10 cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: tidgenMWC-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: xe0 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe1 + description: Data interface 2 + VNFC: # Virtual machine array + - name: tidgenMWC-VM # name of Virtual Machine + disk: 10 + description: tidgen for MWC 12G 10 cores + # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2 + VNFC image: tidgenMWC + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 5 # "cores", "paired-threads", "threads" + memory: 12 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "no" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov.yaml b/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov.yaml new file mode 100644 index 0000000..7d007cd --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov.yaml @@ -0,0 +1,91 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
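+#
+# Four-data-port SR-IOV variant of the same tidgen VM (xe0-xe3, dedicated:
+# "no"), paired with the tidgen_ns_4sriov* scenarios; eth0 and eth1 remain
+# the bridge and management ports.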
+# +--- +vnf: + name: tidgenMWC_4sriov + description: tidgen for MWC2016; 12G 10 cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: tidgenMWC-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: tidgenMWC-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: tidgenMWC-VM # name of Virtual Machine + disk: 10 + description: tidgen for MWC 12G 10 cores + # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2 + VNFC image: tidgenMWC + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 5 # "cores", "paired-threads", "threads" + memory: 12 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "no" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "no" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" # Optional + bandwidth: 1 Mbps # Optional, informative only diff --git a/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml b/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml new file mode 100644 index 0000000..54c92fd --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml @@ -0,0 +1,83 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +--- +vnf: + name: tidgenMWC_4sriov_no_ctrlnet + description: tidgen for MWC2016; 12G 10 cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: tidgenMWC-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: xe0 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: tidgenMWC-VM # name of Virtual Machine + disk: 10 + description: tidgen for MWC 12G 10 cores + # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2 + VNFC image: tidgenMWC + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 5 # "cores", "paired-threads", "threads" + memory: 12 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "no" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "no" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only diff --git a/modules/core/mano/models/plugins/CMakeLists.txt b/modules/core/mano/models/plugins/CMakeLists.txt new file mode 100644 index 0000000..e26729f --- /dev/null +++ b/modules/core/mano/models/plugins/CMakeLists.txt @@ -0,0 +1,13 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 2014/12/11 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + yang + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/models/plugins/yang/CMakeLists.txt b/modules/core/mano/models/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..145a5e4 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/CMakeLists.txt @@ -0,0 +1,47 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 10/15/2014 +# + +# NOTE: These files will be used by the new MANO subsystem +set(source_yang_files + vnfd.yang vnfr.yang rw-vnfd.yang rw-vnfr.yang + vld.yang vlr.yang rw-vld.yang rw-vlr.yang + nsd.yang nsr.yang rw-nsd.yang rw-nsr.yang + pnfd.yang + vnffgd.yang + ietf-network.yang + ietf-network-topology.yang + ietf-l2-topology.yang + rw-topology.yang + ) + +rift_add_yang_target( + TARGET mano_yang + YANG_FILES ${source_yang_files} + GIR_PATHS ${CMAKE_CURRENT_BINARY_DIR} + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + rwmanifest_yang_gen + rwschema_yang_gen + rwcloud_yang_gen + ) + +rift_gen_yang_tree(mano-pyang-trees + OUTFILE_PREFIX mano.yang + YANG_FILES + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/vnfd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/vld.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/nsd.yang + 
${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-vnfd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-vld.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-nsd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/pnfd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/vnffgd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/ietf-network.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/ietf-network-topology.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/ietf-l2-topology.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-topology.yang + ) diff --git a/modules/core/mano/models/plugins/yang/Makefile b/modules/core/mano/models/plugins/yang/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/models/plugins/yang/ietf-l2-topology.tailf.yang b/modules/core/mano/models/plugins/yang/ietf-l2-topology.tailf.yang new file mode 100644 index 0000000..d09e914 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-l2-topology.tailf.yang @@ -0,0 +1,40 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-l2-topology-annotation +{ + namespace "urn:ietf:params:xml:ns:yang:ietf-l2-topology"; + prefix "ietf-l2-topology-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + import ietf-network-topology { + prefix nt; + } + + import ietf-l2-topology { + prefix lt; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nd:server-provided" { + tailf:callpoint base_show; + } + +} diff --git a/modules/core/mano/models/plugins/yang/ietf-l2-topology.yang b/modules/core/mano/models/plugins/yang/ietf-l2-topology.yang new file mode 100755 index 0000000..9f572cb --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-l2-topology.yang @@ -0,0 +1,578 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-l2-topology { + yang-version 1; + namespace "urn:ietf:params:xml:ns:yang:ietf-l2-topology"; + prefix "l2t"; + + import ietf-network { + prefix "nw"; + } + + import ietf-network-topology { + prefix "nt"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + organization "TBD"; + contact "I-D Editor: jie.dong@huawei.com"; + + description + "This module defines a basic model for + the layer-2 topology of a network"; + + revision "2015-06-23" { + description "Initial revision"; + reference "draft-ietf-i2rs-l2-network-topology-01"; + } + + /* + * Typedefs + */ + + typedef vlan { + type uint16 { + range "0..4095"; + } + description "VLAN ID"; + } + + typedef trill-nickname { + type uint16; + description "TRILL Nickname"; + } + + typedef flag-type { + type identityref { + base 
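+      // Informative: flags are typed as identityrefs rooted at flag-identity
+      // (declared under "Identities" below) rather than as a fixed
+      // enumeration, so other modules can add flag values by deriving new
+      // identities from that base.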
"flag-identity"; + } + description "Base type for flags"; + } + + typedef l2-network-event-type { + type enumeration { + enum "add" { + value 0; + description "An L2 node or link or termination-point + has been added"; + } + enum "remove" { + value 1; + description "An L2 node or link or termination-point + has been removed"; + } + enum "update" { + value 2; + description "An L2 node or link or termination-point + has been updated"; + } + } + description "l2 network event type for notifications"; + } // l2-topology-event-type + + + /* + * Features + */ + + feature VLAN { + description + "Indicates that the system supports the + vlan functions"; + } + + feature QinQ { + description + "Indicates that the system supports the + qinq functions"; + } + + feature PBB { + description + "Indicates that the device supports the + provider-backbone-bridging functions"; + } + + feature VPLS { + description + "Indicates that the device supports the + VPLS functions"; + reference "RFC 4761, RFC 4762"; + } + + feature TRILL { + description + "Indicates that the device supports the + TRILL functions"; + reference "RFC 6325"; + } + + feature VXLAN { + description + "Indicates that the device supports the + VXLAN functions"; + reference "RFC 7348"; + } + + /* + * Identities + */ + identity flag-identity { + description "Base type for flags"; + } + + identity encapsulation-type { + description + "Base identity from which specific encapsulation + types are derived."; + } + + identity eth-encapsulation-type { + base encapsulation-type; + description + "Base identity from which specific ethernet + encapsulation types are derived."; + + } + + identity ethernet { + base eth-encapsulation-type; + description + "native ethernet encapsulation"; + } + + identity vlan { + base eth-encapsulation-type; + description + "vlan encapsulation"; + } + + identity qinq { + base eth-encapsulation-type; + description + "qinq encapsulation"; + } + + identity pbb { + base eth-encapsulation-type; + description + "pbb encapsulation"; + } + + identity trill { + base eth-encapsulation-type; + description + "trill encapsulation"; + } + + identity vpls { + base eth-encapsulation-type; + description + "vpls encapsulation"; + } + + identity vxlan { + base eth-encapsulation-type; + description + "vxlan encapsulation"; + } + + identity frame-relay { + base encapsulation-type; + description + "Frame Relay encapsulation"; + } + + identity ppp { + base encapsulation-type; + description + "PPP encapsulation"; + } + + identity hdlc { + base encapsulation-type; + description + "HDLC encapsulation"; + } + + identity atm { + base encapsulation-type; + description + "Base identity from which specific ATM + encapsulation types are derived."; + + } + + identity pwe3 { + base encapsulation-type; + description + "Base identity from which specific pw + encapsulation types are derived."; + } + + + /* + * Groupings + */ + + + grouping l2-network-type { + description "Identify the topology type to be L2."; + container l2-network { + presence "indicates L2 Network"; + description + "The presence of the container node indicates + L2 Topology"; + } + } + + grouping l2-network-attributes { + description "L2 Topology scope attributes"; + container l2-network-attributes { + description "Containing L2 network attributes"; + leaf name { + type string; + description "Name of the L2 network"; + } + + leaf-list flag { + type flag-type; + description "L2 network flags"; + } + } + } + + grouping l2-node-attributes { + description "L2 node attributes"; + container 
l2-node-attributes { + description "Containing L2 node attributes"; + leaf name { + type string; + description "Node name"; + } + leaf description { + type string; + description "Node description"; + } + leaf-list management-address { + type inet:ip-address; + description "System management address"; + } + leaf management-vid { + if-feature VLAN; + type vlan; + description "System management VID"; + } + leaf-list nick-name { + if-feature TRILL; + type trill-nickname; + description "Nickname of the RBridge"; + } + leaf-list flag { + type flag-type; + description "Node operational flags"; + } + } + } // grouping l2-node-attributes + + + grouping l2-link-attributes { + description "L2 link attributes"; + container l2-link-attributes { + description "Containing L2 link attributes"; + leaf name { + type string; + description "Link name"; + } + leaf-list flag { + type flag-type; + description "Link flags"; + } + leaf rate { + type decimal64 { + fraction-digits 2; + } + description "Link rate"; + + } + leaf delay { + type uint32; + description "Link delay in microseconds"; + } + leaf-list srlg { + type uint32; + description + "List of Shared Risk Link Groups + this link belongs to."; + } + } + } // grouping l2-link-attributes + + grouping l2-termination-point-attributes { + description "L2 termination point attributes"; + container l2-termination-point-attributes { + description "Containing L2 TP attributes"; + leaf description { + type string; + description "Port description"; + } + + leaf maximum-frame-size { + type uint32; + description "Maximum frame size"; + } + + choice l2-termination-point-type { + description + "Indicates termination-point type + specific attributes"; + case ethernet { + leaf mac-address { + type yang:mac-address; + description "Interface MAC address"; + } + + leaf eth-encapsulation { + type identityref { + base eth-encapsulation-type; + } + description + "Encapsulation type of this + ternimation point."; + } + + leaf port-vlan-id { + if-feature VLAN; + type vlan; + description "Port VLAN ID"; + } + + list vlan-id-name { + if-feature VLAN; + key "vlan-id"; + description "Interface configured VLANs"; + leaf vlan-id { + type vlan; + description "VLAN ID"; + } + leaf vlan-name { + type string; + description "VLAN Name"; + } + } + } //case ethernet + + case legacy { + leaf encapsulation { + type identityref { + base encapsulation-type; + } + description + "Encapsulation type of this termination point."; + } + } //case legacy + + } //choice termination-point-type + + leaf tp-state { + type enumeration { + enum in-use { + value 0; + description + "the termination point is in forwarding state"; + } + enum blocking { + value 1; + description + "the termination point is in blocking state"; + } + enum down { + value 2; + description + "the termination point is in down state"; + } + enum others { + value 3; + description + "the termination point is in other state"; + } + } + config false; + description "State of the termination point"; + } + } + } // grouping l2-termination-point-attributes + +/*** grouping of network/node/link/tp leaf-refs ***/ + + grouping network-ref { + description + "Grouping for an absolute reference to a network topology + instance."; + leaf network-ref { + type leafref { + path "/nw:network/nw:network-id"; + } + description + "An absolute reference to a network topology instance."; + } + } + + grouping link-ref { + description + "Grouping for an absolute reference to a link instance."; + uses network-ref; + leaf link-ref { + type leafref { + path 
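+        // Informative: the path is qualified by the sibling network-ref leaf
+        // (pulled in through "uses network-ref" above), so a link-ref always
+        // resolves within one specific network instance; the node-ref and
+        // tp-ref groupings below follow the same pattern.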
"/nw:network" + +"[nw:network-id = current()/../network-ref]" + +"/nt:link/nt:link-id"; + } + description + "An absolute reference to a link instance."; + } + } + + grouping node-ref { + description + "Grouping for an absolute reference to a node instance."; + uses network-ref; + leaf node-ref { + type leafref { + path "/nw:network" + +"[nw:network-id = current()/../network-ref]" + +"/nw:node/nw:node-id"; + } + description + "An absolute reference to a node instance."; + } + } + + grouping tp-ref { + description + "Grouping for an absolute reference to a termination point."; + uses node-ref; + leaf tp-ref { + type leafref { + path "/nw:network" + +"[nw:network-id = current()/../network-ref]" + +"/nw:node[nw:node-id = current()/../node-ref]" + +"/nt:termination-point/nt:tp-id"; + } + description + "Grouping for an absolute reference to a TP."; + } + } + + + /* + * Data nodes + */ + augment "/nw:network/nw:network-types" { + description + "Introduce new network type for L2 topology"; + uses l2-network-type; + } + + augment "/nw:network" { + /* RIFT-Change: when not to be used yet + when "nw:network-types/l2-network" { + description + "Augmentation parameters apply only for networks + with L2 topology"; + } + */ + description + "Configuration parameters for the L2 network + as a whole"; + uses l2-network-attributes; + } + + augment "/nw:network/nw:node" { + /* RIFT-Change: when not to be used yet + when "../nw:network-types/l2-network" { + description + "Augmentation parameters apply only for networks + with L2 topology"; + } + */ + description + "Configuration parameters for L2 at the node + level"; + uses l2-node-attributes; + } + + augment "/nw:network/nt:link" { + /* RIFT-Change: when not to be used yet + when "/nw:network/nw:network-types/l2-network" { + description + "Augmentation parameters apply only for networks + with L2 topology"; + } + */ + description "Augment L2 topology link information"; + uses l2-link-attributes; + } + + augment "/nw:network/nw:node/nt:termination-point" { + /* RIFT-Change: when not to be used yet + when "/nw:network/nw:network-types/l2-network" { + description + "Augmentation parameters apply only for networks + with L2 topology"; + } + */ + description + "Augment L2 topology termination point configuration"; + uses l2-termination-point-attributes; + } + + /* + * Notifications + */ + + notification l2-node-event { + description "Notification event for L2 node"; + leaf event-type { + type l2-network-event-type; + description "Event type"; + } + uses node-ref; + uses l2-network-type; + uses l2-node-attributes; + } + + notification l2-link-event { + description "Notification event for L2 link"; + leaf event-type { + type l2-network-event-type; + description "Event type"; + } + uses link-ref; + uses l2-network-type; + uses l2-link-attributes; + } + + notification l2-termination-point-event { + description "Notification event for L2 termination point"; + leaf event-type { + type l2-network-event-type; + description "Event type"; + } + uses tp-ref; + uses l2-network-type; + uses l2-termination-point-attributes; + } + +} // module l2-topology diff --git a/modules/core/mano/models/plugins/yang/ietf-network-topology.tailf.yang b/modules/core/mano/models/plugins/yang/ietf-network-topology.tailf.yang new file mode 100644 index 0000000..26868e5 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-network-topology.tailf.yang @@ -0,0 +1,34 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-network-topology-annotation +{ + namespace 
"urn:ietf:params:xml:ns:yang:ietf-network-topology"; + prefix "ietf-network-topology-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + import ietf-network-topology { + prefix nt; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + tailf:annotate "/nd:network/nd:server-provided" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/ietf-network-topology.yang b/modules/core/mano/models/plugins/yang/ietf-network-topology.yang new file mode 100755 index 0000000..e8f7c79 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-network-topology.yang @@ -0,0 +1,257 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-network-topology { + yang-version 1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network-topology"; + prefix lnk; + + import ietf-inet-types { + prefix inet; + } + import ietf-network { + prefix nd; + } + + organization "TBD"; + contact + "WILL-BE-DEFINED-LATER"; + description + "This module defines a common base model for network topology, + augmenting the base network model with links to connect nodes, + as well as termination points to terminate links on nodes."; + + revision 2015-06-08 { + description + "Initial revision."; + reference "draft-ietf-i2rs-yang-network-topo-01"; + } + + typedef link-id { + type inet:uri; + description + "An identifier for a link in a topology. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same link in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in + separate datastores. An implementation MAY choose to capture + semantics in the identifier, for example to indicate the type + of link and/or the type of topology that the link is a part + of."; + } + + typedef tp-id { + type inet:uri; + description + "An identifier for termination points on a node. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same TP in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in + separate datastores. An implementation MAY choose to capture + semantics in the identifier, for example to indicate the type + of TP and/or the type of node and topology that the TP is a + part of."; + } + + grouping link-ref { + description + "References a link in a specific network."; + leaf link-ref { + type leafref { + path "/nd:network[nd:network-id=current()/../"+ + "nd:network-ref]/link/link-id"; + } + description + "A type for an absolute reference a link instance. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + uses nd:network-ref; + } + + grouping tp-ref { + description + "References a termination point in a specific node."; + leaf tp-ref { + type leafref { + path "/nd:network[nd:network-id=current()/../"+ + "nd:network-ref]/nd:node[nd:node-id=current()/../"+ + "nd:node-ref]/termination-point/tp-id"; + } + description + "A type for an absolute reference to a termination point. + (This type should not be used for relative references. 
+ In such a case, a relative path should be used instead.)"; + } + uses nd:node-ref; + } + + augment "/nd:network" { + description + "Add links to the network model."; + list link { + key "link-id"; + + description + "A Network Link connects a by Local (Source) node and + a Remote (Destination) Network Nodes via a set of the + nodes' termination points. + As it is possible to have several links between the same + source and destination nodes, and as a link could + potentially be re-homed between termination points, to + ensure that we would always know to distinguish between + links, every link is identified by a dedicated link + identifier. + Note that a link models a point-to-point link, not a + multipoint link. + Layering dependencies on links in underlay topologies are + not represented as the layering information of nodes and of + termination points is sufficient."; + container source { + description + "This container holds the logical source of a particular + link."; + leaf source-node { + type leafref { + // RIFT change: + path "../../../../nd:network/nd:node/nd:node-id"; + } + mandatory true; + description + "Source node identifier, must be in same topology."; + } + leaf source-tp { + type leafref { + // RIFT change: + path "../../../../nd:network/nd:node[nd:node-id=current()/../"+ + "source-node]/termination-point/tp-id"; + } + description + "Termination point within source node that terminates + the link."; + } + } + container destination { + description + "This container holds the logical destination of a + particular link."; + leaf dest-node { + type leafref { + // RIFT change + path "../../../../nd:network/nd:node/nd:node-id"; + } + mandatory true; + description + "Destination node identifier, must be in the same + network."; + } + leaf dest-tp { + type leafref { + // RIFT change: + path "../../../../nd:network/nd:node[nd:node-id=current()/../"+ + "dest-node]/termination-point/tp-id"; + } + description + "Termination point within destination node that + terminates the link."; + } + } + leaf link-id { + type link-id; + description + "The identifier of a link in the topology. + A link is specific to a topology to which it belongs."; + } + list supporting-link { + key "network-ref link-ref"; + description + "Identifies the link, or links, that this link + is dependent on."; + leaf network-ref { + type leafref { + // RIFT change: + path "../../../../nd:network/nd:supporting-network/nd:network-ref"; + } + description + "This leaf identifies in which underlay topology + supporting link is present."; + } + leaf link-ref { + type leafref { + path "/nd:network[nd:network-id=current()/.."+ + "/network-ref]/link/link-id"; + } + description + "This leaf identifies a link which is a part + of this link's underlay. Reference loops, in which + a link identifies itself as its underlay, either + directly or transitively, are not allowed."; + } + } + } + } + augment "/nd:network/nd:node" { + description + "Augment termination points which terminate links. + Termination points can ultimately be mapped to interfaces."; + list termination-point { + key "tp-id"; + description + "A termination point can terminate a link. 
+ Depending on the type of topology, a termination point + could, for example, refer to a port or an interface."; + leaf tp-id { + type tp-id; + description + "Termination point identifier."; + } + list supporting-termination-point { + key "network-ref node-ref tp-ref"; + description + "The leaf list identifies any termination points that + the termination point is dependent on, or maps onto. + Those termination points will themselves be contained + in a supporting node. + This dependency information can be inferred from + the dependencies between links. For this reason, + this item is not separately configurable. Hence no + corresponding constraint needs to be articulated. + The corresponding information is simply provided by the + implementing system."; + leaf network-ref { + type leafref { + // RIFT change: + path "/nd:network/nd:node/nd:supporting-node/nd:network-ref"; + } + description + "This leaf identifies in which topology the + supporting termination point is present."; + } + leaf node-ref { + type leafref { + // RIFT change: + path "/nd:network/nd:node/nd:supporting-node/nd:node-ref"; + } + description + "This leaf identifies in which node the supporting + termination point is present."; + } + leaf tp-ref { + type leafref { + path "/nd:network[nd:network-id=current()/../"+ + "network-ref]/nd:node[nd:node-id=current()/../"+ + "node-ref]/termination-point/tp-id"; + } + description + "Reference to the underlay node, must be in a + different topology"; + } + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/ietf-network.tailf.yang b/modules/core/mano/models/plugins/yang/ietf-network.tailf.yang new file mode 100644 index 0000000..705842a --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-network.tailf.yang @@ -0,0 +1,31 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-network-annotation +{ + namespace "urn:ietf:params:xml:ns:yang:ietf-network"; + prefix "ietf-network-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nd:server-provided" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/ietf-network.yang b/modules/core/mano/models/plugins/yang/ietf-network.yang new file mode 100755 index 0000000..a059e94 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-network.yang @@ -0,0 +1,157 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-network { + yang-version 1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network"; + prefix nd; + + import ietf-inet-types { + prefix inet; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + organization "TBD"; + contact + "WILL-BE-DEFINED-LATER"; + description + "This module defines a common base model for a collection + of nodes in a network. 
Node definitions s are further used + in network topologies and inventories."; + + revision 2015-06-08 { + description + "Initial revision."; + reference "draft-ietf-i2rs-yang-network-topo-01"; + } + + typedef node-id { + type inet:uri; + description + "Identifier for a node."; + } + + typedef network-id { + type inet:uri; + description + "Identifier for a network."; + } + + grouping network-ref { + description + "Contains the information necessary to reference a network, + for example an underlay network."; + leaf network-ref { + type leafref { + path "/network/network-id"; + } + description + "Used to reference a network, for example an underlay + network."; + } + } + + grouping node-ref { + description + "Contains the information necessary to reference a node."; + leaf node-ref { + type leafref { + path "/network[network-id=current()/../network-ref]"+ + "/node/node-id"; + } + description + "Used to reference a node. + Nodes are identified relative to the network they are + contained in."; + } + uses network-ref; + } + + list network { + config false; + key "network-id"; + description + "Describes a network. + A network typically contains an inventory of nodes, + topological information (augmented through + network-topology model), as well as layering + information."; + container network-types { + description + "Serves as an augmentation target. + The network type is indicated through corresponding + presence containers augmented into this container."; + } + leaf network-id { + type network-id; + description + "Identifies a network."; + } + leaf server-provided { + type boolean; + config false; + description + "Indicates whether the information concerning this + particular network is populated by the server + (server-provided true, the general case for network + information discovered from the server), + or whether it is configured by a client + (server-provided true, possible e.g. for + service overlays managed through a controller)."; + } + list supporting-network { + key "network-ref"; + description + "An underlay network, used to represent layered network + topologies."; + + leaf network-ref { + type leafref { + path "/network/network-id"; + } + description + "References the underlay network."; + } + } + list node { + key "node-id"; + description + "The inventory of nodes of this network."; + leaf node-id { + type node-id; + description + "Identifies a node uniquely within the containing + network."; + } + list supporting-node { + key "network-ref node-ref"; + description + "Represents another node, in an underlay network, that + this node is supported by. 
Used to represent layering + structure."; + leaf network-ref { + type leafref { + path "../../../supporting-network/network-ref"; + } + description + "References the underlay network that the + underlay node is part of."; + } + leaf node-ref { + type leafref { + path "/network/node/node-id"; + } + description + "References the underlay node itself."; + } + } + } + } +} + diff --git a/modules/core/mano/models/plugins/yang/nsd.tailf.yang b/modules/core/mano/models/plugins/yang/nsd.tailf.yang new file mode 100644 index 0000000..80711c3 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/nsd.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module nsd-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/nsd-annotation"; + prefix "nsd-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import nsd { + prefix nsd; + } +} diff --git a/modules/core/mano/models/plugins/yang/nsd.yang b/modules/core/mano/models/plugins/yang/nsd.yang new file mode 100755 index 0000000..328ac0c --- /dev/null +++ b/modules/core/mano/models/plugins/yang/nsd.yang @@ -0,0 +1,871 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module nsd +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:nsd"; + prefix "nsd"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import vld { + prefix "vld"; + } + + import vnfd { + prefix "vnfd"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2014-10-27 { + description + "Initial revision. This YANG file defines + the Network Service Descriptor (NSD)"; + reference + "Derived from earlier versions of base YANG files"; + } + + typedef parameter-data-type { + type enumeration { + enum string; + enum integer; + enum boolean; + } + } + + grouping primitive-parameter { + leaf name { + description + "Name of the parameter."; + type string; + } + + leaf data-type { + description + "Data type associated with the name."; + type parameter-data-type; + } + + leaf mandatory { + description "Is this field mandatory"; + type boolean; + default false; + } + + leaf default-value { + description "The default value for this field"; + type string; + } + + leaf parameter-pool { + description "NSD Parameter pool name to use for this paramter"; + type string; + } + + leaf read-only { + description + "The value should be greyed out by the UI. + Only applies to parameters with default values."; + type boolean; + } + + leaf hidden { + description + "The value should be hidden by the UI. + Only applies to parameters with default values."; + type boolean; + } + } + + grouping vnf-configuration { + container vnf-configuration { + description + "Information regarding the VNF configuration + is captured here. 
Note that if the NS contains + multiple instances of the same VNF, each instance + of the VNF may have different configuration"; + + leaf config-type { + description + "Must use this configuration type and fill in only pertaining + config-method below"; + type enumeration { + enum none; + enum netconf; + enum rest; + enum script; + enum juju; + } + } + + choice config-method { + description + "Defines the configuration method for the VNF."; + case netconf { + description + "Use NETCONF for configuring the VNF."; + container netconf { + leaf target { + description + "Netconf configuration target"; + type enumeration { + enum running; + enum candidate; + } + } + + leaf protocol { + description + "Protocol to use for netconf (e.g. ssh)"; + type enumeration { + enum None; + enum ssh; + } + } + + leaf port { + description + "Port for the netconf server."; + type inet:port-number; + } + } + } + + case rest { + description + "Use REST for configuring the VNF."; + container rest { + leaf port { + description + "Port for the REST server."; + type inet:port-number; + } + } + } + + case script { + description + "Use custom script for configuring the VNF. + This script is executed in the context of + Orchestrator."; + container script { + leaf script-type { + description + "Script type - currently supported : bash, expect"; + type enumeration { + enum bash; + enum expect; + } + } + } + } + + case juju { + description + "Configure the VNF through Juju."; + container juju { + leaf charm { + description "Juju charm to use with the VNF."; + type string; + } + } + } + } + + container config-access { + leaf mgmt-ip-address { + description + "IP address to be used to configure this VNF, + optional if it is possible to resolve dynamically."; + type inet:ip-address; + } + + leaf username { + description + "username for configuration."; + type string; + } + + leaf password { + description + "Password for configuration access authentication."; + type string; + } + } + + container input-params { + description + "Miscelaneous input parameters to be considered + while processing the NSD to apply configuration"; + + leaf config-priority { + description + "Configuration priority - order of confgiration + to be applied to each VNF in this NS, + low number gets precedence over high number"; + type uint64; + } + + leaf config-delay { + description + "Wait (seconds) before applying the configuration to VNF"; + type uint64; + } + } + + list config-primitive { + rwpb:msg-new ConfigPrimitive; + description + "List of configuration primitives supported by the + configuration agent for this VNF."; + key "name"; + + leaf name { + description + "Name of the configuration primitive."; + type string; + } + + list parameter { + description + "List of parameters to the configuration primitive."; + key "name"; + uses primitive-parameter; + } + } + + list initial-config-primitive { + rwpb:msg-new InitialConfigPrimitive; + description + "Initial set of configuration primitives."; + key "seq"; + leaf seq { + description + "Sequence number for the configuration primitive."; + type uint64; + } + + leaf name { + description + "Name of the configuration primitive."; + type string; + } + + list parameter { + key "name"; + leaf name { + type string; + } + + leaf value { + type string; + } + } + } + + leaf config-template { + description + "Configuration template for each VNF"; + type string; + } + } + } // END - grouping vnf-configuration + + + container nsd-catalog { + + list nsd { + key "id"; + + leaf id { + description "Identifier for the 
NSD."; + type yang:uuid; + } + + leaf name { + description "NSD name."; + mandatory true; + type string; + } + + leaf short-name { + description "NSD short name."; + type string; + } + + + leaf vendor { + description "Vendor of the NSD."; + type string; + } + + leaf logo { + description + "Vendor logo for the Network Service"; + type string; + } + + leaf description { + description "Description of the NSD."; + type string; + } + + leaf version { + description "Version of the NSD"; + type string; + } + + list connection-point { + description + "List for external connection points. + Each NS has one or more external connection + points. As the name implies that external + connection points are used for connecting + the NS to other NS or to external networks. + Each NS exposes these connection points to + the orchestrator. The orchestrator can + construct network service chains by + connecting the connection points between + different NS."; + + key "name"; + leaf name { + description + "Name of the NS connection point."; + type string; + } + + leaf type { + description + "Type of the connection point."; + type manotypes:connection-point-type; + } + } + + leaf-list vld-ref { + type leafref { + path "/vld:vld-catalog/vld:vld/vld:id"; + } + } + + /* Still having issues modelling this, + see the comments under vnfd-connection-point-ref + */ + list vld { + description + "List of Virtual Link Descriptors."; + + key "id"; + + leaf id { + description + "Identifier for the VLD."; + type yang:uuid; + } + + leaf name { + description + "Virtual Link Descriptor (VLD) name."; + type string; + } + + leaf short-name { + description + "Short name for VLD for UI"; + type string; + } + + leaf vendor { + description "Provider of the VLD."; + type string; + } + + leaf description { + description "Description of the VLD."; + type string; + } + + leaf version { + description "Version of the VLD"; + type string; + } + + leaf type { + type manotypes:virtual-link-type; + } + + leaf root-bandwidth { + description + "For ELAN this is the aggregate bandwidth."; + type uint64; + } + + leaf leaf-bandwidth { + description + "For ELAN this is the bandwidth of branches."; + type uint64; + } + + list vnfd-connection-point-ref { + description + "A list of references to connection points."; + key "member-vnf-index-ref"; + + leaf member-vnf-index-ref { + description "Reference to member-vnf within constituent-vnfds"; + type leafref { + path "../../../nsd:constituent-vnfd/nsd:member-vnf-index"; + } + } + + leaf vnfd-id-ref { + description + "A reference to a vnfd. This is a + leafref to path: + ../../../nsd:constituent-vnfd + + [nsd:id = current()/../nsd:id-ref] + + /nsd:vnfd-id-ref + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. + Once that is resovled this will switched to use + leafref"; + type yang:uuid; + } + + leaf vnfd-connection-point-ref { + description + "A reference to a connection point name + in a vnfd. This is a leafref to path: + /vnfd:vnfd-catalog/vnfd:vnfd + + [vnfd:id = current()/../nsd:vnfd-id-ref] + + /vnfd:connection-point/vnfd:name + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. 
+ Once that is resovled this will switched to use + leafref"; + type string; + } + } + + // replicate for pnfd container here + uses manotypes:provider-network; + } + + list constituent-vnfd { + description + "List of VNFDs that are part of this + network service."; + + key "member-vnf-index"; + + leaf member-vnf-index { + description + "Identifier/index for the VNFD. This separate id + is required to ensure that multiple VNFs can be + part of single NS"; + type uint64; + } + + leaf vnfd-id-ref { + description + "Identifier for the VNFD."; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + + // Provide this VNF configuration parameters + uses vnf-configuration; + } + + list vnf-dependency { + description + "List of VNF dependencies."; + key vnf-source-ref; + leaf vnf-source-ref { + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + leaf vnf-depends-on-ref { + description + "Reference to VNF that sorce VNF depends."; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + } + + list vnffgd { + description + "List of VNF Forwarding Graph Descriptors (VNFFGD)."; + + key "id"; + + leaf id { + description + "Identifier for the VNFFGD."; + type yang:uuid; + } + + leaf name { + description + "VNFFGD name."; + type string; + } + + leaf short-name { + description + "Short name for VNFFGD for UI"; + type string; + } + + leaf vendor { + description "Provider of the VNFFGD."; + type string; + } + + leaf description { + description "Description of the VNFFGD."; + type string; + } + + leaf version { + description "Version of the VNFFGD"; + type string; + } + + list rsp { + description + "List of Rendered Service Paths (RSP)."; + + key "id"; + + leaf id { + description + "Identifier for the RSP."; + type yang:uuid; + } + + leaf name { + description + "RSP name."; + type string; + } + + list vnfd-connection-point-ref { + description + "A list of references to connection points."; + key "member-vnf-index-ref"; + + leaf member-vnf-index-ref { + description "Reference to member-vnf within constituent-vnfds"; + type leafref { + path "../../../../nsd:constituent-vnfd/nsd:member-vnf-index"; + } + } + + leaf order { + type uint8; + description + "A number that denotes the order of a VNF in a chain"; + } + + leaf vnfd-id-ref { + description + "A reference to a vnfd. This is a + leafref to path: + ../../../../nsd:constituent-vnfd + + [nsd:id = current()/../nsd:id-ref] + + /nsd:vnfd-id-ref + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. + Once that is resovled this will switched to use + leafref"; + type yang:uuid; + } + + leaf vnfd-connection-point-ref { + description + "A reference to a connection point name + in a vnfd. This is a leafref to path: + /vnfd:vnfd-catalog/vnfd:vnfd + + [vnfd:id = current()/../nsd:vnfd-id-ref] + + /vnfd:connection-point/vnfd:name + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. 
+ Once that is resovled this will switched to use + leafref"; + type string; + } + } + } //rsp + + list classifier { + description + "List of classifier rules."; + + key "id"; + + leaf id { + description + "Identifier for the classifier rule."; + type yang:uuid; + } + + leaf name { + description + "Name of the classifier."; + type string; + } + + leaf rsp-id-ref { + description + "A reference to the RSP."; + type leafref { + path "../../nsd:rsp/nsd:id"; + } + } + + + leaf member-vnf-index-ref { + description "Reference to member-vnf within constituent-vnfds"; + type leafref { + path "../../../nsd:constituent-vnfd/nsd:member-vnf-index"; + } + } + + leaf vnfd-id-ref { + description + "A reference to a vnfd. This is a + leafref to path: + ../../../nsd:constituent-vnfd + + [nsd:id = current()/../nsd:id-ref] + + /nsd:vnfd-id-ref + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. + Once that is resovled this will switched to use + leafref"; + type yang:uuid; + } + + leaf vnfd-connection-point-ref { + description + "A reference to a connection point name + in a vnfd. This is a leafref to path: + /vnfd:vnfd-catalog/vnfd:vnfd + + [vnfd:id = current()/../nsd:vnfd-id-ref] + + /vnfd:connection-point/vnfd:name + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. + Once that is resovled this will switched to use + leafref"; + type string; + } + + list match-attributes { + description + "List of match attributes."; + + key "id"; + + leaf id { + description + "Identifier for the classifier match attribute rule."; + type yang:uuid; + } + + leaf ip-proto { + description + "IP Protocol."; + type uint8; + } + + leaf source-ip-address { + description + "Source IP address."; + type inet:ip-address; + } + + leaf destination-ip-address { + description + "Destination IP address."; + type inet:ip-address; + } + + leaf source-port { + description + "Source port number."; + type inet:port-number; + } + + leaf destination-port { + description + "Destination port number."; + type inet:port-number; + } + //TODO: Add more match criteria + } //match-attributes + } // classifier + } // vnffgd + + uses manotypes:monitoring-param; + uses manotypes:input-parameter-xpath; + + list parameter-pool { + description + "Pool of parameter values which must be + pulled from during configuration"; + key "name"; + + leaf name { + description + "Name of the configuration value pool"; + type string; + } + + container range { + description + "Create a range of values to populate the pool with"; + + leaf start-value { + description + "Generated pool values start at this value"; + type uint32; + mandatory true; + } + + leaf end-value { + description + "Generated pool values stop at this value"; + type uint32; + mandatory true; + } + } + } + + list config-primitive { + description + "Network service level configuration primitives."; + + key "name"; + leaf name { + description + "Name of the configuration primitive."; + type string; + } + + list parameter { + description + "List of parameters to the configuration primitive."; + + key "name"; + uses primitive-parameter; + } + + list parameter-group { + description + "Grouping of parameters which are logically grouped in UI"; + key "name"; + + leaf name { + description + "Name of the parameter group"; + type string; + } + + list parameter { + description + "List of parameters to the configuration 
primitive."; + key "name"; + uses primitive-parameter; + } + + leaf mandatory { + description "Is this parameter group mandatory"; + type boolean; + default true; + } + } + + list vnf-primitive-group { + description + "List of configuration primitives grouped by VNF."; + + key "member-vnf-index-ref"; + leaf member-vnf-index-ref { + description + "Reference to member-vnf within constituent-vnfds"; + type uint64; + } + + leaf vnfd-id-ref { + description + "A reference to a vnfd. This is a + leafref to path: + ../../../../nsd:constituent-vnfd + + [nsd:id = current()/../nsd:id-ref] + + /nsd:vnfd-id-ref + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. + Once that is resovled this will switched to use + leafref"; + + type string; + } + + leaf vnfd-name { + description + "Name of the VNFD"; + type string; + } + + list primitive { + key "index"; + + leaf index { + description "Index of this primitive"; + type uint32; + } + + leaf name { + description "Name of the primitive in the VNF primitive "; + type string; + } + } + } + + leaf user-defined-script { + description + "A user defined script."; + type string; + } + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/nsr.cli.xml b/modules/core/mano/models/plugins/yang/nsr.cli.xml new file mode 100755 index 0000000..61ea6f0 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/nsr.cli.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/modules/core/mano/models/plugins/yang/nsr.tailf.yang b/modules/core/mano/models/plugins/yang/nsr.tailf.yang new file mode 100644 index 0000000..8fca452 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/nsr.tailf.yang @@ -0,0 +1,35 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module nsr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/nsr-annotation"; + prefix "nsr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import nsr { + prefix nsr; + } + + tailf:annotate "/nsr:ns-instance-opdata" { + tailf:callpoint base_show; + } + tailf:annotate "/nsr:exec-ns-config-primitive" { + tailf:actionpoint rw_action; + } + tailf:annotate "/nsr:get-ns-config-primitive-values" { + tailf:actionpoint rw_action; + } +} diff --git a/modules/core/mano/models/plugins/yang/nsr.yang b/modules/core/mano/models/plugins/yang/nsr.yang new file mode 100755 index 0000000..6d7816e --- /dev/null +++ b/modules/core/mano/models/plugins/yang/nsr.yang @@ -0,0 +1,859 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module nsr +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:nsr"; + prefix "nsr"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import vlr { + prefix "vlr"; + } + + import vld { + prefix "vld"; + } + + import nsd { + prefix "nsd"; + } + + import vnfr { + prefix "vnfr"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + import rw-sdn { + prefix "rwsdn"; + } + + revision 2015-09-10 { + description + "Initial revision. 
This YANG file defines + the Network Service Record (NSR)"; + reference + "Derived from earlier versions of base YANG files"; + } + + typedef config-states { + type enumeration { + enum init; + enum configuring; + enum configured; + enum failed; + } + } + + container ns-instance-config { + + list nsr { + key "id"; + unique "name"; + + leaf id { + description "Identifier for the NSR."; + type yang:uuid; + } + + leaf name { + description "NSR name."; + type string; + } + + leaf short-name { + description "NSR short name."; + type string; + } + + leaf description { + description "NSR description."; + type string; + } + + leaf nsd-ref { + description "Reference to NSD"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:id"; + } + } + + leaf admin-status { + description + "This is the administrative status of the NS instance"; + + type enumeration { + enum ENABLED; + enum DISABLED; + } + } + + uses manotypes:input-parameter; + } + } + + + grouping vnffgr { + + list vnffgr { + key "id"; + + leaf id { + description "Identifier for the VNFFGR."; + type yang:uuid; + } + + leaf nsd-id { + description + "Network sevice descriptor ID reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:id"; + } + } + + + leaf vnffgd-id-ref { + description "VNFFG descriptor id reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd" + + "[nsd:id = current()/../nsr:nsd-id]" + + "/nsd:vnffgd/nsd:id"; + } + } + + leaf vnffgd-name-ref { + description "VNFFG descriptor name reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd" + + "[nsd:id = current()/../nsr:nsd-id]" + + "/nsd:vnffgd[nsd:id = current()/../vnffgd-id-ref]" + + "/nsd:name"; + } + } + + leaf sdn-account { + description + "The SDN account to use when requesting resources for + this vnffgr"; + type leafref { + path "/rwsdn:sdn-account/rwsdn:name"; + } + } + + leaf operational-status { + description + "The operational status of the VNFFGR instance + init : The VNFFGR has just started. + running : The VNFFGR is in running state. + terminate : The VNFFGR is being terminated. + terminated : The VNFFGR is in the terminated state. + failed : The VNFFGR instantiation failed + "; + + type enumeration { + rwpb:enum-type "VnffgrOperationalStatus"; + enum init; + enum running; + enum terminate; + enum terminated; + enum failed; + } + } + + list rsp { + key "id"; + + leaf id { + description + "Identifier for the RSP."; + type yang:uuid; + } + + leaf name { + description + "Name for the RSP"; + type string; + } + + leaf vnffgd-rsp-id-ref { + description + "Identifier for the VNFFG Descriptor RSP reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:vnffgd" + + "[nsd:id = current()/../../nsr:vnffgd-id-ref]" + + "/nsd:rsp/nsd:id"; + } + } + + leaf vnffgd-rsp-name-ref { + description + "Name for the VNFFG Descriptor RSP reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:vnffgd" + + "[nsd:id = current()/../../nsr:vnffgd-id-ref]" + + "/nsd:rsp[nsd:id=current()/../vnffgd-rsp-id-ref]" + + "/nsd:name"; + } + } + + leaf path-id { + description + "Unique Identifier for the service path"; + type uint32; + } + + list vnfr-connection-point-ref { + key "hop-number"; + leaf hop-number { + description + "Monotonically increasing number to show service path hop + order"; + type uint8; + } + leaf service-function-type { + description + "Type of Service Function. + NOTE: This needs to map with Service Function Type in ODL to + support VNFFG. Service Function Type is manadatory param in ODL + SFC. 
This is temporarily set to string for ease of use"; + type string; + } + + leaf member-vnf-index-ref { + type uint64; + } + leaf vnfd-id-ref { + description + "Reference to VNF Descriptor Id"; + type string; + } + leaf vnfr-id-ref { + description + "A reference to a vnfr id"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id"; + } + } + leaf vnfr-name-ref { + description + "A reference to a vnfr name"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name"; + } + } + leaf vnfr-connection-point-ref { + description + "A reference to a vnfr connection point."; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr" + + "[vnfr:id = current()/../nsr:vnfr-id-ref]" + + "/vnfr:connection-point/vnfr:name"; + } + } + leaf service-index { + description + "Location within the service path"; + type uint8; + } + container connection-point-params { + leaf mgmt-address { + type inet:ip-address; + } + leaf name { + type string; + } + leaf port-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf vm-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf address { + type inet:ip-address; + } + leaf port { + type inet:port-number; + } + } + + container service-function-forwarder { + leaf name { + description + "Service Function Forwarder name"; + type string; + } + leaf ip-address { + description + "Data Plane IP Address of the SFF"; + type inet:ip-address; + } + leaf port { + description + "Data Plane Port of the SFF"; + type inet:port-number; + } + } + } + } + } + } + + container ns-instance-opdata { + config false; + + list nsr { + key "ns-instance-config-ref"; + + leaf ns-instance-config-ref { + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:id"; + } + } + + leaf name-ref { + description "Network service name reference"; + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:name"; + } + } + + leaf nsd-name-ref { + description "Network service descriptor name reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:name"; + } + } + + + leaf create-time { + description + "Creation timestamp of this Network Service. + The timestamp is expressed as seconds + since unix epoch - 1970-01-01T00:00:00Z"; + + type uint32; + } + + list connection-point { + description + "List for external connection points. + Each NS has one or more external connection points. + As the name implies that external connection points + are used for connecting the NS to other NS or to + external networks. Each NS exposes these connection + points to the orchestrator. 
The orchestrator can + construct network service chains by connecting the + connection points between different NS."; + + key "name"; + leaf name { + description + "Name of the NS connection point."; + type string; + } + + leaf type { + description + "Type of the connection point."; + type manotypes:connection-point-type; + } + } + + list vlr { + key "vlr-ref"; + leaf vlr-ref { + description + "Reference to a VLR record in the VLR catalog"; + type leafref { + path "/vlr:vlr-catalog/vlr:vlr/vlr:id"; + } + } + + + list vnfr-connection-point-ref { + description + "A list of references to connection points."; + key "vnfr-id"; + + leaf vnfr-id { + description "A reference to a vnfr"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id"; + } + } + + leaf connection-point { + description + "A reference to a connection point name in a vnfr"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr" + + "[vnfr:id = current()/../nsr:vnfr-id]" + + "/vnfr:connection-point/vnfr:name"; + } + } + } + } + + leaf-list constituent-vnfr-ref { + description + "List of VNFRs that are part of this + network service."; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id"; + } + } + + uses vnffgr; + + leaf operational-status { + description + "The operational status of the NS instance + init : The network service has just started. + vl-init-phase : The VLs in the NS are being instantiated. + vnf-init-phase : The VNFs in the NS are being instantiated. + running : The NS is in running state. + terminate : The NS is being terminated. + vnf-terminate-phase : The NS is terminating the VNFs in the NS. + vl-terminate-phase : The NS is terminating the VLs in the NS. + terminated : The NS is in the terminated state. + failed : The NS instantiation failed. + "; + + type enumeration { + enum init; + enum vl-init-phase; + enum vnf-init-phase; + enum running; + enum terminate; + enum vnf-terminate-phase; + enum vl-terminate-phase; + enum terminated; + enum failed; + } + } + + leaf config-status { + description + "The configuration status of the NS instance + configuring: At least one of the VNFs in this instance is in configuring state + configured: All the VNFs in this NS instance are configured or config-not-needed state + "; + type config-states; + } + + uses manotypes:monitoring-param; + + list vnf-monitoring-param { + description + "List of VNF monitoring params."; + + key "vnfr-id-ref"; + + leaf vnfr-id-ref { + description + "Reference to vnfr-id"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id"; + } + } + + uses manotypes:monitoring-param; + } + + list config-agent-job { + key "job-id"; + + leaf job-id { + description "config agent job Identifier for the NS."; + type uint64; + } + + leaf job-name { + description "Config agent job name"; + type string; + } + + leaf job-status { + description + "Job status to be set based on each VNF primitive execution, + pending - if at least one VNF is in pending state + and remaining VNFs are in success state. + Success - if all VNF executions are in success state + failure - if one of the VNF executions is failure"; + type enumeration { + enum pending; + enum success; + enum failure; + } + } + + list vnfr { + key "id"; + leaf id { + description "Identifier for the VNFR."; + type yang:uuid; + } + leaf vnf-job-status { + description + "Job status to be set based on each VNF primitive execution, + pending - if at least one primitive is in pending state + and remaining primitives are in success state. 
+ Success - if all primitive executions are in success state + failure - if one of the primitive executions is failure"; + type enumeration { + enum pending; + enum success; + enum failure; + } + } + list primitive { + key "name"; + leaf name { + description "the name of the primitive"; + type string; + } + leaf execution-id { + description "Execution id of the primitive"; + type string; + } + leaf execution-status { + description "status of the Execution"; + type enumeration { + enum pending; + enum success; + enum failure; + } + } + leaf execution-error-details { + description "Error details if execution-status is failure"; + type string; + } + } + } + } + } + } + + + rpc get-ns-config-primitive-values { + description "Executes a VNF configuration primitive"; + input { + leaf nsr_id_ref { + description "Reference to NSR ID ref"; + mandatory true; + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:id"; + } + } + + leaf name { + description "Name of the NS config primitive group"; + mandatory true; + type string; + } + } + + output { + list ns-parameter { + description "Automatically generated parameter"; + key "name"; + + leaf name { + description "Parameter name which should be pulled from a parameter pool"; + type string; + } + leaf value { + description "Automatically generated value"; + type string; + } + } + + list ns-parameter-group { + description "Automatically generated parameters in parameter group"; + key "name"; + leaf name { + description "Parameter group name"; + type string; + } + list parameter { + description "Automatically generated group parameter"; + key "name"; + + leaf name { + description "Parameter name which should be pulled from a parameter pool"; + type string; + } + leaf value { + description "Automatically generated value"; + type string; + } + } + } + + list vnf-primitive-group { + description + "List of configuration primitives grouped by VNF."; + + key "member-vnf-index-ref"; + leaf member-vnf-index-ref { + description + "Reference to member-vnf within constituent-vnfds"; + type uint64; + } + + leaf vnfd-id-ref { + description + "A reference to a vnfd. This is a + leafref to path: + ../../../../nsd:constituent-vnfd + + [nsd:id = current()/../nsd:id-ref] + + /nsd:vnfd-id-ref + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. 
+ Once that is resovled this will switched to use + leafref"; + + type string; + } + + list primitive { + key "index"; + leaf index { + description "Index of this primitive"; + type uint32; + } + + leaf name { + description "Name of the primitive associated with a value pool"; + type string; + } + + list parameter { + description "Automatically generated parameter"; + key "name"; + + leaf name { + description "Parameter name which should be pulled from a parameter pool"; + type string; + } + leaf value { + description "Automatically generated value"; + type string; + } + } + } + } + } + } + + + + rpc exec-ns-config-primitive { + description "Executes a NS configuration primitive or script"; + + input { + leaf name { + description "Name of the primitive"; + type string; + } + + leaf nsr_id_ref { + description "Reference to NSR ID ref"; + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:id"; + } + } + + list parameter { + description + "List of NS Primitive parameters"; + key "name"; + leaf name { + description + "Name of the parameter."; + type string; + } + + leaf value { + description + "Value associated with the name."; + type string; + } + } + + list parameter-group { + description + "List of NS Primitive parameter groups"; + key "name"; + leaf name { + description + "Name of the parameter."; + type string; + } + + list parameter { + description + "List of NS parameter group parameters"; + key "name"; + leaf name { + description + "Name of the parameter."; + type string; + } + + leaf value { + description + "Value associated with the name."; + type string; + } + } + } + + list vnf-list { + description + "List of VNFs whose primitives are being set."; + key "member_vnf_index_ref"; + + leaf member_vnf_index_ref { + description "Member VNF index"; + type uint64; + } + + leaf vnfr-id-ref { + description + "A reference to a vnfr. This is a + leafref to path"; + type yang:uuid; + } + + list vnf-primitive { + description + "List of configuration primitives supported by the + configuration agent for this VNF."; + key "index"; + + leaf index { + description + "index of the configuration primitive."; + type uint32; + } + leaf name { + description + "Name of the configuration primitive."; + type string; + } + + list parameter { + description + "List of parameters to the configuration primitive."; + key "name"; + leaf name { + description + "Name of the parameter."; + type string; + } + + leaf value { + description + "Value associated with the name."; + type string; + } + } + } + } + leaf user-defined-script { + description + "A user defined script."; + type string; + } + } + output { + leaf job-id { + description "Job identifier for this RPC"; + type uint64; + } + + leaf name { + description "Name of the config"; + type string; + } + + leaf nsr_id_ref { + description "Reference to NSR ID ref"; + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:id"; + } + } + + list vnf-out-list { + description + "List of VNFs whose primitives were set."; + key "member_vnf_index_ref"; + + leaf member_vnf_index_ref { + description "Member VNF index"; + type uint64; + } + leaf vnfr-id-ref { + description + "A reference to a vnfr. 
This is a + leafref to path"; + type yang:uuid; + } + + list vnf-out-primitive { + description + "List of configuration primitives supported by the + configuration agent for this VNF."; + key "index"; + + leaf index { + description + "index of the configuration primitive."; + type uint32; + } + + leaf name { + description + "Name of the configuration primitive."; + type string; + } + + leaf execution-id { + description "Execution id of this primitive"; + type string; + } + + leaf execution-status { + description "Status of the execution of this primitive"; + type string; + } + } + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/odl-network-topology.yang b/modules/core/mano/models/plugins/yang/odl-network-topology.yang new file mode 100644 index 0000000..ed15585 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/odl-network-topology.yang @@ -0,0 +1,347 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module odl-network-topology { + yang-version 1; + namespace "urn:TBD:params:xml:ns:yang:network-topology"; + // replace with IANA namespace when assigned + prefix "nt"; + + import ietf-inet-types { prefix "inet"; } + + organization "TBD"; + + contact "WILL-BE-DEFINED-LATER"; + + description + "This module defines a model for the topology of a network. + Key design decisions are as follows: + A topology consists of a set of nodes and links. + Links are point-to-point and unidirectional. + Bidirectional connections need to be represented through + two separate links. + Multipoint connections, broadcast domains etc can be represented + through a hierarchy of nodes, then connecting nodes at + upper layers of the hierarchy."; + + revision 2013-10-21 { + description + "Initial revision."; + } + + typedef topology-id { + type inet:uri; + description + "An identifier for a topology."; + } + + typedef node-id { + type inet:uri; + description + "An identifier for a node in a topology. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same node in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in separate + datastores. An implementation MAY choose to capture semantics + in the identifier, for example to indicate the type of node + and/or the type of topology that the node is a part of."; + } + + + typedef link-id { + type inet:uri; + description + "An identifier for a link in a topology. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same link in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in separate + datastores. An implementation MAY choose to capture semantics + in the identifier, for example to indicate the type of link + and/or the type of topology that the link is a part of."; + } + + typedef tp-id { + type inet:uri; + description + "An identifier for termination points on a node. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same TP in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in separate + datastores. 
An implementation MAY choose to capture semantics + in the identifier, for example to indicate the type of TP + and/or the type of node and topology that the TP is a part of."; + } + + typedef tp-ref { + type leafref { + path "/network-topology/topology/node/termination-point/tp-id"; + } + description + "A type for an absolute reference to a termination point. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + typedef topology-ref { + type leafref { + path "/network-topology/topology/topology-id"; + } + description + "A type for an absolute reference a topology instance."; + } + + typedef node-ref { + type leafref { + path "/network-topology/topology/node/node-id"; + } + description + + "A type for an absolute reference to a node instance. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + + typedef link-ref { + type leafref { + path "/network-topology/topology/link/link-id"; + } + description + "A type for an absolute reference a link instance. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + + grouping tp-attributes { + description + "The data objects needed to define a termination point. + (This only includes a single leaf at this point, used + to identify the termination point.) + Provided in a grouping so that in addition to the datastore, + the data can also be included in notifications."; + leaf tp-id { + type tp-id; + } + leaf-list tp-ref { + type tp-ref; + config false; + description + "The leaf list identifies any termination points that the + termination point is dependent on, or maps onto. + Those termination points will themselves be contained + in a supporting node. + This dependency information can be inferred from + the dependencies between links. For this reason, + this item is not separately configurable. Hence no + corresponding constraint needs to be articulated. + The corresponding information is simply provided by the + implementing system."; + } + } + + grouping node-attributes { + description + "The data objects needed to define a node. + The objects are provided in a grouping so that in addition to + the datastore, the data can also be included in notifications + as needed."; + + leaf node-id { + type node-id; + description + "The identifier of a node in the topology. + A node is specific to a topology to which it belongs."; + } + list supporting-node { + description + "This list defines vertical layering information for nodes. + It allows to capture for any given node, which node (or nodes) + in the corresponding underlay topology it maps onto. + A node can map to zero, one, or more nodes below it; + accordingly there can be zero, one, or more elements in the list. + If there are specific layering requirements, for example + specific to a particular type of topology that only allows + for certain layering relationships, the choice + below can be augmented with additional cases. + A list has been chosen rather than a leaf-list in order + to provide room for augmentations, e.g. 
for + statistics or priorization information associated with + supporting nodes."; + // This is not what was published in the initial draft, + // added topology-ref leaf and added it to the key + key "topology-ref node-ref"; + leaf topology-ref { + type topology-ref; + } + leaf node-ref { + type node-ref; + } + } + } + + grouping link-attributes { + // This is a grouping, not defined inline with the link definition itself, + // so it can be included in a notification, if needed + leaf link-id { + type link-id; + description + "The identifier of a link in the topology. + A link is specific to a topology to which it belongs."; + } + container source { + leaf source-node { + mandatory true; + type node-ref; + description + "Source node identifier, must be in same topology."; + } + leaf source-tp { + type tp-ref; + description + "Termination point within source node that terminates the link."; + + } + } + container destination { + leaf dest-node { + mandatory true; + type node-ref; + description + "Destination node identifier, must be in same topology."; + } + leaf dest-tp { + type tp-ref; + description + "Termination point within destination node that terminates the link."; + } + } + list supporting-link { + key "link-ref"; + leaf link-ref { + type link-ref; + } + } + } + + + container network-topology { + list topology { + description " + This is the model of an abstract topology. + A topology contains nodes and links. + Each topology MUST be identified by + unique topology-id for reason that a network could contain many + topologies. + "; + key "topology-id"; + leaf topology-id { + type topology-id; + description " + It is presumed that a datastore will contain many topologies. To + distinguish between topologies it is vital to have UNIQUE + topology identifiers. + "; + } + leaf server-provided { + type boolean; + config false; + description " + Indicates whether the topology is configurable by clients, + or whether it is provided by the server. This leaf is + + populated by the server implementing the model. + It is set to false for topologies that are created by a client; + it is set to true otherwise. If it is set to true, any + attempt to edit the topology MUST be rejected. + "; + } + container topology-types { + description + "This container is used to identify the type, or types + (as a topology can support several types simultaneously), + of the topology. + Topology types are the subject of several integrity constraints + that an implementing server can validate in order to + maintain integrity of the datastore. + Topology types are indicated through separate data nodes; + the set of topology types is expected to increase over time. + To add support for a new topology, an augmenting module + needs to augment this container with a new empty optional + container to indicate the new topology type. + The use of a container allows to indicate a subcategorization + of topology types. + The container SHALL NOT be augmented with any data nodes + that serve a purpose other than identifying a particular + topology type. + "; + } + list underlay-topology { + key "topology-ref"; + leaf topology-ref { + type topology-ref; + } + // a list, not a leaf-list, to allow for potential augmentation + // with properties specific to the underlay topology, + // such as statistics, preferences, or cost. 
+ description + "Identifies the topology, or topologies, that this topology + is dependent on."; + } + + list node { + description "The list of network nodes defined for the topology."; + key "node-id"; + uses node-attributes; + must "boolean(../underlay-topology[*]/node[./supporting-nodes/node-ref])"; + // This constraint is meant to ensure that a referenced node is in fact + // a node in an underlay topology. + list termination-point { + description + + "A termination point can terminate a link. + Depending on the type of topology, a termination point could, + for example, refer to a port or an interface."; + key "tp-id"; + uses tp-attributes; + } + } + + list link { + description " + A Network Link connects a by Local (Source) node and + a Remote (Destination) Network Nodes via a set of the + nodes' termination points. + As it is possible to have several links between the same + source and destination nodes, and as a link could potentially + be re-homed between termination points, to ensure that we + would always know to distinguish between links, every link + is identified by a dedicated link identifier. + Note that a link models a point-to-point link, not a multipoint + link. + Layering dependencies on links in underlay topologies are + not represented as the layering information of nodes and of + termination points is sufficient. + "; + key "link-id"; + uses link-attributes; + must "boolean(../underlay-topology/link[./supporting-link])"; + // Constraint: any supporting link must be part of an underlay topology + must "boolean(../node[./source/source-node])"; + // Constraint: A link must have as source a node of the same topology + must "boolean(../node[./destination/dest-node])"; + // Constraint: A link must have as source a destination of the same topology + must "boolean(../node/termination-point[./source/source-tp])"; + // Constraint: The source termination point must be contained in the source node + must "boolean(../node/termination-point[./destination/dest-tp])"; + // Constraint: The destination termination point must be contained + // in the destination node + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/pnfd.yang b/modules/core/mano/models/plugins/yang/pnfd.yang new file mode 100755 index 0000000..077af1e --- /dev/null +++ b/modules/core/mano/models/plugins/yang/pnfd.yang @@ -0,0 +1,92 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module pnfd +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:pnfd"; + prefix "pnfd"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-10 { + description + "Initial revision. This YANG file defines + the Physical Network Function Descriptor (PNFD)"; + reference + "Derived from earlier versions of base YANG files"; + } + + container pnfd-catalog { + + list pnfd { + key "id"; + + leaf id { + description "Identifier for the PNFD."; + type yang:uuid; + } + + leaf name { + description "PNFD name."; + type string; + } + + leaf short-name { + description "PNFD short name."; + type string; + } + + leaf vendor { + description "Vendor of the PNFD."; + type string; + } + + leaf description { + description "Description of the PNFD."; + type string; + } + + leaf version { + description "Version of the PNFD"; + type string; + } + + list connection-point { + description + "List for external connection points. 
Each PNF has one or more external + connection points."; + key "id"; + leaf id { + description + "Identifier for the external connection points"; + type uint64; + } + + leaf cp-type { + description + "Type of the connection point."; + type manotypes:connection-point-type; + } + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-nsd.tailf.yang b/modules/core/mano/models/plugins/yang/rw-nsd.tailf.yang new file mode 100644 index 0000000..d4a7c1e --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-nsd.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-nsd-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsd-annotation"; + prefix "rw-nsd-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-nsd { + prefix rw-nsd; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-nsd.yang b/modules/core/mano/models/plugins/yang/rw-nsd.yang new file mode 100755 index 0000000..b53a627 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-nsd.yang @@ -0,0 +1,45 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-nsd +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsd"; + prefix "rw-nsd"; + + import nsd { + prefix "nsd"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-10 { + description + "Initial revision. This YANG file augments + the base MANO NSD"; + reference + "Derived from earlier versions of base YANG files"; + } + + augment /nsd:nsd-catalog/nsd:nsd { + uses manotypes:control-param; + uses manotypes:action-param; + leaf meta { + description + "Any meta-data needed by the UI"; + type string; + } + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-nsr.tailf.yang b/modules/core/mano/models/plugins/yang/rw-nsr.tailf.yang new file mode 100644 index 0000000..c3bb827 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-nsr.tailf.yang @@ -0,0 +1,38 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-nsr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsr-annotation"; + prefix "rw-nsr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + + import rw-nsr { + prefix rw-nsr; + } + + import nsr { + prefix nsr; + } + + tailf:annotate "/nsr:ns-instance-opdata/nsr:nsr/rw-nsr:operational-events" { + tailf:callpoint base_show; + } + + tailf:annotate "/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-nsr.yang b/modules/core/mano/models/plugins/yang/rw-nsr.yang new file mode 100755 index 0000000..4231e80 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-nsr.yang @@ -0,0 +1,287 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-nsr +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsr"; + prefix "rw-nsr"; + + import mano-types { + prefix "manotypes"; + } + + import nsr { + prefix "nsr"; + } + + import nsd { + prefix "nsd"; + } + + import rw-cloud { + prefix "rw-cloud"; + } + + import ietf-yang-types { + prefix "yang"; + } + + revision 2015-09-10 { + description + "Initial revision. 
This YANG file augments + the base MANO NSR"; + reference + "Derived from earlier versions of base YANG files"; + } + + grouping operational-events { + list operational-events { + key "id"; + description + "Recent operational events for this network service. + Though the model does not impose any restrictions on the number of events, + the max operational events will be limited to the most recent 10"; + + leaf id { + description "The id of the instance"; + type uint64; + } + + leaf timestamp { + description + "The timestamp of this event expressed as seconds since + unix epoch - 1970-01-01T00:00:00Z"; + type uint32; + } + leaf event { + description "Short description of the event"; + type string; + } + leaf description { + description + "The description of this event"; + type string; + } + } + } + + grouping nsd-ref-count { + list nsd-ref-count { + key "nsd-id-ref"; + description "This table maintains the number of NSRs used by each NSD"; + + leaf nsd-id-ref { + description "Reference to NSD"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:id"; + } + } + leaf instance-ref-count { + description + "Reference count for the number of NSRs referring to this NSD. + Every NS record instantiated using this descriptor takes + a reference on the NSD and releases the reference when the + network service is terminated. This descriptor cannot be + deleted when this counter is non-zero"; + type uint64; + } + } + } + + augment /nsr:ns-instance-config/nsr:nsr { + leaf cloud-account { + description + "The configured cloud account within which the NSR is instantiated. + All VDU's, Virtual Links, and provider networks will be requested + using the cloud-account's associated CAL instance"; + type leafref { + path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name"; + } + } + + leaf om-datacenter { + description + "Openmano datacenter name to use when instantiating + the network service. This is only used when openmano + is selected as the cloud account. This should be superseded + by multiple cloud accounts when that becomes available."; + type string; + } + } + + augment /nsr:ns-instance-opdata/nsr:nsr { + uses manotypes:action-param; + uses manotypes:control-param; + + leaf cloud-account { + description + "The configured cloud account within which the NSR is instantiated.
+ All VDU's, Virtual Links, and provider networks will be requested + using the cloud-account's associated CAL instance"; + type leafref { + path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name"; + } + } + + container nfvi-metrics { + container vm { + leaf label { + description + "Label to show in UI"; + type string; + default "VM"; + } + + leaf active-vm { + description + "The number of active VMs."; + type uint64; + } + + leaf inactive-vm { + description + "The number of inactive VMs."; + type uint64; + } + } + + uses manotypes:nfvi-metrics; + } + + container epa-param { + container ovs-acceleration { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "OVS ACCELERATION"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + } + + container ovs-offload { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "OVS OFFLOAD"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + + } + + container ddio { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "DDIO"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + + } + + container cat { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "CAT"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + } + + container cmt { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "CMT"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + + } + } + uses operational-events; + } + + augment /nsr:ns-instance-opdata { + uses nsd-ref-count; + } + + augment /nsr:ns-instance-config { + leaf nfvi-polling-period { + description + "Defines the period (secons) that the NFVI metrics are polled at"; + type uint64; + default 4; + } + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-topology.tailf.yang b/modules/core/mano/models/plugins/yang/rw-topology.tailf.yang new file mode 100644 index 0000000..de33abe --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-topology.tailf.yang @@ -0,0 +1,34 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-topology-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-topology"; + prefix "rw-topology-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + import ietf-network-topology { + prefix nt; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + +} diff --git a/modules/core/mano/models/plugins/yang/rw-topology.yang b/modules/core/mano/models/plugins/yang/rw-topology.yang new file mode 100755 index 0000000..e2276ae --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-topology.yang @@ -0,0 +1,114 @@ + +/* + * + * (c) Copyright 
RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-topology { + namespace "http://riftio.com/ns/riftware-1.0/rw-topology"; + prefix rw-topology; + + import ietf-inet-types {prefix inet;} + import ietf-network {prefix nw;} + import ietf-network-topology {prefix nt;} + import ietf-l2-topology {prefix l2t;} + + revision "2015-10-20" { + description "Initial revision of IP level addressing for L2 host topology"; + } + + grouping ip-address-group { + description "IP addresses if present for L2 termination points"; + container ip-attributes { + description "L2 termination points containing IP addresses"; + list ip-addresses { + key ip; + leaf ip { + type inet:ip-address; + description "IPv4 or IPv6 address"; + } + } + } + } // grouping ip-address-group + + + grouping rw-network-attributes { + description "RW Topology scope attributes"; + container rw-network-attributes { + description "Containing RW network attributes"; + leaf name { + type string; + description "Name of the RW Topology network"; + } + leaf sdn-account-name { + type string; + description "Name of the SDN account from which topology is got"; + } + } + } + + grouping rw-node-attributes { + description "RW node attributes"; + container rw-node-attributes { + description "Containing RW node attributes"; + leaf name { + type string; + description "Node name"; + } + leaf ovs-bridge-name { + type string; + description "Name of OVS bridge"; + } + } + } + + grouping rw-link-attributes { + description "RW link attributes"; + container rw-link-attributes { + description "Containing RW link attributes"; + leaf name { + type string; + description "Link name"; + } + } + } + + grouping rw-termination-point-attributes { + description "RW termination point attributes"; + container rw-termination-point-attributes { + description "Containing RW TP attributes"; + leaf description { + type string; + description "Port description"; + } + uses ip-address-group; + } + } + + augment "/nw:network" { + description + "Configuration parameters for the RW network + as a whole"; + uses rw-network-attributes; + } + + augment "/nw:network/nw:node" { + description + "Configuration parameters for RW at the node + level"; + uses rw-node-attributes; + } + + augment "/nw:network/nt:link" { + description "Augment RW topology link information"; + uses rw-link-attributes; + } + + augment "/nw:network/nw:node/nt:termination-point" { + description + "Augment RW topology termination point configuration"; + uses rw-termination-point-attributes; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vld.tailf.yang b/modules/core/mano/models/plugins/yang/rw-vld.tailf.yang new file mode 100644 index 0000000..30840fd --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vld.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vld-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vld-annotation"; + prefix "rw-vld-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-vld { + prefix rw-vld; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vld.yang b/modules/core/mano/models/plugins/yang/rw-vld.yang new file mode 100755 index 0000000..4525a6f --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vld.yang @@ -0,0 +1,27 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vld +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vld"; + prefix "rw-vld"; + + 
import vld { + prefix "vld"; + } + + revision 2015-09-10 { + description + "Initial revision. This YANG file augments + the base MANO VLD"; + reference + "Derived from earlier versions of base YANG files"; + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-vlr.tailf.yang b/modules/core/mano/models/plugins/yang/rw-vlr.tailf.yang new file mode 100644 index 0000000..bbd4238 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vlr.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vlr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vlr-annotation"; + prefix "rw-vlr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-vlr { + prefix rw-vlr; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vlr.yang b/modules/core/mano/models/plugins/yang/rw-vlr.yang new file mode 100755 index 0000000..4d5c125 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vlr.yang @@ -0,0 +1,55 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vlr +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vlr"; + prefix "rw-vlr"; + + import mano-types { + prefix "manotypes"; + } + + import vlr { + prefix "vlr"; + } + + import rw-cloud { + prefix "rwcloud"; + } + + import ietf-yang-types { + prefix "yang"; + } + + revision 2015-09-30 { + description + "Initial revision. This YANG file augments + the base MANO VNFD"; + reference + "Derived from earlier versions of base YANG files"; + } + + augment /vlr:vlr-catalog/vlr:vlr { + leaf cloud-account { + description + "The cloud account to use when requesting resources for + this vlr"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + leaf network_pool { + description "The network pool the resource was allocated from."; + type string; + } + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-vnfd.tailf.yang b/modules/core/mano/models/plugins/yang/rw-vnfd.tailf.yang new file mode 100644 index 0000000..58049ee --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vnfd.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vnfd-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfd-annotation"; + prefix "rw-vnfd-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-vnfd { + prefix rw-vnfd; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vnfd.yang b/modules/core/mano/models/plugins/yang/rw-vnfd.yang new file mode 100755 index 0000000..f5c0947 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vnfd.yang @@ -0,0 +1,105 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vnfd +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfd"; + prefix "rw-vnfd"; + + import vnfd { + prefix "vnfd"; + } + + import rwvcs-types { + prefix "rwvcstypes"; + } + + import rw-pb-ext { prefix "rwpb"; } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-10 { + description + "Initial revision. 
This YANG file augments + the base MANO VNFD"; + reference + "Derived from earlier versions of base YANG files"; + } + + augment /vnfd:vnfd-catalog/vnfd:vnfd { + uses manotypes:control-param; + uses manotypes:action-param; + leaf meta { + description + "Any meta-data needed by the UI"; + type string; + } + list component { + description + "This section defines the RIFT.ware + virtual components"; + key "component-name"; + rwpb:msg-new VcsComponent; + rwpb:application-request-point; + + leaf component-name { + description ""; + type string; + } + + leaf component-type { + description ""; + type rwvcstypes:component_type; + mandatory true; + } + + choice component { + case rwvcs-rwcollection { + uses rwvcstypes:rwvcs-rwcollection; + } + case rwvcs-rwvm { + uses rwvcstypes:rwvcs-rwvm; + } + case rwvcs-rwproc { + uses rwvcstypes:rwvcs-rwproc; + } + case native-proc { + uses rwvcstypes:native-proc; + } + case rwvcs-rwtasklet { + uses rwvcstypes:rwvcs-rwtasklet; + } + } + } // list component + } + + augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu { + leaf vcs-component-ref { + description + "This defines the software components using the + RIFT.ware Virtual Component System (VCS). This + also allows specifying a state machine during + the VM startup. + NOTE: This is an significant addition to MANO, + since MANO doesn't clearly specify a method to + identify various software components in a VM. + Also using a state machine is not something that + is well described in MANO."; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/rw-vnfd:component/rw-vnfd:component-name"; + } + } + } +} +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-vnfr.tailf.yang b/modules/core/mano/models/plugins/yang/rw-vnfr.tailf.yang new file mode 100644 index 0000000..a3d2756 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vnfr.tailf.yang @@ -0,0 +1,37 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vnfr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfr-annotation"; + prefix "rw-vnfr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-vnfr { + prefix rw-vnfr; + } + + import vnfr { + prefix vnfr; + } + + tailf:annotate "/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" { + tailf:callpoint base_show; + } + + tailf:annotate "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:operational-events" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vnfr.yang b/modules/core/mano/models/plugins/yang/rw-vnfr.yang new file mode 100755 index 0000000..df67ce5 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vnfr.yang @@ -0,0 +1,261 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vnfr +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfr"; + prefix "rw-vnfr"; + + import mano-types { + prefix "manotypes"; + } + + import rw-pb-ext { prefix "rwpb"; } + + import vnfr { + prefix "vnfr"; + } + + import vnfd { + prefix "vnfd"; + } + + import rw-cloud { + prefix "rwcloud"; + } + + import rwvcs-types { + prefix "rwvcstypes"; + } + + import ietf-yang-types { + prefix "yang"; + } + + revision 2015-09-10 { + description + "Initial revision. 
This YANG file augments + the base MANO VNFR"; + reference + "Derived from earlier versions of base YANG files"; + } + + grouping vnfr-operational-events { + list operational-events { + key "id"; + description + "Recent operational events for the VNFR. + Though the model does not impose any restrictions on the number of events, + the max operational events will be limited to the most recent 10"; + + leaf id { + description "The id of the instance"; + type uint64; + } + + leaf timestamp { + description + "The timestamp of this event expressed as seconds since + unix epoch - 1970-01-01T00:00:00Z"; + type uint32; + } + leaf event { + description "The event"; + type enumeration { + rwpb:enum-type "VnfrOperationalEvent"; + enum instantiate-rcvd; + enum vl-inited; + enum vnf-inited; + enum running; + enum terminate-rcvd; + enum vnf-terminated; + enum vl-terminated; + enum terminated; + } + } + leaf description { + description + "The description of this event"; + type string; + } + } + } + + grouping vdur-operational-events { + list operational-events { + key "id"; + description + "Recent operational events for the VDUR. + Though the model does not impose any restrictions on the number of events, + the max operational events will be limited to the most recent 10"; + + leaf id { + description "The id of the instance"; + type uint64; + } + + leaf timestamp { + description + "The timestamp of this event expressed as seconds since + unix epoch - 1970-01-01T00:00:00Z"; + type uint32; + } + leaf event { + description "The event"; + type enumeration { + rwpb:enum-type "VdurOperationalEvent"; + enum instantiate-rcvd; + enum vm-allocation-requested; + enum running; + enum terminate-rcvd; + enum vm-terminate-requested; + enum terminated; + } + } + leaf description { + description + "The description of this event"; + type string; + } + } + } + + augment /vnfr:vnfr-catalog/vnfr:vnfr { + uses manotypes:action-param; + uses manotypes:control-param; + + leaf cloud-account { + description + "The cloud account to use when requesting resources for + this vnf"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + container nfvi-metrics { + container vm { + leaf label { + description + "Label to show in UI"; + type string; + default "VM"; + } + + leaf active-vm { + description + "The number of active VMs."; + type uint64; + } + + leaf inactive-vm { + description + "The number of inactive VMs."; + type uint64; + } + } + + uses manotypes:nfvi-metrics; + } + + list component { + description + "This section defines the RIFT.ware + virtual components"; + key "component-name"; + rwpb:msg-new VcsComponentOp; + rwpb:application-request-point; + + leaf component-name { + description ""; + type string; + } + + leaf component-type { + description ""; + type rwvcstypes:component_type; + mandatory true; + } + + choice component { + case rwvcs-rwcollection { + uses rwvcstypes:rwvcs-rwcollection; + } + case rwvcs-rwvm { + uses rwvcstypes:rwvcs-rwvm; + } + case rwvcs-rwproc { + uses rwvcstypes:rwvcs-rwproc; + } + case native-proc { + uses rwvcstypes:native-proc; + } + case rwvcs-rwtasklet { + uses rwvcstypes:rwvcs-rwtasklet; + } + } + } // list component + uses vnfr-operational-events; + } + + augment /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur { + leaf vm-pool { + description + "The pool from which this VM was allocated"; + type string; + } + + container nfvi-metrics { + uses manotypes:nfvi-metrics; + } + + leaf vcs-component-ref { + description + "This defines the software components using the + RIFT.ware
Virtual Component System (VCS). This + also allows specifying a state machine during + the VM startup. + NOTE: This is a significant addition to MANO, + since MANO doesn't clearly specify a method to + identify various software components in a VM. + Also using a state machine is not something that + is well described in MANO."; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:component/rw-vnfr:component-name"; + } + } + uses vdur-operational-events; + } + grouping vnfd-ref-count { + list vnfd-ref-count { + key "vnfd-id-ref"; + description "This table maintains the number of VNFRs used by each VNFD"; + + leaf vnfd-id-ref { + description "Reference to VNFD"; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + leaf instance-ref-count { + description + "Reference count for the number of VNFRs referring to this VNFD. + Every VNF Record instantiated using this descriptor takes + a reference on the VNFD and releases the reference when the + virtual network service is terminated. This descriptor cannot + be deleted when this counter is non-zero"; + type uint64; + } + } + } + augment /vnfr:vnfr-catalog { + uses vnfd-ref-count; + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/vld.tailf.yang b/modules/core/mano/models/plugins/yang/vld.tailf.yang new file mode 100644 index 0000000..4d772e8 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vld.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vld-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/vld-annotation"; + prefix "vld-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import vld { + prefix vld; + } +} diff --git a/modules/core/mano/models/plugins/yang/vld.yang b/modules/core/mano/models/plugins/yang/vld.yang new file mode 100755 index 0000000..9608dfa --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vld.yang @@ -0,0 +1,129 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vld +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vld"; + prefix "vld"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import vnfd { + prefix "vnfd"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-10 { + description + "Initial revision.
This YANG file defines + the Virtual Link Descriptor (VLD)"; + reference + "Derived from earlier versions of base YANG files"; + } + + container vld-catalog { + + list vld { + key "id"; + + leaf id { + description "Identifier for the VLD."; + type yang:uuid; + } + + leaf name { + description "Virtual Link Descriptor (VLD) name."; + type string; + } + + leaf short-name { + description "Short name for VLD for UI"; + type string; + } + + leaf vendor { + description "Provider of the VLD."; + type string; + } + + leaf description { + description "Description of the VLD."; + type string; + } + + leaf version { + description "Version of the VLD"; + type string; + } + + leaf type { + type manotypes:virtual-link-type; + } + + leaf root-bandwidth { + description + "For ELAN this is the aggregate bandwidth."; + type uint64; + } + + leaf leaf-bandwidth { + description + "For ELAN this is the bandwidth of branches."; + type uint64; + } + + list vnfd-connection-point-ref { + description + "A list of references to connection points."; + key "vnfd-ref member-vnf-index-ref"; + + leaf vnfd-ref { + description "A reference to a vnfd"; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + + leaf member-vnf-index-ref { + description + "A reference to the consituent-vnfd id in nsd. + Should have been a leafref to: + '/nsd:nsd-catalog:/nsd:nsd/constituent-vnfd/member-vnf-index-ref'. + Instead using direct leaf to avoid circular reference."; + type uint64; + } + + leaf vnfd-connection-point-ref { + description + "A reference to a connection point name in a vnfd"; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd" + + "[vnfd:id = current()/../vld:vnfd-ref]" + + "/vnfd:connection-point/vnfd:name"; + } + } + } + + // replicate for pnfd container here + uses manotypes:provider-network; + } + } +} diff --git a/modules/core/mano/models/plugins/yang/vlr.cli.xml b/modules/core/mano/models/plugins/yang/vlr.cli.xml new file mode 100755 index 0000000..e2e54fa --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vlr.cli.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/modules/core/mano/models/plugins/yang/vlr.tailf.yang b/modules/core/mano/models/plugins/yang/vlr.tailf.yang new file mode 100644 index 0000000..b12a2d7 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vlr.tailf.yang @@ -0,0 +1,28 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vlr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/vlr-annotation"; + prefix "vlr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import vlr { + prefix vlr; + } + tailf:annotate "/vlr:vlr-catalog" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/vlr.yang b/modules/core/mano/models/plugins/yang/vlr.yang new file mode 100755 index 0000000..89a3299 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vlr.yang @@ -0,0 +1,159 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vlr +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vlr"; + prefix "vlr"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + import vld { + prefix "vld"; + } + + revision 2015-09-10 { + description + "Initial revision. 
This YANG file defines + the Virtual Link Record (VLR)"; + reference + "Derived from earlier versions of base YANG files"; + } + + container vlr-catalog { + config false; + + list vlr { + key "id"; + unique "name"; + + leaf id { + description "Identifier for the VLR."; + type yang:uuid; + } + + leaf name { + description "VLR name."; + type string; + } + + leaf res-id { + description "Identifier for resmgr id mapping"; + type yang:uuid; + } + + leaf short-name { + description "Short name for VLR for UI"; + type string; + } + + leaf vendor { + description "Provider of the VLR."; + type string; + } + + leaf description { + description "Description of the VLR."; + type string; + } + + leaf version { + description "Version of the VLR"; + type string; + } + + leaf type { + type manotypes:virtual-link-type; + } + + leaf root-bandwidth { + description + "For ELAN this is the aggregate bandwidth."; + type uint64; + } + + leaf leaf-bandwidth { + description + "For ELAN this is the bandwidth of branches."; + type uint64; + } + + leaf create-time { + description + "Creation timestamp of this Virtual Link. + The timestamp is expressed as seconds + since unix epoch - 1970-01-01T00:00:00Z"; + + type uint32; + } + + leaf vld-ref { + description "Reference to VLD"; + type leafref { + path "/vld:vld-catalog/vld:vld/vld:id"; + } + } + + leaf network-id { + description + "Identifier for the allocated network resource."; + type string; + } + + // replicate for pnfd container here + + uses manotypes:provider-network; + + leaf status { + description + "Status of the virtual link record."; + type enumeration { + enum LINK_UP; + enum DEGRADED; + enum LINK_DOWN; + } + } + leaf operational-status { + description + "The operational status of the Virtual Link + init : The VL is in init stat. + vl-alloc-pending : The VL alloc is pending in VIM + running : The VL is up and running in VM + vl-terminate-pending : The VL is being terminated in VIM. + terminated : The VL is terminated in the VM. + failed : The VL instantiation failed in VIM. + "; + + type enumeration { + rwpb:enum-type "VlOperationalStatus"; + enum init; + enum vl-alloc-pending; + enum running; + enum vl-terminate-pending; + enum terminated; + enum failed; + } + } + } + } +} + diff --git a/modules/core/mano/models/plugins/yang/vnfd.tailf.yang b/modules/core/mano/models/plugins/yang/vnfd.tailf.yang new file mode 100644 index 0000000..760d78c --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfd.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnfd-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/vnfd-annotation"; + prefix "vnfd-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import vnfd { + prefix vnfd; + } +} diff --git a/modules/core/mano/models/plugins/yang/vnfd.yang b/modules/core/mano/models/plugins/yang/vnfd.yang new file mode 100755 index 0000000..fe627ff --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfd.yang @@ -0,0 +1,461 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnfd +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vnfd"; + prefix "vnfd"; + + import mano-types { + prefix "manotypes"; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import ietf-inet-types { + prefix "inet"; + } + + revision 2015-09-10 { + description + "Initial revision. 
This YANG file defines + the Virtual Network Function (VNF)"; + reference + "Derived from earlier versions of base YANG files"; + } + + grouping virtual-interface { + container virtual-interface { + description + "Container for the virtual interface properties"; + + leaf type { + description + "Specifies the type of virtual interface + between VM and host. + VIRTIO : Use the traditional VIRTIO interface. + PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface. + SR-IOV : Use SR-IOV interface. + OM-MGMT : Used to specify openmano mgmt external-connection type"; + + type enumeration { + enum OM-MGMT; + enum PCI-PASSTHROUGH; + enum SR-IOV; + enum VIRTIO; + } + default "VIRTIO"; + } + + leaf vpci { + description + "Specifies the virtual PCI address. Expressed in + the following format dddd:dd:dd.d. For example + 0000:00:12.0. This information can be used to + pass as metadata during the VM creation."; + type string; + } + + leaf bandwidth { + description + "Aggregate bandwidth of the NIC."; + type uint64; + } + } + } + + container vnfd-catalog { + + description + "Virtual Network Function Descriptor (VNFD)."; + + list vnfd { + key "id"; + + leaf id { + description "Identifier for the VNFD."; + type yang:uuid; + } + + leaf name { + description "VNFD name."; + mandatory true; + type string; + } + + leaf short-name { + description "VNFD short name."; + type string; + } + + leaf vendor { + description "Vendor of the VNFD."; + type string; + } + + leaf logo { + description + "Vendor logo for the Virtual Network Function"; + type string; + } + + leaf description { + description "Description of the VNFD."; + type string; + } + + leaf version { + description "Version of the VNFD"; + type string; + } + + container mgmt-interface { + description + "Interface over which the VNF is managed."; + + choice endpoint-type { + description + "Indicates the type of management endpoint."; + + case ip { + description + "Specifies the static IP address for managing the VNF."; + leaf ip-address { + type inet:ip-address; + } + } + + case vdu-id { + description + "Use the default management interface on this VDU."; + leaf vdu-id { + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu/vnfd:id"; + } + } + } + + case cp { + description + "Use the ip address associated with this connection point."; + leaf cp { + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:connection-point/vnfd:name"; + } + } + } + } + + leaf port { + description + "Port for the management interface."; + type inet:port-number; + } + + choice interface-type { + // TODO: This needs to be moved to NSD. + case netconf { + leaf netconf { + type empty; + } + } + } + + container dashboard-params { + description "Parameters for the VNF dashboard"; + + leaf path { + description "The HTTP path for the dashboard"; + type string; + } + + leaf https { + description "Pick HTTPS instead of HTTP , Default is false"; + type boolean; + } + + leaf port { + description "The HTTP port for the dashboard"; + type inet:port-number; + } + } + } + + list internal-vld { + key "id"; + description + "List of Internal Virtual Link Descriptors (VLD). + The internal VLD describes the basic topology of + the connectivity (e.g. 
E-LAN, E-Line, E-Tree) + between internal VNF components of the system."; + + leaf id { + description "Identifier for the VLD"; + type yang:uuid; + } + + leaf name { + description "Name of the internal VLD"; + type string; + } + + leaf short-name { + description "Short name of the internal VLD"; + type string; + } + + leaf description { + type string; + } + + leaf type { + type manotypes:virtual-link-type; + } + + leaf root-bandwidth { + description + "For ELAN this is the aggregate bandwidth."; + type uint64; + } + + leaf leaf-bandwidth { + description + "For ELAN this is the bandwidth of branches."; + type uint64; + } + + leaf-list internal-connection-point-ref { + type leafref { + path "../../vdu/internal-connection-point/id"; + } + } + + uses manotypes:provider-network; + } + + list connection-point { + key "name"; + description + "List for external connection points. Each VNF has one + or more external connection points. As the name + implies that external connection points are used for + connecting the VNF to other VNFs or to external networks. + Each VNF exposes these connection points to the + orchestrator. The orchestrator can construct network + services by connecting the connection points between + different VNFs. The NFVO will use VLDs and VNFFGs at + the network service level to construct network services."; + + leaf name { + description "Name of the connection point"; + type string; + } + + leaf short-name { + description "Short name of the connection point"; + type string; + } + + leaf type { + description "Type of the connection point."; + type manotypes:connection-point-type; + } + } + + list vdu { + description "List of Virtual Deployment Units"; + key "id"; + + leaf id { + description "Unique id for the VDU"; + type yang:uuid; + } + + leaf name { + description "Unique name for the VDU"; + type string; + } + + leaf description { + description "Description of the VDU."; + type string; + } + + leaf count { + description "Number of instances of VDU"; + type uint64; + } + + leaf mgmt-vpci { + description + "Specifies the virtual PCI address. Expressed in + the following format dddd:dd:dd.d. For example + 0000:00:12.0. This information can be used to + pass as metadata during the VM creation."; + type string; + } + + + uses manotypes:vm-flavor; + uses manotypes:guest-epa; + uses manotypes:vswitch-epa; + uses manotypes:hypervisor-epa; + uses manotypes:host-epa; + + leaf image { + description + "Image name for the software image. + If the image name is found within the VNF packaage it will + be uploaded to all cloud accounts during onboarding process. + Otherwise, the image must be added to the cloud account with + the same name as entered here. + "; + mandatory true; + type string; + } + + leaf image-checksum { + description + "Image md5sum for the software image. + The md5sum, if provided, along with the image name uniquely + identifies an image uploaded to the CAL. + "; + type string; + } + + leaf cloud-init { + description "Content of cloud-init script"; + type string; + } + + list internal-connection-point { + key "id"; + description + "List for internal connection points. Each VNFC + has zero or more internal connection points. + Internal connection points are used for connecting + the VNF components internal to the VNF. 
If a VNF + has only one VNFC, it may not have any internal + connection points."; + + leaf id { + description "Identifier for the internal connection points"; + type yang:uuid; + } + + leaf type { + description "Type of the connection point."; + type manotypes:connection-point-type; + } + + leaf internal-vld-ref { + type leafref { + path "../../../internal-vld/id"; + } + } + } + + list internal-interface { + description + "List of internal interfaces for the VNF"; + key name; + + leaf name { + description + "Name of internal interface. Note that this + name has only local significance to the VDU."; + type string; + } + + leaf vdu-internal-connection-point-ref { + type leafref { + path "../../internal-connection-point/id"; + } + } + uses virtual-interface; + } + + list external-interface { + description + "List of external interfaces for the VNF. + The external interfaces enable sending + traffic to and from VNF."; + key name; + + leaf name { + description + "Name of the external interface. Note that + this name has only local significance."; + type string; + } + + leaf vnfd-connection-point-ref { + description + "Name of the external connection point."; + type leafref { + path "../../../connection-point/name"; + } + } + uses virtual-interface; + } + } + + list vdu-dependency { + description + "List of VDU dependencies."; + + key vdu-source-ref; + leaf vdu-source-ref { + type leafref { + path "../../vdu/id"; + } + } + + leaf vdu-depends-on-ref { + description + "Reference to the VDU that + source VDU depends."; + type leafref { + path "../../vdu/id"; + } + } + } + + leaf service-function-chain { + description "Type of node in Service Function Chaining Architecture"; + + type enumeration { + enum UNAWARE; + enum CLASSIFIER; + enum SF; + enum SFF; + } + default "UNAWARE"; + } + + leaf service-function-type { + description + "Type of Service Function. + NOTE: This needs to map with Service Function Type in ODL to + support VNFFG. Service Function Type is manadatory param in ODL + SFC. This is temporarily set to string for ease of use"; + type string; + } + + uses manotypes:monitoring-param; + } + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/vnffgd.yang b/modules/core/mano/models/plugins/yang/vnffgd.yang new file mode 100755 index 0000000..b297569 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnffgd.yang @@ -0,0 +1,71 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnffgd +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vnffgd"; + prefix "vnffgd"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2014-10-27 { + description + "Initial revision. 
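Stepping back to the vnfd module that closed just above, a hand-written skeleton of a catalog entry can make the structure easier to follow. This is purely illustrative: the field names follow the leaves of that module, all values are placeholders, and real entries are produced through the YANG-generated bindings rather than literal dicts.

minimal_vnfd = {
    "id": "11111111-2222-3333-4444-555555555555",   # yang:uuid
    "name": "example-vnf",                          # mandatory
    "version": "1.0",
    "mgmt-interface": {"cp": "mgmt-cp", "port": 2022},
    "connection-point": [{"name": "mgmt-cp"}],
    "vdu": [{
        "id": "66666666-7777-8888-9999-000000000000",
        "name": "example-vdu",
        "image": "example-vdu.qcow2",               # mandatory
        "external-interface": [{
            "name": "eth0",
            "vnfd-connection-point-ref": "mgmt-cp",
            "virtual-interface": {"type": "VIRTIO"},
        }],
    }],
}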
This YANG file defines + the VNF Forwarding Graph Descriptor (VNFFGD)"; + reference + "Derived from earlier versions of base YANG files"; + } + + container vnffgd-catalog { + + list vnffgd { + key "id"; + + leaf name { + description "VNF Forwarding Graph Descriptor name."; + type string; + } + + leaf id { + description "Identifier for the VNFFGD."; + type yang:uuid; + } + + leaf provider { + description "Provider of the VNFFGD."; + type string; + } + + leaf description { + description "Description of the VNFFGD."; + type string; + } + + leaf version { + description "Version of the VNFFGD"; + type string; + } + + //TODO: Add more content here + } + } +} diff --git a/modules/core/mano/models/plugins/yang/vnfr.cli.xml b/modules/core/mano/models/plugins/yang/vnfr.cli.xml new file mode 100755 index 0000000..4f0a109 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfr.cli.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/modules/core/mano/models/plugins/yang/vnfr.tailf.yang b/modules/core/mano/models/plugins/yang/vnfr.tailf.yang new file mode 100644 index 0000000..a1f83aa --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfr.tailf.yang @@ -0,0 +1,29 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnfr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/vnfr-annotation"; + prefix "vnfr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import vnfr { + prefix vnfr; + } + + tailf:annotate "/vnfr:vnfr-catalog" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/vnfr.yang b/modules/core/mano/models/plugins/yang/vnfr.yang new file mode 100755 index 0000000..b38ba21 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfr.yang @@ -0,0 +1,459 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnfr +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vnfr"; + prefix "vnfr"; + + import mano-types { + prefix "manotypes"; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import vnfd { + prefix "vnfd"; + } + + import nsd { + prefix "nsd"; + } + + import vlr { + prefix "vlr"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import ietf-inet-types { + prefix "inet"; + } + + revision 2015-09-10 { + description + "Initial revision. This YANG file defines + the Virtual Network Function Record (VNFR)"; + reference + "Derived from earlier versions of base YANG files"; + } + + grouping virtual-interface { + container virtual-interface { + description + "Container for the virtual interface properties"; + + leaf type { + description + "Specifies the type of virtual interface + between VM and host. + VIRTIO : Use the traditional VIRTIO interface. + PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface. + SR-IOV : Use SR-IOV interface."; + type enumeration { + enum VIRTIO; + enum PCI-PASSTHROUGH; + enum SR-IOV; + } + } + + leaf bandwidth { + description + "Aggregate bandwidth of the NIC."; + type uint64; + } + + leaf ovs-offload { + description + "Defines if the NIC supports OVS offload. + MANDATORY : OVS offload support in the NIC is mandatory. + PREFERRED : OVS offload support in the NIC is preferred."; + type enumeration { + enum MANDATORY; + enum PREFERRED; + } + } + + leaf vendor-id { + description + "Specifies the vendor specific id for + the device. 
This is used when a NIC from + specific HW vendor is required."; + type string; + } + + leaf datapath-library { + description + "Specifies the name and version of the datapath + library the NIC is expected to support."; + type string; + } + + leaf provider-network-name { + description + "Name of the provider network to which this + NIC is attached."; + type string; + } + } + } + + container vnfr-catalog { + config false; + list vnfr { + description + "Virtual Network Function Record (VNFR)."; + key "id"; + unique "name"; + + leaf id { + description "Identifier for the VNFR."; + type yang:uuid; + } + + leaf member-vnf-index-ref { + description "Reference to member VNF index in Network service."; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:constituent-vnfd/nsd:member-vnf-index"; + } + } + + leaf dashboard-url { + description "Dashboard URL"; + type inet:uri; + } + + leaf name { + description "VNFR name."; + type string; + } + + leaf short-name { + description "VNFR short name."; + type string; + } + + leaf vendor { + description "Vendor of the VNFR."; + type string; + } + + leaf description { + description "Description of the VNFR."; + type string; + } + + leaf version { + description "Version of the VNFR"; + type string; + } + + leaf create-time { + description + "Creation timestamp of this Virtual Network + Function. The timestamp is expressed as + seconds since unix epoch - 1970-01-01T00:00:00Z"; + + type uint32; + } + + leaf vnfd-ref { + description "Reference to VNFD"; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + + // Use parameters provided here to configure this VNF + uses nsd:vnf-configuration; + + // Mainly used by Mon-params & dashboard url + container mgmt-interface { + leaf ip-address { + type inet:ip-address; + } + leaf port { + type inet:port-number; + } + } + + list internal-vlr { + key "vlr-ref"; + + leaf vlr-ref { + description "Reference to a VLR record in the VLR catalog"; + type leafref { + path "/vlr:vlr-catalog/vlr:vlr/vlr:id"; + } + } + + leaf-list internal-connection-point-ref { + type leafref { + path "../../vdur/internal-connection-point/id"; + } + } + } + + list connection-point { + key "name"; + description + "List for external connection points. Each VNF has one + or more external connection points. As the name + implies that external connection points are used for + connecting the VNF to other VNFs or to external networks. + Each VNF exposes these connection points to the + orchestrator. The orchestrator can construct network + services by connecting the connection points between + different VNFs. 
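The create-time leaf above stores only a uint32 count of seconds since the Unix epoch. A short sketch of turning that raw value into a readable timestamp on the client side; the helper is illustrative and not part of the record:

from datetime import datetime, timezone

def format_create_time(create_time):
    """Render a VNFR create-time (seconds since 1970-01-01T00:00:00Z) as ISO 8601."""
    return datetime.fromtimestamp(create_time, tz=timezone.utc).isoformat()

# format_create_time(1459468800) -> '2016-04-01T00:00:00+00:00'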
The NFVO will use VLDs and VNFFGs at + the network service level to construct network services."; + + leaf name { + description "Name of the connection point"; + type string; + } + + leaf short-name { + description "Short name of the connection point"; + type string; + } + + leaf type { + description "Type of the connection point."; + type manotypes:connection-point-type; + } + + leaf vlr-ref { + description + "Reference to the VLR associated with this connection point"; + type leafref { + path "/vlr:vlr-catalog/vlr:vlr/vlr:id"; + } + } + + leaf ip-address { + description + "IP address assigned to the external connection point"; + type inet:ip-address; + } + leaf connection-point-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + } + + list vdur { + description "List of Virtual Deployment Units"; + key "id"; + unique "name"; + + leaf id { + description "Unique id for the VDU"; + type yang:uuid; + } + + leaf name { + description "name of the instantiated VDUR"; + type string; + } + + leaf vdu-id-ref { + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd" + + "[vnfd:id = current()/../../vnfr:vnfd-ref]" + + "/vnfd:vdu/vnfd:id"; + } + } + + leaf vim-id { + description "Allocated VM resource id"; + type string; + } + + leaf flavor-id { + description "VIM assigned flavor id"; + type string; + } + + leaf image-id { + description "VIM assigned image id"; + type string; + } + + leaf management-ip { + description "Management IP address"; + type inet:ip-address; + } + + leaf vm-management-ip { + description "VM Private Management IP address"; + type inet:ip-address; + } + + uses manotypes:vm-flavor; + uses manotypes:guest-epa; + uses manotypes:vswitch-epa; + uses manotypes:hypervisor-epa; + uses manotypes:host-epa; + + list internal-connection-point { + key "id"; + description + "List for internal connection points. Each VNFC + has zero or more internal connection points. + Internal connection points are used for connecting + the VNF components internal to the VNF. If a VNF + has only one VNFC, it may not have any internal + connection points."; + + leaf id { + description "Identifier for the internal connection points"; + type yang:uuid; + } + + leaf type { + description "Type of the connection point."; + type manotypes:connection-point-type; + } + + leaf ip-address { + description + "IP address assigned to the external connection point"; + type inet:ip-address; + } + } + + list internal-interface { + description + "List of internal interfaces for the VNF"; + key name; + + leaf name { + description + "Name of internal interface. Note that this + name has only local significance to the VDU."; + type string; + } + + leaf vdur-internal-connection-point-ref { + type leafref { + path "../../internal-connection-point/id"; + } + } + uses virtual-interface; + } + + list external-interface { + description + "List of external interfaces for the VNF. + The external interfaces enable sending + traffic to and from VNF."; + key name; + + leaf name { + description + "Name of the external interface. Note that + this name has only local significance."; + type string; + } + + leaf vnfd-connection-point-ref { + description + "Name of the external connection point."; + type leafref { + path "../../../connection-point/name"; + } + } + uses virtual-interface; + } + leaf operational-status { + description + "The operational status of the VDU + init : The VDU has just started. + vm-init-phase : The VDUs in the VNF is being created in VIM. 
+ vm-alloc-pending : The VM alloc is pending in VIM + running : The VDU is active in VM + terminate : The VDU is being terminated + vm-terminate-phase : The VDU in the VNF is being terminated in VIM. + terminated : The VDU is in the terminated state. + failed : The VDU instantiation failed. + "; + + type enumeration { + rwpb:enum-type "VduOperationalStatus"; + enum init; + enum vm-init-phase; + enum vm-alloc-pending; + enum running; + enum terminate; + enum vl-terminate-phase; + enum terminated; + enum failed; + } + } + } + + uses manotypes:monitoring-param; + + leaf operational-status { + description + "The operational status of the VNFR instance + init : The VNF has just started. + vl-init-phase : The internal VLs in the VNF are being instantiated. + vm-init-phase : The VMs for VDUs in the VNF are being instantiated. + running : The VNF is in running state. + terminate : The VNF is being terminated. + vm-terminate-phase : The VMs in the VNF are being terminated. + vl-terminate-phase : The internal VLs in the VNF are being terminated. + terminated : The VNF is in the terminated state. + failed : The VNF instantiation failed + "; + + type enumeration { + rwpb:enum-type "VnfrOperationalStatus"; + enum init; + enum vl-init-phase; + enum vm-init-phase; + enum running; + enum terminate; + enum vm-terminate-phase; + enum vl-terminate-phase; + enum terminated; + enum failed; + } + } + leaf config-status { + description + "The configuration status of the NS instance + configuring: At least one of the VNFs in this instance is in configuring state + configured: All the VNFs in this NS instance are configured or config-not-needed state + "; + + type enumeration { + enum configuring { + value 1; + } + enum configured { + value 2; + } + enum failed { + value 3; + } + enum config-not-needed { + value 4; + } + } + } + } + } +} + diff --git a/modules/core/mano/rwcm/CMakeLists.txt b/modules/core/mano/rwcm/CMakeLists.txt new file mode 100644 index 0000000..2fe5d3c --- /dev/null +++ b/modules/core/mano/rwcm/CMakeLists.txt @@ -0,0 +1,23 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/28/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME rwcm) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + +set(subdirs + plugins + test + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwcm/plugins/CMakeLists.txt b/modules/core/mano/rwcm/plugins/CMakeLists.txt new file mode 100644 index 0000000..a5203ce --- /dev/null +++ b/modules/core/mano/rwcm/plugins/CMakeLists.txt @@ -0,0 +1,18 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/29/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + yang + rwconman + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwcm/plugins/cli/cli_rwcm.xml b/modules/core/mano/rwcm/plugins/cli/cli_rwcm.xml new file mode 100644 index 0000000..8f2d75b --- /dev/null +++ b/modules/core/mano/rwcm/plugins/cli/cli_rwcm.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/CMakeLists.txt b/modules/core/mano/rwcm/plugins/rwconman/CMakeLists.txt new file mode 100644 index 0000000..b72dee7 --- /dev/null +++ 
b/modules/core/mano/rwcm/plugins/rwconman/CMakeLists.txt @@ -0,0 +1,41 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/28/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwconmantasklet) +set(CONMAN_INSTALL "etc/conman") + +## +# Install translation script in demos +## +install( + FILES + rift/tasklets/${TASKLET_NAME}/xlate_cfg.py + rift/tasklets/${TASKLET_NAME}/xlate_tags.yml + rift/tasklets/${TASKLET_NAME}/juju_if.py + DESTINATION ${CONMAN_INSTALL} + COMPONENT ${PKG_LONG_NAME}) + + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/rwconman_config.py + rift/tasklets/${TASKLET_NAME}/rwconman_events.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py new file mode 100644 index 0000000..143ffc8 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwconmantasklet import ConfigManagerTasklet diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/juju_if.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/juju_if.py new file mode 100755 index 0000000..f6a2ced --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/juju_if.py @@ -0,0 +1,659 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
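The script that follows is driven by a YAML file, documented in the module docstring below: each top-level key names a Juju service and its value holds the optional deploy, config, units and destroy sections. A condensed sketch of that input handling, assuming yaml.safe_load is acceptable (the script itself calls yaml.load on an open stream):

import yaml

def load_service_specs(path):
    """Load the per-service spec file described in the docstring below."""
    with open(path) as stream:
        specs = yaml.safe_load(stream)
    # Top-level keys are Juju service names; values carry the optional
    # 'deploy', 'config', 'units' and 'destroy' sections.
    return specs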
+ + +# +# + +# Part of the code taken from +# https://github.com/chuckbutler/juju_action_api_class/blob/master/juju_actions.py + +""" +This script is used to control a bootstrapped Juju environment +This has been tested only with local environemnt and local charms +Provide a yaml file with the details to this script to execute +Sample yaml file to deploy a service + +ims-a: + deploy: + store: local + directory: /usr/rift/charms/trusty/clearwater-aio-proxy + series: trusty + to: "lxc:0" + + #destroy: true + + # Data under config passed as such during deployment + config: + proxied_ip: 10.0.202.39 + home_domain: "ims.riftio.local" + base_number: "1234567000" + number_count: 1000 + + units: + - unit: + #id: 0 + # Wait for each command to complete + wait: true + # Bail on failure + bail: true + #destroy: true + actions: + - create-user: { number: "1234567001", password: "secret"} + - create-user: { number: "1234567002", password: "secret"} + + - unit: + wait: true + destroy: true + bail: true + actions: + - create-user: { number: "1234567010", password: "secret"} + - create-user: { number: "1234567011", password: "secret"} + +Sample yaml file to destroy a service +clearwater-aio-proxy: + destroy: true + + units: + - unit: + actions: + - delete-user: { number: "1234567001" } + - delete-user: { number: "1234567002" } + - unit: + actions: + - delete-user: { number: "1234567010" } + - delete-user: { number: "1234567011" } +""" + +import logging +import argparse +import yaml +import jujuclient +import sys +import time +import ssl +import os + +ssl_ok = False +ssh_cmd = None +scp_cmd = None + + +class Action(object): + def __init__(self, data): + # I am undecided if we need this + # model_id = "" + self.uuid = data['action']['tag'] + self.data = data # straight from juju api + self.juju_status = data['status'] + + @classmethod + def from_data(cls, data): + o = cls(data=data) + return o + + +def get_service_units(status): + results = {} + services = status.get('Services', {}) + for svc_name, svc_data in services.items(): + units = svc_data['Units'] or {} + sub_to = svc_data['SubordinateTo'] + if not units and sub_to: + for sub in sub_to: + for unit_name, unit_data in \ + (services[sub].get('Units') or {}).items(): + for sub_name, sub_data in \ + (unit_data['Subordinates'] or {}).items(): + if sub_name.startswith(svc_name): + units[sub_name] = sub_data + results[svc_name] = units + return results + + +class ApiEnvironment(jujuclient.Environment): + def actions_available(self, service=None): + args = { + "Type": 'Action', + "Request": 'ServicesCharmActions', + "Params": { + "Entities": [] + } + } + + services = self.status().get('Services', {}) + service_names = [service] if service else services + for name in service_names: + args['Params']['Entities'].append( + { + "Tag": 'service-' + name + } + ) + + return self._rpc(args) + + def actions_list_all(self, service=None): + args = { + "Type": 'Action', + "Request": 'ListAll', + "Params": { + "Entities": [] + } + } + + service_units = get_service_units(self.status()) + service_names = [service] if service else service_units.keys() + units = [] + + for name in service_names: + units += service_units[name].keys() + + for unit in set(units): + args['Params']['Entities'].append( + { + "Tag": "unit-%s" % unit.replace('/', '-'), + } + ) + + return self._rpc(args) + + def actions_enqueue(self, action, receivers, params=None): + args = { + "Type": "Action", + "Request": "Enqueue", + "Params": { + "Actions": [] + } + } + + for receiver in receivers: + 
args['Params']['Actions'].append({ + "Receiver": receiver, + "Name": action, + "Parameters": params or {}, + }) + + return self._rpc(args) + + def actions_cancel(self, uuid): + return self._rpc({ + 'Type': 'Action', + 'Request': 'Cancel', + "Params": { + "Entities": [{'Tag': 'action-' + uuid}] + } + }) + + +class API(object): + def __init__ (self, args, logger): + logger.debug("Args: %s" % args) + + self.file = args.file + stream = open(self.file) + self.yaml = yaml.load(stream) + stream.close() + + self.ca_cert = args.ca_cert + if args.ca_cert is None: + try: + self.ca_cert = self.yaml['ca-cert'] + except KeyError: + logger.warning("Did not get the CA certificate to use") + + endpoint = 'wss://%s:%d' % (args.server.split()[0], int(args.port)) + logger.info("Juju API endpoint %s" % endpoint) + self.env = ApiEnvironment(endpoint, ca_cert=self.ca_cert) + self.env.login(args.password, user=args.user) + #self.actions=jujuclient.Actions(self.env) + self.logger = logger + if args.file: + logger.debug("File %s" % args.file) + + self.server = args.server + self.user = args.user + self.port = args.port + self.deploy_timeout = args.deploy_timeout + self.password = args.password + self.cur_units = 0 + self.req_units = 0 + self.charm = None + + def get_status(self): + return self.env.status() + + def get_annotations(self, services): + ''' + Return dict of (servicename: annotations) for each servicename + in `services`. + ''' + if not services: + return None + + d = {} + for s in services: + d[s] = self.env.get_annotation(s, 'service')['Annotations'] + return d + + def get_actions(self, service=None): + return self.env.actions_list_all(service) + + def get_action_status(self, action_tag): + ''' + responds with the action status, which is one of three values: + + - completed + - pending + - failed + + @param action_tag - the action UUID return from the enqueue method + eg: action-3428e20d-fcd7-4911-803b-9b857a2e5ec9 + ''' + receiver = self.get_actions() + for receiver in receiver['actions']: + if 'actions' in receiver.keys(): + for action_record in receiver['actions']: + if 'action' in action_record.keys(): + if action_record['action']['tag'] == action_tag: + return action_record['status'] + + def cancel_action(self, uuid): + return self.env.actions_cancel(uuid) + + def get_service_units(self): + return get_service_units(self.env.status()) + + def get_action_specs(self): + results = self.env.actions_available() + return _parse_action_specs(results) + + def enqueue_action(self, action, receivers, params): + result = self.env.actions_enqueue(action, receivers, params) + return Action.from_data(result['results'][0]) + + def apply_config(self, service, details): + if self.cur_units == 0: + # Nothing to do + return + if 'config' in details: + self.logger.debug("Config for %s updated to: %s" % (service, details['config'])) + self.env.set_config(service, details['config']) + else: + self.logger.debug("No config section found for %s" % service) + + def deploy_service(self, service, details): + if self.cur_units == 0: + # No units of the service running + if details['deploy'] is not None: + deploy = details['deploy'] + self.logger.debug("Config used for deployment: %s" % details['config']) + if self.req_units > 0: + # Deploy the service + series = 'trusty' + try: + series = deploy['series'] + except KeyError: + self.logger.debug("Using default series %s" % series) + + store_type = 'online' + try: + store_type = deploy['store'] + except KeyError: + self.logger.debug("Using default store type %s" % store_type) + + 
deploy_to = None + try: + deploy_to = deploy['to'] + except KeyError: + self.logger.debug("No deploy machine specified") + + config = None + if 'config' in details: + config = details['config'] + self.logger.debug("Config for %s is %s" % (service, config)) + else: + self.logger.debug("No config section found") + + if store_type == 'local': + try: + directory = deploy['directory'] + prefix='' + try: + prefix=os.environ.get('RIFT_INSTALL') + except KeyError: + self.logger.info("RIFT_INSTALL not set in environemnt") + directory = "%s/%s" % (prefix, deploy['directory']) + if ssl_ok: + self.logger.debug("Local charm settings: dir=%s, series=%s" % (directory, series)) + result = self.env.add_local_charm_dir(directory, series) + url = result['CharmURL'] + else: + os.system('%s mkdir -p /home/ubuntu/charms/trusty' % (ssh_cmd)) + os.system('%s %s ubuntu@%s:/home/ubuntu/charms/trusty' % (scp_cmd, directory, self.server)) + + except: + self.logger.critical('Error deploying local charm %s: %s' % (service, sys.exc_info()[0])) + raise + else: + try: + self.logger.debug("Deploying from online") + url = deploy['url'] + except KeyError: + self.logger.critical("Charm url not specified") + raise + + try: + if ssl_ok: + self.logger.debug("Deploying using: service=%s, url=%s, num_units=%d, to=%s, config=%s" %(service, url, self.req_units, deploy_to, details['config'])) + self.env.deploy(service, url, num_units=self.req_units, config=config, machine_spec=deploy_to) + else: + os.system('%s juju deploy --repository=/home/ubuntu/charms --to %s local:trusty/%s %s' % (ssh_cmd, deploy_to, os.path.basename(directory), service)) + # Apply the config + self.apply_config(service, details) + except: + self.logger.critical('Error deploying %s: %s' % (service, sys.exc_info()[0])) + raise + + elif self.cur_units < self.req_units: + try: + self.env.add_units(service, (self.req_units - self.cur_units)) + except: + self.logger.critical('Error adding units for %s: %s' % (self.name, sys.exc_info()[0])) + raise + + # Wait for the deployed units to start + try: + self.logger.debug("Waiting for units to come up") + self.env.wait_for_units(timeout=self.deploy_timeout) + except: + self.logger.critical('Error starting all units for %s: %s' % (service, sys.exc_info()[0])) + raise + + def execute_on_units(self, service, details): + units = None + try: + units = details['units'] + except KeyError: + self.logger.info("No units for service %s defined" % service) + return + self.logger.debug("Service units def: %s" % units) + + try: + services = get_service_units(self.env.status()) + depl_units = services[service] + except KeyError: + self.logger.error("Unable to get units %s" % services) + raise + except: + self.logger.critical("Error on getting service details for service %s" % service) + raise + + # Slightly complicated logic to support define actions for + # specific units. 
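# NOTE: units_ids below is created as a plain list, so the index assignment
# units_ids[conf_id] raises IndexError for any unit definition that carries an
# explicit 'id'; only KeyError is caught around it. A dict keyed by the
# configured id (a sketch, not the original code):
#     units_ids = {}
#     units_ids[conf_id] = unit_conf['unit']
# would accept arbitrary ids, with the later units_ids[idx] lookup catching
# KeyError instead of IndexError.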
+ # Get all the unit definitions specified + units_ids = [] + units_no_ids = [] + for unit_conf in units: + try: + conf_id = unit_conf['id'] + self.logger.debug("Unit conf id %d" % conf_id) + units_ids[conf_id] = unit_conf['unit'] + except KeyError: + units_no_ids.append(unit_conf['unit']) + continue + + # Go through each unit deployed and apply the actions to the unit + # if the id is specified, else the first unit available + no_id_idx = 0 + for unit, status in depl_units.items(): + self.logger.debug("Execute on unit %s with %s" % (unit, status)) + idx = int(unit[unit.index('/')+1:]) + self.logger.debug("Unit index is %d" % idx) + try: + unit_conf = units_ids[idx] + self.logger.debug("Found unit config %s" % unit_conf) + except IndexError: + unit_conf = units_no_ids[no_id_idx] + self.logger.debug("Applying on unit %s" % unit_conf) + no_id_idx += 1 + + bail = False + try: + bail = unit_conf['bail'] + except KeyError: + pass + wait = False + try: + wait = unit_conf['wait'] + except KeyError: + pass + self.logger.debug("Bail is %s, Wait is %s" % (bail, wait)) + + unit_name = "unit-%s-%d" % (service, idx) + for entry in unit_conf['actions']: + for action, params in entry.items(): + self.logger.debug("Sending action: %s, %s, %s" % (action, unit_name, params)) + #result = self.actions.enqueue_units([unit], action, params) + try: + result = self.enqueue_action(action, [unit_name], params) + act_status = self.get_action_status(result.uuid) + except Exception as e: + self.logger.critical("Error applying the action %s on %s with params %s" % (action, unit, params)) + raise e + + self.logger.debug("Action %s status is %s on %s" % (action, act_status, unit)) + while wait and ((act_status == 'pending') or (act_status == 'running')): + time.sleep(1) + act_status = self.get_action_status(result.uuid) + self.logger.debug("Action %s status is %s on %s" % (action, act_status, unit)) + if bail and (act_status == 'failed'): + self.logger.critical("Error applying action %s on %s with %s" % (action, unit, params)) + raise RuntimeError("Error applying action %s on %s with %s" % (action, unit, params)) + + def remove_units(self, service, details): + if self.cur_units == 0: + # Nothing to do + return + try: + units = details['units'] + except KeyError: + self.logger.debug("remove_units: No units specified") + return + + for unit in units: + self.logger.debug("Check destroy units for %s, %s" %(service, unit)) + try: + if unit['destroy'] == False: + continue + except KeyError: + continue + try: + idx = unit['id'] + except KeyError: + self.logger.error("Need to specify unit id to destroy") + continue + + unit = '%s/%d' % (service, idx) + self.logger.debug("Destroy unit %s" % unit) + try: + status = self.env.status()['Services'][service]['Units'][unit] + except KeyError: + status = None + self.logger.debug("Status of unit %s" % status) + if status is None: + continue + unit_name = "unit-%s-%d" %(service, idx) + self.logger.debug("Destroying unit %s" % unit_name) + self.env.remove_units([unit_name]) + + def execute (self): + for service, details in self.yaml.items(): + self.cur_units = 0 + self.req_units = 0 + self.charm = service + try: + self.charm = details['charm'] + except KeyError: + pass + + self.logger.debug("Service: %s - %s" % (service, details)) + services = self.env.status()['Services'] + self.logger.debug("Services : %s" % services) + cur_units = 0 + try: + cur_units = len(services[service]['Units']) + except KeyError: + pass + req_units = 0 + try: + req_units = len(details['units']) + except KeyError: + 
# Deploy atleast one unit + req_units = 1 + + self.logger.debug("Units requested: %d, deployed: %d" % (req_units, cur_units)) + self.cur_units = cur_units + self.req_units = req_units + destroy = False + try: + destroy = details['destroy'] + except KeyError: + pass + if destroy: + if cur_units == 0: + # Nothing to do + return + # Execute any commands for units before destroy as this could have + # side effects for something like proxy charms + self.execute_on_units(service, details) + self.logger.debug("Destroying service %s" % service) + self.env.destroy_service(service) + return + # Apply config on already running units first + self.apply_config(service, details) + self.deploy_service(service, details) + self.execute_on_units(service, details) + # Removing units after execute to run any cleanup actions + self.remove_units(service, details) + +def _parse_action_specs(api_results): + results = {} + + r = api_results['results'] + for service in r: + servicetag = service['servicetag'] + service_name = servicetag[8:] # remove 'service-' prefix + specs = {} + if service['actions']['ActionSpecs']: + for spec_name, spec_def in \ + service['actions']['ActionSpecs'].items(): + specs[spec_name] = ActionSpec(spec_name, spec_def) + results[service_name] = specs + return results + + +def _parse_action_properties(action_properties_dict): + results = {} + + d = action_properties_dict + for prop_name, prop_def in d.items(): + results[prop_name] = ActionProperty(prop_name, prop_def) + return results + + +class Dict(dict): + def __getattr__(self, name): + return self[name] + + +class ActionSpec(Dict): + def __init__(self, name, data_dict): + params = data_dict['Params'] + super(ActionSpec, self).__init__( + name=name, + title=params['title'], + description=params['description'], + properties=_parse_action_properties(params['properties']) + ) + + +class ActionProperty(Dict): + types = { + 'string': str, + 'integer': int, + 'boolean': bool, + 'number': float, + } + type_checks = { + str: 'string', + int: 'integer', + bool: 'boolean', + float: 'number', + } + + def __init__(self, name, data_dict): + super(ActionProperty, self).__init__( + name=name, + description=data_dict.get('description', ''), + default=data_dict.get('default', ''), + type=data_dict.get( + 'type', self._infer_type(data_dict.get('default'))), + ) + + def _infer_type(self, default): + if default is None: + return 'string' + for _type in self.type_checks: + if isinstance(default, _type): + return self.type_checks[_type] + return 'string' + + def to_python(self, value): + f = self.types.get(self.type) + return f(value) if f else value + + + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Perform actions on Juju') + parser.add_argument("-s", "--server", required=True, help="Juju API server") + parser.add_argument("-u", "--user", default='user-admin', help="User, default user-admin") + parser.add_argument("-p", "--password", default='nfvjuju', help="Password for the user") + parser.add_argument("-P", "--port", default="17070", help="Port number, default 17070") + parser.add_argument("-c", "--ca-cert", default=None, help="CA certificate for the server"); + parser.add_argument("-T", "--deploy-timeout", default=600, help="Timeout when bringing up units, default 600") + parser.add_argument("--debug", action="store_true") + parser.add_argument("file", help="File with commands, config parameters and actions") + args = parser.parse_args() + + if args.debug: + logging.basicConfig(level=logging.DEBUG) + else: + 
logging.basicConfig(level=logging.ERROR) + logger = logging.getLogger("juju-client") + + #Workaround for certificae failure, insecure. Does not work with ssl module in Python 3.3 + if sys.version_info >= (3,4): + ssl._create_default_https_context = ssl._create_unverified_context + ssl_ok=True + else: + ssh_cmd = 'ssh -i %s %s@%s' % ('~/.ssh/id_grunt', 'ubuntu', args.server) + scp_cmd = 'scp -r -i %s %s@%s' % ('~/.ssh/id_grunt', 'ubuntu', args.server) + + ssl_ok=False + api = API(args, logger) + api.execute() \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py new file mode 100644 index 0000000..c3f7ee7 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py @@ -0,0 +1,953 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import sys +import subprocess +import yaml +import os + +from gi.repository import ( + RwDts as rwdts, + RwConmanYang as conmanY, + ProtobufC, +) + +import rift.tasklets + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + +def get_vnf_unique_name(nsr_name, vnfr_short_name, member_vnf_index): + return "{}.{}.{}".format(nsr_name, vnfr_short_name, member_vnf_index) + +class RaiseException(Exception): + pass + +def log_this_vnf(vnf_cfg): + log_vnf = "" + used_item_list = ['nsr_name', 'vnfr_name', 'member_vnf_index', 'mgmt_ip_address'] + for item in used_item_list: + if item in vnf_cfg: + if item == 'mgmt_ip_address': + log_vnf += "({})".format(vnf_cfg[item]) + else: + log_vnf += "{}/".format(vnf_cfg[item]) + return log_vnf + +class ConfigManagerConfig(object): + def __init__(self, dts, log, loop, parent): + self._dts = dts + self._log = log + self._loop = loop + self._parent = parent + self._nsr_dict = {} + self.pending_cfg = {} + self.terminate_cfg = {} + self.pending_tasks = [] # User for NSRid get retry (mainly excercised at restart case) + self._config_xpath = "C,/cm-config" + self._opdata_xpath = "D,/rw-conman:cm-state" + + self.cm_config = conmanY.SoConfig() + # RO specific configuration + self.ro_config = {} + for key in self.cm_config.ro_endpoint.fields: + self.ro_config[key] = None + + # Initialize cm-state + self.cm_state = {} + self.cm_state['cm_nsr'] = [] + self.cm_state['states'] = "Initialized" + + def add_to_pending_tasks(self, task): + try: + self.pending_tasks.append(task) + if len(self.pending_tasks) == 1: + self._loop.create_task(self.ConfigManagerConfig_pending_loop()) + # TBD - change to info level + self._log.debug("Started pending_loop!") + except Exception as e: + self._log.error("Failed adding to pending tasks as (%s)", str(e)) + + def del_from_pending_tasks(self, task): + try: + self.pending_tasks.remove(task) + except Exception as e: + self._log.error("Failed removing from pending tasks as (%s)", str(e)) + + @asyncio.coroutine + def 
ConfigManagerConfig_pending_loop(self): + loop_sleep = 2 + while True: + """ + This pending task queue is ordred by events, + must finish previous task successfully to be able to go on to the next task + """ + if self.pending_tasks: + task = self.pending_tasks.pop() + done = False + if 'nsrid' in task: + nsrid = task['nsrid'] + self._log.debug("Will execute pending task for NSR id(%s)", nsrid) + try: + # Try to configure this NSR + task['retries'] -= 1 + done = yield from self.config_NSR(nsrid) + except Exception as e: + self._log.error("Failed(%s) configuring NSR(%s), retries remained:%d!", + str(e), nsrid, task['retries']) + pass + if done: + self._log.debug("Finished pending task NSR id(%s):", nsrid) + else: + self._log.error("Failed configuring NSR(%s), retries remained:%d!", + nsrid, task['retries']) + + # Failed, re-insert (append at the end) this failed task to be retried later + # If any retries remained. + if task['retries']: + self.pending_tasks.append(task) + else: + self._log.debug("Stopped pending_loop!") + break + yield from asyncio.sleep(loop_sleep, loop=self._loop) + + @asyncio.coroutine + def register(self): + self.register_cm_config() + yield from self.register_cm_state_opdata() + + def register_cm_config(self): + def on_apply(dts, acg, xact, action, scratch): + """Apply the Service Orchestration configuration""" + if xact.id is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + self._log.debug("cm-config (xact: %s) (action: %s)", + xact, action) + + if xact.id in self.terminate_cfg: + msg = self.terminate_cfg.pop(xact.id, None) + if msg is not None: + msg_dict = msg.as_dict() + for delnsr in msg_dict['nsr']: + nsr_id = delnsr.get('id', None) + asyncio.ensure_future(self.terminate_NSR(nsr_id, xact), loop=self._loop) + return + + if xact.id not in self.pending_cfg: + self._log.debug("Could not find transaction data for transaction-id") + return + + # Pop saved config (from on_prepare) + msg = self.pending_cfg.pop(xact.id, None) + self._log.debug("Apply cm-config: %s", msg) + self.cm_state['states'] += ", cm-config" + + # Process entire configuration + ro_cfg = self.ro_config + + msg_dict = msg.as_dict() + self._log.debug("msg_dict is %s: %s", type(msg_dict), msg_dict) + ''' Process Resource Orchestrator endpoint config ''' + if 'ro_endpoint' in msg_dict: + self._log.debug("ro-endpoint = %s", msg_dict['ro_endpoint']) + for key, value in msg_dict['ro_endpoint'].items(): + ro_cfg[key] = value + self._log.debug("ro-config: key=%s, value=%s", key, ro_cfg[key]) + + # If all RO credentials are configured, initiate connection + + ro_complete = True + for key, value in ro_cfg.items(): + if value is None: + ro_complete = False + self._log.warning("key %s is not set", key) + # Get the ncclient handle (OR interface) + orif = self._parent._event._orif + # Get netconf connection + if ro_complete is True and orif._manager is None: + self._log.info("Connecting to RO = %s!", ro_cfg['ro_ip_address']) + asyncio.wait(asyncio.ensure_future(orif.connect(), loop=self._loop)) + #asyncio.ensure_future(orif.connect(), loop=self._loop) + self._log.info("Connected to RO = %s!", ro_cfg['ro_ip_address']) + self.cm_state['states'] += ", RO connected" + else: + self._log.warning("Already connected to RO, ignored!") + + if 'nsr' in msg_dict: + for addnsr in msg_dict['nsr']: + ''' Process Initiate NSR ''' + 
nsr_id = addnsr.get('id', None) + if nsr_id != None: + #asyncio.ensure_future(self.config_NSR(nsr_id), loop=self._loop) + # Add this to pending task + self.add_to_pending_tasks({'nsrid' : nsr_id, 'retries' : 10}) + + return + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + action = xact_info.handle.get_query_action() + + self._log.debug("Received cm-config: xact.id=%s, msg=%s, action=%s", xact.id, msg, action) + # print("<1<<< self.pending_cfg:", self.pending_cfg) + # fref = ProtobufC.FieldReference.alloc() + # pb_msg = msg.to_pbcm() + # fref.goto_whole_message(pb_msg) + # print(">>>>>> fref.is_field_deleted():", fref.is_field_deleted()) + + msg_dict = msg.as_dict() + pending_q = self.pending_cfg + if action == rwdts.QueryAction.DELETE: + pending_q = self.terminate_cfg + if 'nsr' in msg_dict: + # Do this only if NSR is deleted + # fref = ProtobufC.FieldReference.alloc() + # pb_msg = msg.to_pbcm() + # fref.goto_whole_message(pb_msg) + # print(">>>>>> fref.is_field_deleted():", fref.is_field_deleted()) + # # Got DELETE action in prepare callback + # if fref.is_field_deleted(): + + # NS is(are) terminated + for delnsr in msg_dict['nsr']: + nsr_id = delnsr.get('id', None) + # print('>>>>>>> Will delete pending NSR id={}'.format(nsr_id)) + if nsr_id is not None: + # print(">>>>>>> self.pending_cfg:", self.pending_cfg) + # Find this NSR id if it is scheduled to be added. + for i,pending in self.pending_cfg.items(): + p_dict = pending.as_dict() + if 'nsr' in p_dict: + for p_nsr in p_dict['nsr']: + p_nsr_id = p_nsr.get('id', None) + if p_nsr_id == nsr_id: + # Found it, remove + self.pending_cfg.pop(i, None) + pending_q = None + + # Enqueue the msg in proper queue + if pending_q is not None: + pending_q[xact.id] = msg + acg.handle.prepare_complete_ok(xact_info.handle) + + self._log.debug("Registering for ro-config using xpath: %s", + self._config_xpath) + + acg_handler = rift.tasklets.AppConfGroup.Handler(on_apply = on_apply) + + with self._dts.appconf_group_create(handler=acg_handler) as acg: + try: + self._pool_reg = acg.register(xpath=self._config_xpath, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + self._log.info("Successfully registered (%s)", self._config_xpath) + except Exception as e: + self._log.error("Failed to register as (%s)", e) + + + @asyncio.coroutine + def register_cm_state_opdata(self): + + def state_to_string(state): + state_dict = { + conmanY.RecordState.INIT : "init", + conmanY.RecordState.RECEIVED : "received", + conmanY.RecordState.CFG_PROCESS : "cfg_process", + conmanY.RecordState.CFG_PROCESS_FAILED : "cfg_process_failed", + conmanY.RecordState.CFG_SCHED : "cfg_sched", + conmanY.RecordState.CFG_DELAY : "cfg_delay", + conmanY.RecordState.CONNECTING : "connecting", + conmanY.RecordState.FAILED_CONNECTION : "failed_connection", + conmanY.RecordState.NETCONF_CONNECTED : "netconf_connected", + conmanY.RecordState.NETCONF_SSH_CONNECTED : "netconf_ssh_connected", + conmanY.RecordState.RESTCONF_CONNECTED : "restconf_connected", + conmanY.RecordState.CFG_SEND : "cfg_send", + conmanY.RecordState.CFG_FAILED : "cfg_failed", + conmanY.RecordState.READY_NO_CFG : "ready_no_cfg", + conmanY.RecordState.READY : "ready", + } + return state_dict[state] + + def prepare_show_output(): + self.show_output = conmanY.CmOpdata() + self.show_output.states = self.cm_states + nsr_dict = self._nsr_dict + + for nsr_obj in nsr_dict.values(): + cm_nsr = self.show_output.cm_nsr.add() + # Fill in this NSR from nsr object + cm_nsr.id = 
nsr_obj._nsr_id + cm_nsr.state = state_to_string(nsr_obj.state) + if nsr_obj.state == conmanY.RecordState.CFG_PROCESS_FAILED: + continue + cm_nsr.name = nsr_obj.nsr_name + + # Fill in each VNFR from this nsr object + vnfr_list = nsr_obj._vnfr_list + for vnfr in vnfr_list: + vnf_cfg = vnfr['vnf_cfg'] + + # Create & fill vnfr + cm_vnfr = cm_nsr.cm_vnfr.add() + cm_vnfr.id = vnfr['id'] + cm_vnfr.name = vnfr['name'] + cm_vnfr.state = state_to_string(vnf_cfg['cm_state']) + + # Fill in VNF management interface + cm_vnfr.mgmt_interface.ip_address = vnf_cfg['mgmt_ip_address'] + cm_vnfr.mgmt_interface.cfg_type = vnf_cfg['config_method'] + cm_vnfr.mgmt_interface.port = vnf_cfg['port'] + + # Fill in VNF configuration details + cm_vnfr.cfg_location = vnf_cfg['cfg_file'] + + # Fill in each connection-point for this VNF + if 'connection_point' in vnfr: + cp_list = vnfr['connection_point'] + for cp_item_dict in cp_list: + cm_cp = cm_vnfr.connection_point.add() + cm_cp.name = cp_item_dict['name'] + cm_cp.ip_address = cp_item_dict['ip_address'] + + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + + self._log.debug("Received cm-state: msg=%s, action=%s", msg, action) + + if action == rwdts.QueryAction.READ: + show_output = conmanY.CmOpdata() + show_output.from_dict(self.cm_state) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, + xpath=self._opdata_xpath, + msg=show_output) + else: + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.info("Registering for cm-opdata xpath: %s", + self._opdata_xpath) + + try: + handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + yield from self._dts.register(xpath=self._opdata_xpath, + handler=handler, + flags=rwdts.Flag.PUBLISHER) + self._log.info("Successfully registered for opdata(%s)", self._opdata_xpath) + except Exception as e: + self._log.error("Failed to register for opdata as (%s)", e) + + @asyncio.coroutine + def process_nsd_vnf_configuration(self, nsr_obj, vnfr): + + def get_cfg_file_extension(method, configuration_options): + ext_dict = { + "netconf" : "xml", + "script" : { + "bash" : "sh", + "expect" : "exp", + }, + "juju" : "yml" + } + + if method == "netconf": + return ext_dict[method] + elif method == "script": + return ext_dict[method][configuration_options['script_type']] + elif method == "juju": + return ext_dict[method] + else: + return "cfg" + + ## This is how the YAML file should look like, This routine will be called for each VNF, so keep appending the file. + ## priority order is determined by the number, hence no need to generate the file in that order. A dictionary will be + ## used that will take care of the order by number. 
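## For illustration only (not part of the original comment): once parsed, that
## dictionary is keyed by the integer priority, so ordered application amounts to
##     for priority in sorted(nsr_cfg_input_params_dict):
##         apply_config(nsr_cfg_input_params_dict[priority])   # apply_config is hypothetical
## The code further below achieves the same by iterating range(1, num_vnfs + 1).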
+ ''' + 1 : <== This is priority + name : trafsink_vnfd + member_vnf_index : 2 + configuration_delay : 120 + configuration_type : netconf + configuration_options : + username : admin + password : admin + port : 2022 + target : running + 2 : + name : trafgen_vnfd + member_vnf_index : 1 + configuration_delay : 0 + configuration_type : netconf + configuration_options : + username : admin + password : admin + port : 2022 + target : running + ''' + + # Save some parameters needed as short cuts in flat structure (Also generated) + vnf_cfg = vnfr['vnf_cfg'] + # Prepare unique name for this VNF + vnf_cfg['vnf_unique_name'] = get_vnf_unique_name(vnf_cfg['nsr_name'], vnfr['short_name'], vnfr['member_vnf_index_ref']) + + nsr_obj.this_nsr_dir = os.path.join(self._parent.cfg_dir, vnf_cfg['nsr_name'], self._nsr['name_ref']) + if not os.path.exists(nsr_obj.this_nsr_dir): + os.makedirs(nsr_obj.this_nsr_dir) + nsr_obj.cfg_path_prefix = '{}/{}_{}'.format(nsr_obj.this_nsr_dir, vnfr['short_name'], vnfr['member_vnf_index_ref']) + nsr_vnfr = '{}/{}_{}'.format(vnf_cfg['nsr_name'], vnfr['short_name'], vnfr['member_vnf_index_ref']) + + # Get vnf_configuration from vnfr + vnf_config = vnfr['vnf_configuration'] + + self._log.debug("vnf_configuration = %s", vnf_config) + #print("### TBR ### vnf_configuration = ", vnf_config) + + # Create priority dictionary + cfg_priority_order = 0 + if ('input_params' in vnf_config and + 'config_priority' in vnf_config['input_params']): + cfg_priority_order = vnf_config['input_params']['config_priority'] + + # All conditions must be met in order to process configuration + if (cfg_priority_order != 0 and + vnf_config['config_type'] is not None and + vnf_config['config_type'] != 'none' and + 'config_template' in vnf_config): + + # Create all sub dictionaries first + config_priority = { + 'name' : vnfr['short_name'], + 'member_vnf_index' : vnfr['member_vnf_index_ref'], + } + + if 'config_delay' in vnf_config['input_params']: + config_priority['configuration_delay'] = vnf_config['input_params']['config_delay'] + vnf_cfg['config_delay'] = config_priority['configuration_delay'] + + configuration_options = {} + method = vnf_config['config_type'] + config_priority['configuration_type'] = method + vnf_cfg['config_method'] = method + + cfg_opt_list = ['port', 'target', 'script_type', 'ip_address', 'user', 'secret'] + for cfg_opt in cfg_opt_list: + if cfg_opt in vnf_config[method]: + configuration_options[cfg_opt] = vnf_config[method][cfg_opt] + vnf_cfg[cfg_opt] = configuration_options[cfg_opt] + + cfg_opt_list = ['mgmt_ip_address', 'username', 'password'] + for cfg_opt in cfg_opt_list: + if cfg_opt in vnf_config['config_access']: + configuration_options[cfg_opt] = vnf_config['config_access'][cfg_opt] + vnf_cfg[cfg_opt] = configuration_options[cfg_opt] + + # TBD - see if we can neatly include the config in "input_params" file, no need though + #config_priority['config_template'] = vnf_config['config_template'] + # Create config file + vnf_cfg['cfg_template'] = '{}_{}_template.cfg'.format(nsr_obj.cfg_path_prefix, config_priority['configuration_type']) + vnf_cfg['cfg_file'] = '{}.{}'.format(nsr_obj.cfg_path_prefix, get_cfg_file_extension(method, configuration_options)) + vnf_cfg['xlate_script'] = os.path.join(self._parent.cfg_dir, 'xlate_cfg.py') + vnf_cfg['juju_script'] = os.path.join(self._parent.cfg_dir, 'juju_if.py') + + try: + # Now write this template into file + with open(vnf_cfg['cfg_template'], "w") as cf: + cf.write(vnf_config['config_template']) + except Exception as e: + 
self._log.error("Processing NSD, failed to generate configuration template : %s (Error : %s)", + vnf_config['config_template'], str(e)) + raise + + self._log.debug("VNF endpoint so far: %s", vnf_cfg) + + # Populate filled up dictionary + config_priority['configuration_options'] = configuration_options + nsr_obj.nsr_cfg_input_params_dict[cfg_priority_order] = config_priority + nsr_obj.num_vnfs_to_cfg += 1 + nsr_obj._vnfr_dict[vnf_cfg['vnf_unique_name']] = vnfr + nsr_obj._vnfr_dict[vnfr['id']] = vnfr + + self._log.debug("input_params = %s", nsr_obj.nsr_cfg_input_params_dict) + else: + self._log.info("NS/VNF %s is not to be configured by Configuration Manager!", nsr_vnfr) + yield from nsr_obj.update_vnf_cm_state(vnfr, conmanY.RecordState.READY_NO_CFG) + + # Update the cm-state + nsr_obj.populate_vm_state_from_vnf_cfg() + + @asyncio.coroutine + def config_NSR(self, id): + nsr_dict = self._nsr_dict + self._log.info("Initiate NSR fetch, id = %s", id) + + try: + if id not in nsr_dict: + nsr_obj = ConfigManagerNSR(self._log, self._loop, self, id) + nsr_dict[id] = nsr_obj + else: + self._log.info("NSR(%s) is already initialized!", id) + nsr_obj = nsr_dict[id] + except Exception as e: + self._log.error("Failed creating NSR object for (%s) as (%s)", id, str(e)) + raise + + # Populate this object with netconfd API from RO + + # Get the ncclient handle (OR interface) + orif = self._parent._event._orif + + if orif is None: + self._log.error("OR interface not initialized") + try: + # Fetch NSR + nsr = yield from orif.get_nsr(id) + self._log.debug("nsr = (%s/%s)", type(nsr), nsr) + if ('operational_status' in nsr and nsr['operational_status'] == "running"): + self._nsr = nsr + yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.RECEIVED) + else: + self._log.info("NSR(%s) is not ready yet!", nsr['nsd_name_ref']) + return False + + try: + # Parse NSR + if nsr is not None: + nsr_obj.set_nsr_name(nsr['nsd_name_ref']) + nsr_dir = os.path.join(self._parent.cfg_dir, nsr_obj.nsr_name) + self._log.info("Checking NS config directory: %s", nsr_dir) + if not os.path.isdir(nsr_dir): + os.makedirs(nsr_dir) + # self._log.critical("NS %s is not to be configured by Service Orchestrator!", nsr_obj.nsr_name) + # yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.READY_NO_CFG) + # return + + for vnfr_id in nsr['constituent_vnfr_ref']: + self._log.debug("Fetching VNFR (%s)", vnfr_id) + vnfr = yield from orif.get_vnfr(vnfr_id) + self._log.debug("vnfr = (%s/ %s)", type(vnfr), vnfr) + #print("### TBR ### vnfr = ", vnfr) + nsr_obj.add_vnfr(vnfr) + yield from self.process_nsd_vnf_configuration(nsr_obj, vnfr) + except Exception as e: + self._log.error("Failed processing NSR (%s) as (%s)", nsr_obj.nsr_name, str(e)) + yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED) + raise + + # Generate config_input_params.yaml (For debug reference) + nsr_cfg_input_file = os.path.join(nsr_obj.this_nsr_dir, "configuration_input_params.yml") + with open(nsr_cfg_input_file, "w") as yf: + yf.write(yaml.dump(nsr_obj.nsr_cfg_input_params_dict, default_flow_style=False)) + + self._log.debug("Starting to configure each VNF") + + ## Check if this NS has input parametrs + self._log.info("Checking NS configuration order: %s", nsr_cfg_input_file) + + if os.path.exists(nsr_cfg_input_file): + # Apply configuration is specified order + try: + # Fetch number of VNFs + num_vnfs = nsr_obj.num_vnfs_to_cfg + + # Go in loop to configure by specified order + self._log.info("Using Dynamic configuration input parametrs for NS: 
%s", nsr_obj.nsr_name) + + # cfg_delay = nsr_obj.nsr_cfg_input_params_dict['configuration_delay'] + # if cfg_delay: + # self._log.info("Applying configuration delay for NS (%s) ; %d seconds", + # nsr_obj.nsr_name, cfg_delay) + # yield from asyncio.sleep(cfg_delay, loop=self._loop) + + for i in range(1,num_vnfs+1): + if i not in nsr_obj.nsr_cfg_input_params_dict: + self._log.warning("NS (%s) - Ordered configuration is missing order-number: %d", nsr_obj.nsr_name, i) + else: + vnf_input_params_dict = nsr_obj.nsr_cfg_input_params_dict[i] + + # Make up vnf_unique_name with vnfd name and member index + #vnfr_name = "{}.{}".format(nsr_obj.nsr_name, vnf_input_params_dict['name']) + vnf_unique_name = get_vnf_unique_name( + nsr_obj.nsr_name, + vnf_input_params_dict['name'], + str(vnf_input_params_dict['member_vnf_index']), + ) + self._log.info("NS (%s) : VNF (%s) - Processing configuration input params", + nsr_obj.nsr_name, vnf_unique_name) + + # Find vnfr for this vnf_unique_name + if vnf_unique_name not in nsr_obj._vnfr_dict: + self._log.error("NS (%s) - Can not find VNF to be configured: %s", nsr_obj.nsr_name, vnf_unique_name) + else: + # Save this unique VNF's config input parameters + nsr_obj.vnf_input_params_dict[vnf_unique_name] = vnf_input_params_dict + nsr_obj.ConfigVNF(nsr_obj._vnfr_dict[vnf_unique_name]) + + # Now add the entire NS to the pending config list. + self._parent.pending_cfg.append(nsr_obj) + + except Exception as e: + self._log.error("Failed processing input parameters for NS (%s) as %s", nsr_obj.nsr_name, str(e)) + raise + else: + self._log.error("No configuration input parameters for NSR (%s)", nsr_obj.nsr_name) + + except Exception as e: + #print("##>> config_NSR Failed as:", str(e)) + self._log.error("Failed to configure NS (%s) as (%s)", nsr_obj.nsr_name, str(e)) + yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED) + raise + + return True + + @asyncio.coroutine + def terminate_NSR(self, id, xact): + nsr_dict = self._nsr_dict + if id not in nsr_dict: + self._log.error("NSR(%s) does not exist!", id) + return + else: + # Remove this object from global list + nsr_obj = nsr_dict.pop(id, None) + + # Remove this NSR if we have it on pending task list + for task in self.pending_tasks: + if task['nsrid'] == id: + self.del_from_pending_tasks(task) + #print("#### NSR({}/{}) is removed from pending task list".format(nsr_obj.nsr_name, id)) + + # Remove this NS cm-state from global status list + self.cm_state['cm_nsr'].remove(nsr_obj.cm_nsr) + + # Also remove any scheduled configuration event + for nsr_obj_p in self._parent.pending_cfg: + if nsr_obj_p == nsr_obj: + assert id == nsr_obj_p._nsr_id + #self._parent.pending_cfg.remove(nsr_obj_p) + # Mark this as being deleted so we do not try to configure it if we are in cfg_delay (will wake up and continue to process otherwise) + nsr_obj_p.being_deleted = True + #print("#### Removed scheduled configuration for NSR({})".format(nsr_obj.nsr_name)) + self._log.info("Removed scheduled configuration for NSR(%s)", nsr_obj.nsr_name) + + #print("#### NSR({}/{}) is deleted".format(nsr_obj.nsr_name, id)) + self._log.info("NSR(%s/%s) is deleted", nsr_obj.nsr_name, id) + + +class ConfigManagerNSR(object): + def __init__(self, log, loop, parent, id): + self._log = log + self._loop = loop + self._rwcal = None + self._vnfr_dict = {} + self._cp_dict = {} + self._nsr_id = id + self._parent = parent + self._log.info("Instantiated NSR entry for id=%s", id) + self.nsr_cfg_input_params_dict = {} + self.vnf_input_params_dict = {} + 
self.num_vnfs_to_cfg = 0 + self._vnfr_list = [] + self.vnf_cfg_list = [] + self.this_nsr_dir = None + self.being_deleted = False + + # Initialize cm-state for this NS + self.cm_nsr = {} + self.cm_nsr['cm_vnfr'] = [] + self.cm_nsr['id'] = id + self.cm_nsr['state'] = self.state_to_string(conmanY.RecordState.INIT) + + self.set_nsr_name('Not Set') + + # Add this NSR cm-state object to global cm-state + parent.cm_state['cm_nsr'].append(self.cm_nsr) + + def set_nsr_name(self, name): + self.nsr_name = name + self.cm_nsr['name'] = name + + def xlate_conf(self, vnfr, vnf_cfg): + + # If configuration type is not already set, try to read from input params + if vnf_cfg['interface_type'] is None: + # Prepare unique name for this VNF + vnf_unique_name = get_vnf_unique_name( + vnf_cfg['nsr_name'], + vnfr['short_name'], + vnfr['member_vnf_index_ref'], + ) + + # Find this particular (unique) VNF's config input params + if (vnf_unique_name in self.vnf_input_params_dict): + vnf_cfg_input_params_dict = self.vnf_input_params_dict[vnf_unique_name] + vnf_cfg['interface_type'] = vnf_cfg_input_params_dict['configuration_type'] + if 'configuration_options' in vnf_cfg_input_params_dict: + cfg_opts = vnf_cfg_input_params_dict['configuration_options'] + for key, value in cfg_opts.items(): + vnf_cfg[key] = value + + cfg_path_prefix = '{}/{}/{}_{}'.format( + self._parent._parent.cfg_dir, + vnf_cfg['nsr_name'], + vnfr['short_name'], + vnfr['member_vnf_index_ref'], + ) + + vnf_cfg['cfg_template'] = '{}_{}_template.cfg'.format(cfg_path_prefix, vnf_cfg['interface_type']) + vnf_cfg['cfg_file'] = '{}.cfg'.format(cfg_path_prefix) + vnf_cfg['xlate_script'] = self._parent._parent.cfg_dir + '/xlate_cfg.py' + + self._log.debug("VNF endpoint so far: %s", vnf_cfg) + + self._log.info("Checking cfg_template %s", vnf_cfg['cfg_template']) + if os.path.exists(vnf_cfg['cfg_template']): + return True + return False + + def ConfigVNF(self, vnfr): + + vnf_cfg = vnfr['vnf_cfg'] + vnf_cm_state = self.find_or_create_vnfr_cm_state(vnf_cfg) + + if (vnf_cm_state['state'] == self.state_to_string(conmanY.RecordState.READY_NO_CFG) + or + vnf_cm_state['state'] == self.state_to_string(conmanY.RecordState.READY)): + self._log.warning("NS/VNF (%s/%s) is already configured! 
Skipped.", self.nsr_name, vnfr['name']) + return + + #UPdate VNF state + vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS) + + # Now translate the configuration for iP addresses + try: + # Add cp_dict members (TAGS) for this VNF + self._cp_dict['rw_mgmt_ip'] = vnf_cfg['mgmt_ip_address'] + self._cp_dict['rw_username'] = vnf_cfg['username'] + self._cp_dict['rw_password'] = vnf_cfg['password'] + + script_cmd = 'python3 {} -i {} -o {} -x "{}"'.format(vnf_cfg['xlate_script'], vnf_cfg['cfg_template'], vnf_cfg['cfg_file'], repr(self._cp_dict)) + self._log.debug("xlate script command (%s)", script_cmd) + #xlate_msg = subprocess.check_output(script_cmd).decode('utf-8') + xlate_msg = subprocess.check_output(script_cmd, shell=True).decode('utf-8') + self._log.info("xlate script output (%s)", xlate_msg) + except Exception as e: + vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS_FAILED) + self._log.error("Failed to execute translation script for VNF: %s with (%s)", log_this_vnf(vnf_cfg), str(e)) + return + + self._log.info("Applying config to VNF: %s = %s!", log_this_vnf(vnf_cfg), vnf_cfg) + try: + self.vnf_cfg_list.append(vnf_cfg) + self._log.debug("Scheduled configuration!") + vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_SCHED) + except Exception as e: + self._log.error("Failed apply_vnf_config to VNF: %s as (%s)", log_this_vnf(vnf_cfg), str(e)) + vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS_FAILED) + raise + + def add(self, nsr): + self._log.info("Adding NS Record for id=%s", id) + self._nsr = nsr + + def sample_cm_state(self): + return ( + { + 'cm_nsr': [ + { + 'cm_vnfr': [ + { + 'cfg_location': 'location1', + 'cfg_type': 'script', + 'connection_point': [ + {'ip_address': '1.1.1.1', 'name': 'vnf1cp1'}, + {'ip_address': '1.1.1.2', 'name': 'vnf1cp2'} + ], + 'id': 'vnfrid1', + 'mgmt_interface': {'ip_address': '7.1.1.1', + 'port': 1001}, + 'name': 'vnfrname1', + 'state': 'init' + }, + { + 'cfg_location': 'location2', + 'cfg_type': 'netconf', + 'connection_point': [{'ip_address': '2.1.1.1', 'name': 'vnf2cp1'}, + {'ip_address': '2.1.1.2', 'name': 'vnf2cp2'}], + 'id': 'vnfrid2', + 'mgmt_interface': {'ip_address': '7.1.1.2', + 'port': 1001}, + 'name': 'vnfrname2', + 'state': 'init'} + ], + 'id': 'nsrid1', + 'name': 'nsrname1', + 'state': 'init'} + ], + 'states': 'Initialized, ' + }) + + def populate_vm_state_from_vnf_cfg(self): + # Fill in each VNFR from this nsr object + vnfr_list = self._vnfr_list + for vnfr in vnfr_list: + vnf_cfg = vnfr['vnf_cfg'] + vnf_cm_state = self.find_vnfr_cm_state(vnfr['id']) + + if vnf_cm_state: + # Fill in VNF management interface + vnf_cm_state['mgmt_interface']['ip_address'] = vnf_cfg['mgmt_ip_address'] + vnf_cm_state['mgmt_interface']['port'] = vnf_cfg['port'] + + # Fill in VNF configuration details + vnf_cm_state['cfg_type'] = vnf_cfg['config_method'] + vnf_cm_state['cfg_location'] = vnf_cfg['cfg_file'] + + # Fill in each connection-point for this VNF + cp_list = vnfr['connection_point'] + for cp_item_dict in cp_list: + vnf_cm_state['connection_point'].append( + { + 'name' : cp_item_dict['name'], + 'ip_address' : cp_item_dict['ip_address'], + } + ) + + def state_to_string(self, state): + state_dict = { + conmanY.RecordState.INIT : "init", + conmanY.RecordState.RECEIVED : "received", + conmanY.RecordState.CFG_PROCESS : "cfg_process", + conmanY.RecordState.CFG_PROCESS_FAILED : "cfg_process_failed", + conmanY.RecordState.CFG_SCHED : "cfg_sched", + 
conmanY.RecordState.CFG_DELAY : "cfg_delay", + conmanY.RecordState.CONNECTING : "connecting", + conmanY.RecordState.FAILED_CONNECTION : "failed_connection", + conmanY.RecordState.NETCONF_CONNECTED : "netconf_connected", + conmanY.RecordState.NETCONF_SSH_CONNECTED : "netconf_ssh_connected", + conmanY.RecordState.RESTCONF_CONNECTED : "restconf_connected", + conmanY.RecordState.CFG_SEND : "cfg_send", + conmanY.RecordState.CFG_FAILED : "cfg_failed", + conmanY.RecordState.READY_NO_CFG : "ready_no_cfg", + conmanY.RecordState.READY : "ready", + } + return state_dict[state] + + def find_vnfr_cm_state(self, id): + if self.cm_nsr['cm_vnfr']: + for vnf_cm_state in self.cm_nsr['cm_vnfr']: + if vnf_cm_state['id'] == id: + return vnf_cm_state + return None + + def find_or_create_vnfr_cm_state(self, vnf_cfg): + vnfr = vnf_cfg['vnfr'] + vnf_cm_state = self.find_vnfr_cm_state(vnfr['id']) + + if vnf_cm_state is None: + # Not found, Create and Initialize this VNF cm-state + vnf_cm_state = { + 'id' : vnfr['id'], + 'name' : vnfr['short_name'], + 'state' : self.state_to_string(conmanY.RecordState.RECEIVED), + 'mgmt_interface' : + { + 'ip_address' : vnf_cfg['mgmt_ip_address'], + 'port' : vnf_cfg['port'], + }, + 'cfg_type' : vnf_cfg['config_method'], + 'cfg_location' : vnf_cfg['cfg_file'], + 'connection_point' : [], + } + self.cm_nsr['cm_vnfr'].append(vnf_cm_state) + + return vnf_cm_state + + @asyncio.coroutine + def update_vnf_cm_state(self, vnfr, state): + if vnfr: + vnf_cm_state = self.find_vnfr_cm_state(vnfr['id']) + if vnf_cm_state: + vnf_cm_state['state'] = self.state_to_string(state) + else: + self._log.error("No opdata found for NS/VNF:%s/%s!", self.nsr_name, vnfr['name']) + else: + self._log.error("No VNFR supplied for state update (NS=%s)!", self.nsr_name) + + @asyncio.coroutine + def update_ns_cm_state(self, state): + self.cm_nsr['state'] = self.state_to_string(state) + + def add_vnfr(self, vnfr): + + if vnfr['id'] not in self._vnfr_dict: + self._log.info("NSR(%s) : Adding VNF Record for name=%s, id=%s", self._nsr_id, vnfr['name'], vnfr['id']) + # Add this vnfr to the list for show, or single traversal + self._vnfr_list.append(vnfr) + else: + self._log.warning("NSR(%s) : VNF Record for name=%s, id=%s already exists, overwriting", self._nsr_id, vnfr['name'], vnfr['id']) + + # Make vnfr available by id as well as by name + unique_name = get_vnf_unique_name(self.nsr_name, vnfr['short_name'], vnfr['member_vnf_index_ref']) + self._vnfr_dict[unique_name] = vnfr + self._vnfr_dict[vnfr['id']] = vnfr + + # Create vnf_cfg dictionary with default values + vnf_cfg = { + 'nsr_obj' : self, + 'vnfr' : vnfr, + 'nsr_name' : self.nsr_name, + 'nsr_id' : self._nsr_id, + 'vnfr_name' : vnfr['short_name'], + 'member_vnf_index' : vnfr['member_vnf_index_ref'], + 'port' : 0, + 'username' : 'admin', + 'password' : 'admin', + 'config_method' : 'None', + 'protocol' : 'None', + 'mgmt_ip_address' : '0.0.0.0', + 'cfg_file' : 'None', + 'script_type' : 'bash', + } + + vnfr['vnf_cfg'] = vnf_cfg + self.find_or_create_vnfr_cm_state(vnf_cfg) + + + ''' + Build the connection-points list for this VNF (self._cp_dict) + ''' + # Populate global CP list self._cp_dict from VNFR + if 'connection_point' not in vnfr: + return + + cp_list = vnfr['connection_point'] + + self._cp_dict[vnfr['member_vnf_index_ref']] = {} + for cp_item_dict in cp_list: + # Populate global dictionary + self._cp_dict[cp_item_dict['name']] = cp_item_dict['ip_address'] + + # Populate unique member specific dictionary + 
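+            # For illustration (hypothetical values, borrowed from
+            # rwconman_test_config_template.cfg): once ConfigVNF() also adds the rw_*
+            # management tags, self._cp_dict resembles
+            #   {1: {'test/cp1': '11.0.0.1'}, 'test/cp1': '11.0.0.3',
+            #    'rw_mgmt_ip': '1.1.1.1', 'rw_username': 'admin', 'rw_password': 'admin'}
+            # and is passed to xlate_cfg.py through its -x argument.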
self._cp_dict[vnfr['member_vnf_index_ref']][cp_item_dict['name']] = cp_item_dict['ip_address'] + + return \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py new file mode 100644 index 0000000..11f67bc --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py @@ -0,0 +1,481 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import ncclient +import ncclient.asyncio_manager +import tornado.httpclient as tornadoh +import asyncio.subprocess +import asyncio +import time +import sys +import os, stat + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfrYang', '1.0') + +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwConmanYang as conmanY, + RwNsrYang as nsrY, + RwVnfrYang as vnfrY, +) + +import rift.tasklets + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + +def log_this_vnf(vnf_cfg): + log_vnf = "" + used_item_list = ['nsr_name', 'vnfr_name', 'member_vnf_index', 'mgmt_ip_address'] + for item in used_item_list: + if item in vnf_cfg: + if item == 'mgmt_ip_address': + log_vnf += "({})".format(vnf_cfg[item]) + else: + log_vnf += "{}/".format(vnf_cfg[item]) + return log_vnf + +class ConfigManagerROifConnectionError(Exception): + pass +class ScriptError(Exception): + pass + +class ConfigManagerROif(object): + + def __init__(self, log, loop, parent): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + + try: + self._model = RwYang.Model.create_libncx() + self._model.load_schema_ypbc(nsrY.get_schema()) + self._model.load_schema_ypbc(vnfrY.get_schema()) + except Exception as e: + self._log.error("Error generating models %s", str(e)) + + self.ro_config = self._parent._config.ro_config + + @property + def manager(self): + if self._manager is None: + raise + + return self._manager + + @asyncio.coroutine + def connect(self, timeout_secs=60): + ro_cfg = self.ro_config + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + + try: + self._log.info("Attemping Resource Orchestrator netconf connection.") + + self._manager = yield from ncclient.asyncio_manager.asyncio_connect( + loop=self._loop, + host=ro_cfg['ro_ip_address'], + port=ro_cfg['ro_port'], + username=ro_cfg['ro_username'], + password=ro_cfg['ro_password'], + allow_agent=False, + look_for_keys=False, + hostkey_verify=False, + ) + self._log.info("Connected to Resource Orchestrator netconf") + return + + except ncclient.transport.errors.SSHError as e: + self._log.error("Netconf connection to Resource Orchestrator ip %s failed: %s", + ro_cfg['ro_ip_address'], str(e)) + + yield from asyncio.sleep(2, loop=self._loop) + + self._manager = None + raise 
ConfigManagerROifConnectionError( + "Failed to connect to Resource Orchestrator within %s seconds" % timeout_secs + ) + + @asyncio.coroutine + def get_nsr(self, id): + self._log.debug("get_nsr() locals: %s", locals()) + xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']".format(id) + #xpath = "/ns-instance-opdata/nsr" + self._log.debug("Attempting to get NSR using xpath: %s", xpath) + response = yield from self._manager.get( + filter=('xpath', xpath), + ) + response_xml = response.data_xml.decode() + + self._log.debug("Received NSR(%s) response: %s", id, str(response_xml)) + + try: + nsr = nsrY.YangData_Nsr_NsInstanceOpdata_Nsr() + nsr.from_xml_v2(self._model, response_xml) + except Exception as e: + self._log.error("Failed to load nsr from xml e=%s", str(e)) + return + + self._log.debug("Deserialized NSR response: %s", nsr) + + return nsr.as_dict() + + @asyncio.coroutine + def get_vnfr(self, id): + xpath = "/vnfr-catalog/vnfr[id='{}']".format(id) + self._log.info("Attempting to get VNFR using xpath: %s", xpath) + response = yield from self._manager.get( + filter=('xpath', xpath), + ) + response_xml = response.data_xml.decode() + + self._log.debug("Received VNFR(%s) response: %s", id, str(response_xml)) + + vnfr = vnfrY.YangData_Vnfr_VnfrCatalog_Vnfr() + vnfr.from_xml_v2(self._model, response_xml) + + self._log.debug("Deserialized VNFR response: %s", vnfr) + + return vnfr.as_dict() + +class ConfigManagerEvents(object): + def __init__(self, dts, log, loop, parent): + self._dts = dts + self._log = log + self._loop = loop + self._parent = parent + self._nsr_xpath = "/cm-state/cm-nsr" + + def register(self): + try: + self._orif = ConfigManagerROif(self._log, self._loop, self._parent) + self.register_cm_rpc() + except Exception as e: + self._log.debug("Failed to register (%s)", e) + + + def register_cm_rpc(self): + + try: + self._rpc_hdl = self._dts.register( + xpath=self._nsr_xpath, + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=self.prepare_update_nsr), + flags=rwdts.Flag.PUBLISHER) + except Exception as e: + self._log.debug("Failed to register xpath(%s) as (%s)", self._nsr_xpath, e) + + @asyncio.coroutine + def prepare_update_nsr(self, xact_info, action, ks_path, msg): + """ Prepare callback for the RPC """ + self._log("Received prepare_update_nsr with action=%s, msg=%s", action, msg) + + # Fetch VNFR for each VNFR id in NSR + + @asyncio.coroutine + def update_vnf_state(self, vnf_cfg, state): + nsr_obj = vnf_cfg['nsr_obj'] + yield from nsr_obj.update_vnf_cm_state(vnf_cfg['vnfr'], state) + + @asyncio.coroutine + def apply_vnf_config(self, vnf_cfg): + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_DELAY) + yield from asyncio.sleep(vnf_cfg['config_delay'], loop=self._loop) + # See if we are still alive! 
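+        # terminate_NSR() sets nsr_obj.being_deleted while we may be sleeping in the
+        # configuration delay above; bail out here rather than configure a VNF whose
+        # NS is already being torn down.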
+ if vnf_cfg['nsr_obj'].being_deleted: + # Don't do anything, just return + return True + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_SEND) + try: + if vnf_cfg['config_method'] == 'netconf': + self._log.info("Creating ncc handle for VNF cfg = %s!", vnf_cfg) + self.ncc = ConfigManagerVNFnetconf(self._log, self._loop, self, vnf_cfg) + if vnf_cfg['protocol'] == 'ssh': + yield from self.ncc.connect_ssh() + else: + yield from self.ncc.connect() + yield from self.ncc.apply_edit_cfg() + elif vnf_cfg['config_method'] == 'rest': + if self.rcc is None: + self._log.info("Creating rcc handle for VNF cfg = %s!", vnf_cfg) + self.rcc = ConfigManagerVNFrestconf(self._log, self._loop, self, vnf_cfg) + self.ncc.apply_edit_cfg() + elif vnf_cfg['config_method'] == 'script': + self._log.info("Executing script for VNF cfg = %s!", vnf_cfg) + scriptc = ConfigManagerVNFscriptconf(self._log, self._loop, self, vnf_cfg) + yield from scriptc.apply_edit_cfg() + elif vnf_cfg['config_method'] == 'juju': + self._log.info("Executing juju config for VNF cfg = %s!", vnf_cfg) + jujuc = ConfigManagerVNFjujuconf(self._log, self._loop, self._parent, vnf_cfg) + yield from jujuc.apply_edit_cfg() + else: + self._log.error("Unknown configuration method(%s) received for %s", + vnf_cfg['config_method'], vnf_cfg['vnf_unique_name']) + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_FAILED) + return True + + #Update VNF state + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.READY) + self._log.info("Successfully applied configuration to VNF: %s", + log_this_vnf(vnf_cfg)) + except Exception as e: + self._log.error("Applying configuration(%s) file(%s) to VNF: %s failed as: %s", + vnf_cfg['config_method'], + vnf_cfg['cfg_file'], + log_this_vnf(vnf_cfg), + str(e)) + #raise + return False + + return True + +class ConfigManagerVNFscriptconf(object): + + def __init__(self, log, loop, parent, vnf_cfg): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + self._vnf_cfg = vnf_cfg + + #@asyncio.coroutine + def apply_edit_cfg(self): + vnf_cfg = self._vnf_cfg + self._log.debug("Attempting to apply scriptconf to VNF: %s", log_this_vnf(vnf_cfg)) + try: + st = os.stat(vnf_cfg['cfg_file']) + os.chmod(vnf_cfg['cfg_file'], st.st_mode | stat.S_IEXEC) + #script_msg = subprocess.check_output(vnf_cfg['cfg_file'], shell=True).decode('utf-8') + + proc = yield from asyncio.create_subprocess_exec( + vnf_cfg['script_type'], vnf_cfg['cfg_file'], + stdout=asyncio.subprocess.PIPE) + script_msg = yield from proc.stdout.read() + rc = yield from proc.wait() + + self._log.debug("Debug config script output (%s)", script_msg) + if rc != 0: + raise ScriptError( + "script config returned error code : %s" % rc + ) + + except Exception as e: + self._log.error("Error (%s) while executing script config for VNF: %s", + str(e), log_this_vnf(vnf_cfg)) + raise + +class ConfigManagerVNFrestconf(object): + + def __init__(self, log, loop, parent, vnf_cfg): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + self._vnf_cfg = vnf_cfg + + def fetch_handle(self, response): + if response.error: + self._log.error("Failed to send HTTP config request - %s", response.error) + else: + self._log.debug("Sent HTTP config request - %s", response.body) + + @asyncio.coroutine + def apply_edit_cfg(self): + vnf_cfg = self._vnf_cfg + self._log.debug("Attempting to apply restconf to VNF: %s", log_this_vnf(vnf_cfg)) + try: + http_c = tornadoh.AsyncHTTPClient() + # TBD + # Read the config 
entity from file? + # Convert connectoin-point? + http_c.fetch("http://", self.fetch_handle) + except Exception as e: + self._log.error("Error (%s) while applying HTTP config", str(e)) + +class ConfigManagerVNFnetconf(object): + + def __init__(self, log, loop, parent, vnf_cfg): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + self._vnf_cfg = vnf_cfg + + self._model = RwYang.Model.create_libncx() + self._model.load_schema_ypbc(conmanY.get_schema()) + + @asyncio.coroutine + def connect(self, timeout_secs=120): + vnf_cfg = self._vnf_cfg + start_time = time.time() + self._log.debug("connecting netconf .... %s", vnf_cfg) + while (time.time() - start_time) < timeout_secs: + + try: + self._log.info("Attemping netconf connection to VNF: %s", log_this_vnf(vnf_cfg)) + + self._manager = yield from ncclient.asyncio_manager.asyncio_connect( + loop=self._loop, + host=vnf_cfg['mgmt_ip_address'], + port=vnf_cfg['port'], + username=vnf_cfg['username'], + password=vnf_cfg['password'], + allow_agent=False, + look_for_keys=False, + hostkey_verify=False, + ) + + self._log.info("Netconf connected to VNF: %s", log_this_vnf(vnf_cfg)) + return + + except ncclient.transport.errors.SSHError as e: + yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.FAILED_CONNECTION) + self._log.error("Netconf connection to VNF: %s, failed: %s", + log_this_vnf(vnf_cfg), str(e)) + + yield from asyncio.sleep(2, loop=self._loop) + + raise ConfigManagerROifConnectionError( + "Failed to connect to VNF: %s within %s seconds" % + (log_this_vnf(vnf_cfg), timeout_secs) + ) + + @asyncio.coroutine + def connect_ssh(self, timeout_secs=120): + vnf_cfg = self._vnf_cfg + start_time = time.time() + + if (self._manager != None and self._manager.connected == True): + self._log.debug("Disconnecting previous session") + self._manager.close_session + + self._log.debug("connecting netconf via SSH .... 
%s", vnf_cfg) + while (time.time() - start_time) < timeout_secs: + + try: + yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.CONNECTING) + self._log.debug("Attemping netconf connection to VNF: %s", log_this_vnf(vnf_cfg)) + + self._manager = ncclient.asyncio_manager.manager.connect_ssh( + host=vnf_cfg['mgmt_ip_address'], + port=vnf_cfg['port'], + username=vnf_cfg['username'], + password=vnf_cfg['password'], + allow_agent=False, + look_for_keys=False, + hostkey_verify=False, + ) + + yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.NETCONF_SSH_CONNECTED) + self._log.debug("netconf over SSH connected to VNF: %s", log_this_vnf(vnf_cfg)) + return + + except ncclient.transport.errors.SSHError as e: + yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.FAILED_CONNECTION) + self._log.error("Netconf connection to VNF: %s, failed: %s", + log_this_vnf(vnf_cfg), str(e)) + + yield from asyncio.sleep(2, loop=self._loop) + + raise ConfigManagerROifConnectionError( + "Failed to connect to VNF: %s within %s seconds" % + (log_this_vnf(vnf_cfg), timeout_secs) + ) + + @asyncio.coroutine + def apply_edit_cfg(self): + vnf_cfg = self._vnf_cfg + self._log.debug("Attempting to apply netconf to VNF: %s", log_this_vnf(vnf_cfg)) + + if self._manager is None: + self._log.error("Netconf is not connected to VNF: %s, aborting!", log_this_vnf(vnf_cfg)) + return + + # Get config file contents + try: + with open(vnf_cfg['cfg_file']) as f: + configuration = f.read() + except Exception as e: + self._log.error("Reading contents of the configuration file(%s) failed: %s", vnf_cfg['cfg_file'], str(e)) + return + + try: + self._log.debug("apply_edit_cfg to VNF: %s", log_this_vnf(vnf_cfg)) + xml = '{}'.format(configuration) + response = yield from self._manager.edit_config(xml, target='running') + if hasattr(response, 'xml'): + response_xml = response.xml + else: + response_xml = response.data_xml.decode() + + self._log.debug("apply_edit_cfg response: %s", response_xml) + if '' in response_xml: + raise ConfigManagerROifConnectionError("apply_edit_cfg response has rpc-error : %s", + response_xml) + + self._log.debug("apply_edit_cfg Successfully applied configuration {%s}", xml) + except: + raise + +class ConfigManagerVNFjujuconf(object): + + def __init__(self, log, loop, parent, vnf_cfg): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + self._vnf_cfg = vnf_cfg + + #@asyncio.coroutine + def apply_edit_cfg(self): + vnf_cfg = self._vnf_cfg + self._log.debug("Attempting to apply juju conf to VNF: %s", log_this_vnf(vnf_cfg)) + try: + args = ['python3', + vnf_cfg['juju_script'], + '--server', vnf_cfg['mgmt_ip_address'], + '--user', vnf_cfg['user'], + '--password', vnf_cfg['secret'], + '--port', str(vnf_cfg['port']), + vnf_cfg['cfg_file']] + self._log.error("juju script command (%s)", args) + + proc = yield from asyncio.create_subprocess_exec( + *args, + stdout=asyncio.subprocess.PIPE) + juju_msg = yield from proc.stdout.read() + rc = yield from proc.wait() + + if rc != 0: + raise ScriptError( + "Juju config returned error code : %s" % rc + ) + + self._log.debug("Juju config output (%s)", juju_msg) + except Exception as e: + self._log.error("Error (%s) while executing juju config", str(e)) + raise \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg 
new file mode 100644 index 0000000..4510ffa --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg @@ -0,0 +1,27 @@ +# This template has all supported TAGs. +# This template can be used as input to the xlate_cfg.py script as follows: + +# python3 ./xlate_cfg.py -i ./rwconman_test_config_template.cfg -o ./rwconman_test_config.cfg -x "{1: {'test/cp1': '11.0.0.1'}, 2: {'test/cp1': '11.0.0.2'}, 'rw_mgmt_ip': '1.1.1.1', 'rw_username': 'admin', 'test/cp1': '11.0.0.3', 'rw_password': 'admin'}" + + +# This is error +#0. + +# Following are simple TAGs +1. This is Management IP: +2. This is Username: +3. This is Password: +4. This is globally unique connection point: + +# Following are colon separated complex TAGs +5. This is connection point for a given VNF with unique member index: +6. This is converting connection point IP address into network address: +7. This is converting connection point IP address into boadcast address: + +# Following generated tuple with original connectino point name (Global only) +8. This is not used anywhere: + +# Following test all of the above in single line +9. All at once: START| | | | | | | | |END + + diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py new file mode 100755 index 0000000..14bbf71 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py @@ -0,0 +1,180 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +''' +This file - ConfigManagerTasklet() +| ++--|--> ConfigurationManager() + | + +--> rwconman_config.py - ConfigManagerConfig() + | | + | +--> ConfigManagerNSR() + | + +--> rwconman_events.py - ConfigManagerEvents() + | + +--> ConfigManagerROif() + +''' + +import asyncio +import logging +import os + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwConmanYang', '1.0') + + +from gi.repository import ( + RwDts as rwdts, + RwConmanYang as conmanY, +) + +import rift.tasklets + +from . import rwconman_config as Config +from . 
import rwconman_events as Event + +class ConfigurationManager(object): + def __init__(self, log, loop, dts): + self._log = log + self._loop = loop + self._dts = dts + self.cfg_sleep = True + self.cfg_dir = os.path.join(os.environ["RIFT_INSTALL"], "etc/conman") + self._config = Config.ConfigManagerConfig(self._dts, self._log, self._loop, self) + self._event = Event.ConfigManagerEvents(self._dts, self._log, self._loop, self) + self.pending_cfg = [] + + @asyncio.coroutine + def update_vnf_state(self, vnf_cfg, state): + nsr_obj = vnf_cfg['nsr_obj'] + yield from nsr_obj.update_vnf_cm_state(vnf_cfg['vnfr'], state) + + @asyncio.coroutine + def update_ns_state(self, nsr_obj, state): + yield from nsr_obj.update_ns_cm_state(state) + + @asyncio.coroutine + def register(self): + yield from self._config.register() + self._event.register() + + @asyncio.coroutine + def configuration_handler(): + while True: + #self._log.debug("Pending Configuration = %s", self.pending_cfg) + if self.pending_cfg: + # pending_cfg is nsr_obj list + nsr_obj = self.pending_cfg[0] + if nsr_obj.being_deleted is False: + vnf_cfg_list = nsr_obj.vnf_cfg_list + while True: + if vnf_cfg_list: + vnf_cfg = vnf_cfg_list[0] + self._log.info("Applying Pending Configuration for NS/VNF = %s/%s", nsr_obj.nsr_name, vnf_cfg) + try: + done = yield from self._event.apply_vnf_config(vnf_cfg) + if done: + vnf_cfg_list.remove(vnf_cfg) + else: + # Do not update nsr state, since config failed for at least one VNF + nsr_obj = None + break + except Exception as e: + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_FAILED) + self._log.info("Failed(%s) to Apply Pending Configuration for VNF = %s, will retry", e, vnf_cfg) + # Do not update nsr state, since config failed for at least one VNF + nsr_obj = None + # Do not attempt the next VNF config, there might be dependancies (hence config order) + break + else: + # Done iterating thru each VNF in this NS + break + + if nsr_obj is not None: + yield from self.update_ns_state(nsr_obj, conmanY.RecordState.READY) + # Now delete this NS from pending + self.pending_cfg.pop(0) + + yield from asyncio.sleep(1, loop=self._loop) + asyncio.ensure_future(configuration_handler(), loop=self._loop) + +class ConfigManagerTasklet(rift.tasklets.Tasklet): + def __init__(self, *args, **kwargs): + super(ConfigManagerTasklet, self).__init__(*args, **kwargs) + self._dts = None + self._con_man = None + + def start(self): + super(ConfigManagerTasklet, self).start() + self.log.setLevel(logging.DEBUG) + self.log.info("Starting ConfigManagerTasklet") + + self.log.debug("Registering with dts") + + self._dts = rift.tasklets.DTS(self.tasklet_info, + conmanY.get_schema(), + self.loop, + self.on_dts_state_change) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def init(self): + self._log.info("Initializing the Service Orchestrator tasklet") + self._con_man = ConfigurationManager(self.log, + self.loop, + self._dts) + yield from self._con_man.register() + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + 
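+            # Only INIT and RUN need application handlers; other dts states (e.g. CONFIG)
+            # just advance the dts state machine through the switch map above.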
} + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py new file mode 100644 index 0000000..f83a3f8 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +''' +This script will go through the input conffiguration template and convert all the matching "regular expression" and "strings" +specified in xlate_cp_list & xlate_str_list with matching IP addresses passed in as dictionary to this script. + +-i Configuration template +-o Output final configuration complete with IP addresses +-x Xlate(Translate dictionary in string format +-t TAGS to be translated + +''' + +import sys +import getopt +import ast +import re +import yaml +import netaddr + +from inspect import getsourcefile +import os.path + +xlate_dict = None + +def xlate_cp_list(line, cp_list): + for cp_string in cp_list: + match = re.search(cp_string, line) + if match is not None: + # resolve IP address using Connection Point dictionary + resolved_ip = xlate_dict[match.group(1)] + if resolved_ip is None: + print("No matching CP found: ", match.group(1)) + exit(2) + else: + line = line[:match.start()] + resolved_ip + line[match.end():] + return line + +def xlate_colon_list(line, colon_list): + for ucp_string in colon_list: + #print("Searching :", ucp_string) + match = re.search(ucp_string, line) + if match is not None: + #print("match :", match.group()) + # resolve IP address using Connection Point dictionary for specified member (unique) index + ucp_str_list = match.group(1).split(':') + #print("matched = {}, split list = {}".format(match.group(1), ucp_str_list)) + if len(ucp_str_list) != 2: + print("Invalid TAG in the configuration: ", match.group(1)) + exit(2) + + # Unique Connection Point translation to IP + if ucp_string.startswith(' + +# Literal string translations +xlate_str_list : + - + - + - + +# This list contains 2 tags separated by colon (:) +xlate_colon_list : + # Fetch CP from the member_index dictionary (I.e. 
CP of a particular VNF) + - + # Generate network address from CP address and mask (mask is expected to be a hard coded number in config) + - + # Generate broadcast address from CP address and mask (mask is expected to be a hard coded number in config) + - + +# This translates connection point name and generates tuple with name:resolved IP +xlate_cp_to_tuple_list : + - + diff --git a/modules/core/mano/rwcm/plugins/rwconman/rwconmantasklet.py b/modules/core/mano/rwcm/plugins/rwconman/rwconmantasklet.py new file mode 100755 index 0000000..99f19e0 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rwconmantasklet.py @@ -0,0 +1,29 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwconmantasklet +class Tasklet(rift.tasklets.rwconmantasklet.ConfigManagerTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/yang/CMakeLists.txt b/modules/core/mano/rwcm/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..9e814b7 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/yang/CMakeLists.txt @@ -0,0 +1,30 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/28/2015 +# + +## +# Yang targets +## + +rift_add_yang_target( + TARGET rw_conman_yang + YANG_FILES rw-conman.yang + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + mano_yang_gen + DEPENDS + mano_yang +) + +## +# Install the XML file +## +install( + FILES ../cli/cli_rwcm.xml + DESTINATION usr/data/manifest + COMPONENT ${PKG_LONG_NAME} +) + diff --git a/modules/core/mano/rwcm/plugins/yang/rw-conman.tailf.yang b/modules/core/mano/rwcm/plugins/yang/rw-conman.tailf.yang new file mode 100644 index 0000000..ab34dbd --- /dev/null +++ b/modules/core/mano/rwcm/plugins/yang/rw-conman.tailf.yang @@ -0,0 +1,22 @@ +module rw-conman-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-conman-annotation"; + prefix "rw-conman-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-conman { + prefix conman; + } + + tailf:annotate "/conman:cm-state" { + tailf:callpoint base_show; + } + +} \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/yang/rw-conman.yang b/modules/core/mano/rwcm/plugins/yang/rw-conman.yang new file mode 100755 index 0000000..a7a6fc2 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/yang/rw-conman.yang @@ -0,0 +1,236 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-conman.yang + * @author Manish Patel + * @date 2015/10/27 + * @brief Service Orchestrator configuration yang + */ + +module rw-conman +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-conman"; + prefix "rw-conman"; + + import rw-pb-ext { + prefix 
"rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import nsr { + prefix "nsr"; + } + + import vnfr { + prefix "vnfr"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-base { + prefix "manobase"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-10-27 { + description + "Initial revision."; + } + + // typedef ro-endpoint-method { + // type enumeration { + // enum netconf; + // enum restconf; + // } + // } + + grouping ro-endpoint { + // leaf ro-endpoint-method { + // description "interface between CM & RO, defaults to netconf"; + // type ro-endpoint-method; + // default netconf; + // } + leaf ro-ip-address { + type inet:ip-address; + description "IP Address"; + default "127.0.0.1"; + } + leaf ro-port { + type inet:port-number; + description "Port Number"; + default 2022; + } + leaf ro-username { + description "RO endpoint username"; + type string; + default "admin"; + } + leaf ro-password { + description "RO endpoint password"; + type string; + default "admin"; + } + } + + grouping vnf-cfg-items { + leaf configuration-file { + description "Location of the confguration file on CM system"; + type string; + } + leaf translator-script { + description "Script that translates the templates in the configuration-file using VNFR information + Currently, we only use IP address translations. + configuration will use connection point name instead of IP addresses."; + type string; + } + } + + container cm-config { + description "Service Orchestrator specific configuration"; + rwpb:msg-new "SoConfig"; + rwcli:new-mode "cm-config"; + + container ro-endpoint { + description "Resource Orchestrator endpoint ip address"; + rwpb:msg-new "RoEndpoint"; + uses ro-endpoint; + } + + //uses vnf-cfg-items; + + list nsr { + key "id"; + leaf id { + description "Indicates NSR bringup complete, now initiate configuration of the NSR"; + type yang:uuid; + } + } + }// cm-config + + // =================== SHOW ================== + typedef record-state { + type enumeration { + enum init; + enum received; + enum cfg-delay; + enum cfg-process; + enum cfg-process-failed; + enum cfg-sched; + enum connecting; + enum failed-connection; + enum netconf-connected; + enum netconf-ssh-connected; + enum restconf-connected; + enum cfg-send; + enum cfg-failed; + enum ready-no-cfg; + enum ready; + } + } + + // TBD: Do we need this typedef, currently not used anywhere + typedef cfg-type { + type enumeration { + enum none; + enum scriptconf; + enum netconf; + enum restconf; + enum jujuconf; + } + } + + + // This is also used by RO (Resource Orchestrator) to indicate NSR is ready + // It will only fill in IDs + container cm-state { + rwpb:msg-new "CmOpdata"; + config false; + description "CM NS & VNF states"; + + leaf states { + description "CM various states"; + type string; + } + + list cm-nsr { + description "List of NS Records"; + key "id"; + leaf id { + type yang:uuid; + } + leaf name { + description "NSR name."; + type string; + } + leaf state { + description "State of NSR"; + type record-state; + } + + list cm-vnfr { + description "List of VNF Records within NS Record"; + key "id"; + leaf id { + type yang:uuid; + } + leaf name { + description "VNFR name."; + type string; + } + leaf state { + description "Last known state of this VNFR"; + type record-state; + } + container mgmt-interface { + leaf ip-address { + type inet:ip-address; + } + leaf port { + type inet:port-number; + } + } + leaf 
cfg-type { + type string; + } + leaf cfg-location { + type inet:uri; + } + list connection-point { + key "name"; + leaf name { + description "Connection Point name"; + type string; + } + leaf ip-address { + description "IP address assigned to this connection point"; + type inet:ip-address; + } + } + } // list VNFR + } // list NSR + } // cm-state + +} // rw-conman diff --git a/modules/core/mano/rwcm/test/CMakeLists.txt b/modules/core/mano/rwcm/test/CMakeLists.txt new file mode 100644 index 0000000..baebe67 --- /dev/null +++ b/modules/core/mano/rwcm/test/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/28/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(CONMAN_INSTALL "demos/conman") + +install( + FILES + start_cm_system.py + README.start_cm + DESTINATION ${CONMAN_INSTALL} + COMPONENT ${PKG_LONG_NAME}) + +# set(NS_NAME ping_pong_nsd) +# install( +# FILES +# ${NS_NAME}/configuration_input_params.yml +# ${NS_NAME}/ping_vnfd_1_scriptconf_template.cfg +# ${NS_NAME}/pong_vnfd_11_scriptconf_template.cfg +# DESTINATION ${CONMAN_INSTALL}/${NS_NAME} +# COMPONENT ${PKG_LONG_NAME}) + diff --git a/modules/core/mano/rwcm/test/README.start_cm b/modules/core/mano/rwcm/test/README.start_cm new file mode 100644 index 0000000..7a8098b --- /dev/null +++ b/modules/core/mano/rwcm/test/README.start_cm @@ -0,0 +1,4 @@ +# Following example command line to launch the system in collapse mode. +# Please tailor for expanded mode or any other requirements + +./start_cm_system.py -m ethsim -c --skip-prepare-vm diff --git a/modules/core/mano/rwcm/test/cwims_juju_nsd/configuration_input_params.yml b/modules/core/mano/rwcm/test/cwims_juju_nsd/configuration_input_params.yml new file mode 100644 index 0000000..a211660 --- /dev/null +++ b/modules/core/mano/rwcm/test/cwims_juju_nsd/configuration_input_params.yml @@ -0,0 +1,35 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is input parameters file for Network Service configuration. 
+# This file is formatted as below: + +# configuration_delay : 120 # Number of seconds to wait before applying configuration after NS is up +# number_of_vnfs_to_be_configured : 1 # Total number of VNFs in this NS to be configured by Service Orchestrator +# 1 : # Configuration Priority, order in which each VNF will be configured +# name : vnfd_name # Name of the VNF +# member_vnf_index : 11 # member index of the VNF that makes it unique (in case of multiple instances of same VNF) +# configuration_type : scriptconf # Type of configuration (Currently supported values : scriptconf, netconf) +# +# Repeat VNF block for as many VNFs + +configuration_delay : 30 +number_of_vnfs_to_be_configured : 1 +1 : + name : cwims_vnfd + member_vnf_index : 1 + configuration_type : jujuconf + diff --git a/modules/core/mano/rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg b/modules/core/mano/rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg new file mode 100644 index 0000000..d32efe3 --- /dev/null +++ b/modules/core/mano/rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg @@ -0,0 +1,23 @@ +ims-a: + deploy: + store: local + directory: /usr/rift/charms/cw-aio-proxy/trusty/ + series: trusty + to: "lxc:0" + + # Data under config passed as such during deployment + config: + proxied_ip: + home_domain: "ims.riftio.local" + base_number: "1234567000" + number_count: 1000 + + units: + - unit: + # Wait for each command to complete + wait: true + # Bail on failure + bail: true + actions: + - create-user: { number: "1234567001", password: "secret"} + - create-user: { number: "1234567002", password: "secret"} diff --git a/modules/core/mano/rwcm/test/ping_pong_nsd/configuration_input_params.yml b/modules/core/mano/rwcm/test/ping_pong_nsd/configuration_input_params.yml new file mode 100644 index 0000000..d5e48f2 --- /dev/null +++ b/modules/core/mano/rwcm/test/ping_pong_nsd/configuration_input_params.yml @@ -0,0 +1,38 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is input parameters file for Network Service configuration. 
+# This file is formatted as below: + +# configuration_delay : 120 # Number of seconds to wait before applying configuration after NS is up +# number_of_vnfs_to_be_configured : 1 # Total number of VNFs in this NS to be configured by Service Orchestrator +# 1 : # Configuration Priority, order in which each VNF will be configured +# name : vnfd_name # Name of the VNF +# member_vnf_index : 11 # member index of the VNF that makes it unique (in case of multiple instances of same VNF) +# configuration_type : scriptconf # Type of configuration (Currently supported values : scriptconf, netconf) +# +# Repeat VNF block for as many VNFs + +configuration_delay : 30 +number_of_vnfs_to_be_configured : 2 +1 : + name : pong_vnfd + member_vnf_index : 2 + configuration_type : scriptconf +2 : + name : ping_vnfd + member_vnf_index : 1 + configuration_type : scriptconf diff --git a/modules/core/mano/rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg b/modules/core/mano/rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg new file mode 100755 index 0000000..ffa2518 --- /dev/null +++ b/modules/core/mano/rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg @@ -0,0 +1,54 @@ +#!/usr/bin/bash + +# Rest API config +ping_mgmt_ip='' +ping_mgmt_port=18888 + +# VNF specific configuration +pong_server_ip='' +ping_rate=5 +server_port=5555 + +# Make rest API calls to configure VNF +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \ + http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to set server info for ping!" + exit $rc +fi + +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"rate\":$ping_rate}" \ + http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to set ping rate!" + exit $rc +fi + +output=$(curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"enable\":true}" \ + http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state) +if [[ $output == *"Internal Server Error"* ]] +then + echo $output + exit 3 +else + echo $output +fi + + +exit 0 diff --git a/modules/core/mano/rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg b/modules/core/mano/rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg new file mode 100755 index 0000000..4f67c9d --- /dev/null +++ b/modules/core/mano/rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg @@ -0,0 +1,42 @@ +#!/usr/bin/bash + +# Rest API configuration +pong_mgmt_ip='' +pong_mgmt_port=18889 + +# Test +# username= +# password= + +# VNF specific configuration +pong_server_ip='' +server_port=5555 + +# Make Rest API calls to configure VNF +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \ + http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to set server(own) info for pong!" 
+ exit $rc +fi + +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"enable\":true}" \ + http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/adminstatus/state +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to enable pong service!" + exit $rc +fi + +exit 0 diff --git a/modules/core/mano/rwcm/test/rwso_test.py b/modules/core/mano/rwcm/test/rwso_test.py new file mode 100755 index 0000000..7c1af7b --- /dev/null +++ b/modules/core/mano/rwcm/test/rwso_test.py @@ -0,0 +1,353 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import asyncio +import logging +import os +import sys +import types +import unittest +import uuid + +import xmlrunner + +import gi.repository.CF as cf +import gi.repository.RwDts as rwdts +import gi.repository.RwMain as rwmain +import gi.repository.RwManifestYang as rwmanifest +import gi.repository.RwConmanYang as conmanY +import gi.repository.RwLaunchpadYang as launchpadyang + +import rift.tasklets + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class RWSOTestCase(unittest.TestCase): + """ + DTS GI interface unittests + + Note: Each tests uses a list of asyncio.Events for staging through the + test. These are required here because we are bring up each coroutine + ("tasklet") at the same time and are not implementing any re-try + mechanisms. For instance, this is used in numerous tests to make sure that + a publisher is up and ready before the subscriber sends queries. Such + event lists should not be used in production software. + """ + rwmain = None + tinfo = None + schema = None + id_cnt = 0 + + @classmethod + def setUpClass(cls): + msgbroker_dir = os.environ.get('MESSAGE_BROKER_DIR') + router_dir = os.environ.get('ROUTER_DIR') + cm_dir = os.environ.get('SO_DIR') + + manifest = rwmanifest.Manifest() + manifest.init_phase.settings.rwdtsrouter.single_dtsrouter.enable = True + + cls.rwmain = rwmain.Gi.new(manifest) + cls.tinfo = cls.rwmain.get_tasklet_info() + + # Run router in mainq. Eliminates some ill-diagnosed bootstrap races. 
+ os.environ['RWDTS_ROUTER_MAINQ']='1' + cls.rwmain.add_tasklet(msgbroker_dir, 'rwmsgbroker-c') + cls.rwmain.add_tasklet(router_dir, 'rwdtsrouter-c') + cls.rwmain.add_tasklet(cm_dir, 'rwconmantasklet') + + cls.log = rift.tasklets.logger_from_tasklet_info(cls.tinfo) + cls.log.setLevel(logging.DEBUG) + + stderr_handler = logging.StreamHandler(stream=sys.stderr) + fmt = logging.Formatter( + '%(asctime)-23s %(levelname)-5s (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s') + stderr_handler.setFormatter(fmt) + cls.log.addHandler(stderr_handler) + cls.schema = conmanY.get_schema() + + def setUp(self): + def scheduler_tick(self, *args): + self.call_soon(self.stop) + self.run_forever() + + self.loop = asyncio.new_event_loop() + self.loop.scheduler_tick = types.MethodType(scheduler_tick, self.loop) + self.loop.set_debug(True) + os.environ["PYTHONASYNCIODEBUG"] = "1" + asyncio_logger = logging.getLogger("asyncio") + asyncio_logger.setLevel(logging.DEBUG) + + self.asyncio_timer = None + self.stop_timer = None + self.id_cnt += 1 + + @asyncio.coroutine + def wait_tasklets(self): + yield from asyncio.sleep(1, loop=self.loop) + + def run_until(self, test_done, timeout=30): + """ + Attach the current asyncio event loop to rwsched and then run the + scheduler until the test_done function returns True or timeout seconds + pass. + + @param test_done - function which should return True once the test is + complete and the scheduler no longer needs to run. + @param timeout - maximum number of seconds to run the test. + """ + def shutdown(*args): + if args: + self.log.debug('Shutting down loop due to timeout') + + if self.asyncio_timer is not None: + self.tinfo.rwsched_tasklet.CFRunLoopTimerRelease(self.asyncio_timer) + self.asyncio_timer = None + + if self.stop_timer is not None: + self.tinfo.rwsched_tasklet.CFRunLoopTimerRelease(self.stop_timer) + self.stop_timer = None + + self.tinfo.rwsched_instance.CFRunLoopStop() + + def tick(*args): + self.loop.call_later(0.1, self.loop.stop) + self.loop.run_forever() + if test_done(): + shutdown() + + self.asyncio_timer = self.tinfo.rwsched_tasklet.CFRunLoopTimer( + cf.CFAbsoluteTimeGetCurrent(), + 0.1, + tick, + None) + + self.stop_timer = self.tinfo.rwsched_tasklet.CFRunLoopTimer( + cf.CFAbsoluteTimeGetCurrent() + timeout, + 0, + shutdown, + None) + + self.tinfo.rwsched_tasklet.CFRunLoopAddTimer( + self.tinfo.rwsched_tasklet.CFRunLoopGetCurrent(), + self.stop_timer, + self.tinfo.rwsched_instance.CFRunLoopGetMainMode()) + + self.tinfo.rwsched_tasklet.CFRunLoopAddTimer( + self.tinfo.rwsched_tasklet.CFRunLoopGetCurrent(), + self.asyncio_timer, + self.tinfo.rwsched_instance.CFRunLoopGetMainMode()) + + self.tinfo.rwsched_instance.CFRunLoopRun() + + self.assertTrue(test_done()) + + def new_tinfo(self, name): + """ + Create a new tasklet info instance with a unique instance_id per test. + It is up to each test to use unique names if more that one tasklet info + instance is needed. 
+ + @param name - name of the "tasklet" + @return - new tasklet info instance + """ + ret = self.rwmain.new_tasklet_info(name, RWSOTestCase.id_cnt) + + log = rift.tasklets.logger_from_tasklet_info(ret) + log.setLevel(logging.DEBUG) + + stderr_handler = logging.StreamHandler(stream=sys.stderr) + fmt = logging.Formatter( + '%(asctime)-23s %(levelname)-5s (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s') + stderr_handler.setFormatter(fmt) + log.addHandler(stderr_handler) + + return ret + + def get_cloud_account_msg(self): + cloud_account = launchpadyang.CloudAccount() + cloud_account.name = "cloudy" + cloud_account.account_type = "mock" + cloud_account.mock.username = "rainy" + return cloud_account + + def get_compute_pool_msg(self, name, pool_type): + pool_config = rmgryang.ResourcePools() + pool = pool_config.pools.add() + pool.name = name + pool.resource_type = "compute" + if pool_type == "static": + # Need to query CAL for resource + pass + else: + pool.max_size = 10 + return pool_config + + def get_network_pool_msg(self, name, pool_type): + pool_config = rmgryang.ResourcePools() + pool = pool_config.pools.add() + pool.name = name + pool.resource_type = "network" + if pool_type == "static": + # Need to query CAL for resource + pass + else: + pool.max_size = 4 + return pool_config + + + def get_network_reserve_msg(self, xpath): + event_id = str(uuid.uuid4()) + msg = rmgryang.VirtualLinkEventData() + msg.event_id = event_id + msg.request_info.name = "mynet" + msg.request_info.subnet = "1.1.1.0/24" + return msg, xpath.format(event_id) + + def get_compute_reserve_msg(self,xpath): + event_id = str(uuid.uuid4()) + msg = rmgryang.VDUEventData() + msg.event_id = event_id + msg.request_info.name = "mynet" + msg.request_info.image_id = "This is a image_id" + msg.request_info.vm_flavor.vcpu_count = 4 + msg.request_info.vm_flavor.memory_mb = 8192*2 + msg.request_info.vm_flavor.storage_gb = 40 + c1 = msg.request_info.connection_points.add() + c1.name = "myport1" + c1.virtual_link_id = "This is a network_id" + return msg, xpath.format(event_id) + + def test_create_resource_pools(self): + self.log.debug("STARTING - test_create_resource_pools") + tinfo = self.new_tinfo('poolconfig') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools" + pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records" + account_xpath = "C,/rw-launchpad:cloud-account" + compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']" + network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']" + + @asyncio.coroutine + def configure_cloud_account(): + msg = self.get_cloud_account_msg() + self.log.info("Configuring cloud-account: %s",msg) + yield from dts.query_create(account_xpath, + rwdts.Flag.ADVISE, + msg) + yield from asyncio.sleep(3, loop=self.loop) + + @asyncio.coroutine + def configure_compute_resource_pools(): + msg = self.get_compute_pool_msg("virtual-compute", "dynamic") + self.log.info("Configuring compute-resource-pool: %s",msg) + yield from dts.query_create(pool_xpath, + rwdts.Flag.ADVISE, + msg) + yield from asyncio.sleep(3, loop=self.loop) + + + @asyncio.coroutine + def configure_network_resource_pools(): + msg = self.get_network_pool_msg("virtual-network", "dynamic") + self.log.info("Configuring network-resource-pool: %s",msg) + yield from dts.query_create(pool_xpath, + rwdts.Flag.ADVISE, + msg) + yield from asyncio.sleep(3, loop=self.loop) + + + 
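+        # verify_resource_pools() below reads back D,/rw-resource-mgr:resource-pool-records
+        # and logs every returned record; the assertions on the expected pool names are
+        # left commented out in this version of the test.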
@asyncio.coroutine + def verify_resource_pools(): + self.log.debug("Verifying test_create_resource_pools results") + res_iter = yield from dts.query_read(pool_records_xpath,) + for result in res_iter: + response = yield from result + records = response.result.records + #self.assertEqual(len(records), 2) + #names = [i.name for i in records] + #self.assertTrue('virtual-compute' in names) + #self.assertTrue('virtual-network' in names) + for record in records: + self.log.debug("Received Pool Record, Name: %s, Resource Type: %s, Pool Status: %s, Pool Size: %d, Busy Resources: %d", + record.name, + record.resource_type, + record.pool_status, + record.max_size, + record.busy_resources) + @asyncio.coroutine + def reserve_network_resources(): + msg,xpath = self.get_network_reserve_msg(network_xpath) + self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg)) + yield from dts.query_create(xpath, rwdts.Flag.TRACE, msg) + yield from asyncio.sleep(3, loop=self.loop) + yield from dts.query_delete(xpath, rwdts.Flag.TRACE) + + @asyncio.coroutine + def reserve_compute_resources(): + msg,xpath = self.get_compute_reserve_msg(compute_xpath) + self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg)) + yield from dts.query_create(xpath, rwdts.Flag.TRACE, msg) + yield from asyncio.sleep(3, loop=self.loop) + yield from dts.query_delete(xpath, rwdts.Flag.TRACE) + + @asyncio.coroutine + def run_test(): + yield from self.wait_tasklets() + yield from configure_cloud_account() + yield from configure_compute_resource_pools() + yield from configure_network_resource_pools() + yield from verify_resource_pools() + yield from reserve_network_resources() + yield from reserve_compute_resources() + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_create_resource_pools") + + +def main(): + top_dir = __file__[:__file__.find('/modules/core/')] + build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build') + launchpad_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build/rwlaunchpad') + + if 'MESSAGE_BROKER_DIR' not in os.environ: + os.environ['MESSAGE_BROKER_DIR'] = os.path.join(build_dir, 'rwmsg/plugins/rwmsgbroker-c') + + if 'ROUTER_DIR' not in os.environ: + os.environ['ROUTER_DIR'] = os.path.join(build_dir, 'rwdts/plugins/rwdtsrouter-c') + + if 'SO_DIR' not in os.environ: + os.environ['SO_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwconmantasklet') + + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + unittest.main(testRunner=runner) + +if __name__ == '__main__': + main() + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwcm/test/start_cm_system.py b/modules/core/mano/rwcm/test/start_cm_system.py new file mode 100755 index 0000000..7ec6e95 --- /dev/null +++ b/modules/core/mano/rwcm/test/start_cm_system.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+
+import logging
+import os
+import sys
+
+import rift.vcs
+import rift.vcs.demo
+import rift.vcs.vms
+
+from rift.vcs.ext import ClassProperty
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigManagerTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents the Service Orchestrator (SO) tasklet.
+    """
+
+    def __init__(self, name='rwcmtasklet', uid=None):
+        """
+        Creates a ConfigManagerTasklet object.
+
+        Arguments:
+            name - the name of the tasklet
+            uid  - a unique identifier
+        """
+        super(ConfigManagerTasklet, self).__init__(name=name, uid=uid)
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
+    plugin_name = ClassProperty('rwconmantasklet')
+
+
+# Construct the system. This system consists of 1 cluster in 1
+# colony. The master cluster houses the CLI and management VMs.
+sysinfo = rift.vcs.SystemInfo(
+        colonies=[
+            rift.vcs.Colony(
+                clusters=[
+                    rift.vcs.Cluster(
+                        name='master',
+                        virtual_machines=[
+                            rift.vcs.VirtualMachine(
+                                name='vm-so',
+                                ip='127.0.0.1',
+                                tasklets=[
+                                    rift.vcs.uAgentTasklet(),
+                                    ],
+                                procs=[
+                                    rift.vcs.Confd(),
+                                    rift.vcs.CliTasklet(manifest_file="cli_rwcm.xml"),
+                                    rift.vcs.DtsRouterTasklet(),
+                                    rift.vcs.MsgBrokerTasklet(),
+                                    rift.vcs.RestconfTasklet(),
+                                    ConfigManagerTasklet()
+                                    ],
+                                ),
+                            ]
+                        )
+                    ]
+                )
+            ]
+        )
+
+
+# Define the generic portmap.
+port_map = {}
+
+
+# Define a mapping from the placeholder logical names to the real
+# port names for each of the different modes supported by this demo.
+port_names = {
+    'ethsim': {
+    },
+    'pci': {
+    }
+}
+
+
+# Define the connectivity between logical port names.
+port_groups = {}
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser()
+
+    args = parser.parse_args(argv)
+
+    # Load demo info and create Demo object
+    demo = rift.vcs.demo.Demo(sysinfo=sysinfo,
+                              port_map=port_map,
+                              port_names=port_names,
+                              port_groups=port_groups)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args)
+
+    # Start the prepared system
+    system.start()
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except rift.vcs.demo.ReservationError:
+        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
+        sys.exit(1)
+    except rift.vcs.demo.MissingModeError:
+        print("ERROR: you need to provide a mode to run the script")
+        sys.exit(1)
+    finally:
+        os.system("stty sane")
\ No newline at end of file
diff --git a/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml
new file mode 100644
index 0000000..a0791b5
--- /dev/null
+++ b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml
@@ -0,0 +1,38 @@
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is input parameters file for Network Service configuration. +# This file is formatted as below: + +# configuration_delay : 120 # Number of seconds to wait before applying configuration after NS is up +# number_of_vnfs_to_be_configured : 1 # Total number of VNFs in this NS to be configured by Service Orchestrator +# 1 : # Configuration Priority, order in which each VNF will be configured +# name : vnfd_name # Name of the VNF +# member_vnf_index : 11 # member index of the VNF that makes it unique (in case of multiple instances of same VNF) +# configuration_type : scriptconf # Type of configuration (Currently supported values : scriptconf, netconf) +# +# Repeat VNF block for as many VNFs + +configuration_delay : 120 +number_of_vnfs_to_be_configured : 2 +1 : + name : trafsink_vnfd + member_vnf_index : 3 + configuration_type : netconf +2 : + name : trafgen_vnfd + member_vnf_index : 1 + configuration_type : netconf diff --git a/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg new file mode 100644 index 0000000..02dfc85 --- /dev/null +++ b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg @@ -0,0 +1,79 @@ + + + trafgen + 0 + + trafgen-lb + + N1TenGi-1 + + trafgen_vnfd/cp0 + + + + + trafgen_vnfd/cp0 + + + rw_trafgen + rw_trafgen + + 2 + + + direct + + + + + + + + + + + + + + + + + + 1 + + + + + + 1 + + + 10000 + 10000 + 10128 + 1 + + + 5678 + 5678 + 5678 + 1 + + + 512 + 512 + 512 + 1 + + + + + + + + + syslog + + 514 + + diff --git a/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg new file mode 100644 index 0000000..6402201 --- /dev/null +++ b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg @@ -0,0 +1,42 @@ + + + trafsink + 0 + + lb-trafsink + + N3TenGigi-1 + + trafsink_vnfd/cp0 + + + + + trafsink_vnfd/cp0 + + + rw_trafgen + rw_trafgen + + 2 + + + direct + + + + + + + + + + + + + + syslog + + 514 + + diff --git a/modules/core/mano/rwlaunchpad/CMakeLists.txt b/modules/core/mano/rwlaunchpad/CMakeLists.txt new file mode 100644 index 0000000..59695f7 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/CMakeLists.txt @@ -0,0 +1,25 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME rwlaunchpad) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + +set(subdirs + plugins + ra + test + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/CMakeLists.txt new file mode 100644 index 0000000..71a1d91 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/CMakeLists.txt @@ -0,0 +1,26 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua 
Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + yang + rwiwp + rwlaunchpadtasklet + rwmonitor + rwnsm + rwvnfm + rwvns + rwresmgr + vala + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwiwp/CMakeLists.txt new file mode 100644 index 0000000..7aa112a --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/CMakeLists.txt @@ -0,0 +1,26 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwiwptasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwiwp/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/__init__.py new file mode 100644 index 0000000..cf75f79 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
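+
+# The tasklet implementation is installed into site-packages (see the
+# RIFT-6485 workaround described in this plugin's CMakeLists.txt); re-export
+# IwpTasklet here so the plugin shim rwiwptasklet.py can subclass it.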
+ +from .rwiwptasklet import IwpTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/rwiwptasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/rwiwptasklet.py new file mode 100755 index 0000000..dc9e0c4 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/rwiwptasklet.py @@ -0,0 +1,621 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import logging +import sys + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwIwpYang, + RwLaunchpadYang, + RwcalYang as rwcal, +) + +import rw_peas +import rift.tasklets + + +class MissionControlConnectionError(Exception): + pass + + +class MissionControlNotConnected(Exception): + pass + + +class OutofResourcesError(Exception): + pass + + +class PluginLoadingError(Exception): + pass + + +def get_add_delete_update_cfgs(dts_member_reg, xact, key_name): + # Unforunately, it is currently difficult to figure out what has exactly + # changed in this xact without Pbdelta support (RIFT-4916) + # As a workaround, we can fetch the pre and post xact elements and + # perform a comparison to figure out adds/deletes/updates + xact_cfgs = list(dts_member_reg.get_xact_elements(xact)) + curr_cfgs = list(dts_member_reg.elements) + + xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs} + curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs} + + # Find Adds + added_keys = set(xact_key_map) - set(curr_key_map) + added_cfgs = [xact_key_map[key] for key in added_keys] + + # Find Deletes + deleted_keys = set(curr_key_map) - set(xact_key_map) + deleted_cfgs = [curr_key_map[key] for key in deleted_keys] + + # Find Updates + updated_keys = set(curr_key_map) & set(xact_key_map) + updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]] + + return added_cfgs, deleted_cfgs, updated_cfgs + + +class ResourcePool(object): + def __init__(self, log, loop, dts, pool_name, resource_ids): + self._log = log + self._loop = loop + self._dts = dts + self._pool_name = pool_name + self._resource_ids = resource_ids + + self._reserved_resource_ids = [] + + self._dts_reg = None + + @property + def pool_xpath(self): + raise NotImplementedError() + + @property + def id_field(self): + raise NotImplementedError() + + def pool_resource_xpath(self, resource_id): + raise NotImplementedError() + + @asyncio.coroutine + def reserve_resource(self): + self._log.debug("Attempting to reserve a resource") + + for id in self._resource_ids: + self._log.debug("Iterated resource id: %s", id) + if id not in self._reserved_resource_ids: + self._log.debug("Reserving resource id %s from pool %s", + id, self._pool_name) + self._reserved_resource_ids.append(id) + return id + + self._log.warning("Did not find a unreserved 
resource in pool %s", self._pool_name) + return None + + +class VMResourcePool(ResourcePool): + @property + def pool_xpath(self): + return "C,/rw-iwp:resource-mgr/rw-iwp:pools/rw-iwp:vm-pool[rw-iwp:name='{}']/rw-iwp:resources".format( + self._pool_name, + ) + + @property + def id_field(self): + return "vm_id" + + def pool_resource_xpath(self, resource_id): + return self.pool_xpath + "[rw-iwp:vm-id='{}']".format( + resource_id, + ) + + +class NetworkResourcePool(ResourcePool): + @property + def pool_xpath(self): + return "C,/rw-iwp:resource-mgr/rw-iwp:pools/rw-iwp:network-pool[rw-iwp:name='{}']/rw-iwp:resources".format( + self._pool_name, + ) + + @property + def id_field(self): + return "network_id" + + def pool_resource_xpath(self, resource_id): + return self.pool_xpath + "[rw-iwp:network-id='{}']".format( + resource_id, + ) + + +class ResourceManager(object): + def __init__(self, log, loop, dts): + self._log = log + self._loop = loop + self._dts = dts + + self._resource_mgr_cfg = None + + self._vm_resource_pools = {} + self._network_resource_pools = {} + + self._periodic_sync_task = None + + @asyncio.coroutine + def _update_vm_pools(self, vm_pools): + self._log.debug("Updating vm pools: %s", vm_pools) + for pool in vm_pools: + if pool.name not in self._vm_resource_pools: + self._log.debug("Adding vm resource pool %s", pool.name) + self._vm_resource_pools[pool.name] = VMResourcePool( + self._log, + self._loop, + self._dts, + pool.name, + [r.vm_id for r in pool.resources], + ) + + @asyncio.coroutine + def _update_network_pools(self, network_pools): + self._log.debug("Updating network pools: %s", network_pools) + for pool in network_pools: + if pool.name not in self._network_resource_pools: + self._log.debug("Adding network resource pool %s", pool.name) + self._network_resource_pools[pool.name] = NetworkResourcePool( + self._log, + self._loop, + self._dts, + pool.name, + [r.network_id for r in pool.resources], + ) + + @asyncio.coroutine + def reserve_vm(self): + self._log.debug("Attempting to reserve a VM resource.") + for name, pool in self._vm_resource_pools.items(): + resource_id = yield from pool.reserve_resource() + if resource_id is None: + continue + + return RwIwpYang.VMResponse( + vm_id=resource_id, + vm_pool=name, + ) + + raise OutofResourcesError("Could not find an available network resource") + + @asyncio.coroutine + def reserve_network(self): + self._log.debug("Attempting to reserve a Network resource.") + for name, pool in self._network_resource_pools.items(): + resource_id = yield from pool.reserve_resource() + if resource_id is None: + continue + + return RwIwpYang.NetworkResponse( + network_id=resource_id, + network_pool=name, + ) + + raise OutofResourcesError("Could not find an available network resource") + + def apply_config(self, resource_mgr_cfg): + self._log.debug("Applying resource manager config: %s", + resource_mgr_cfg) + + self._resource_mgr_cfg = resource_mgr_cfg + + asyncio.ensure_future( + self._update_network_pools(self._resource_mgr_cfg.pools.network_pool), + loop=self._loop, + ) + + asyncio.ensure_future( + self._update_vm_pools(self._resource_mgr_cfg.pools.vm_pool), + loop=self._loop, + ) + + +class ResourceRequestHandler(object): + NETWORK_REQUEST_XPATH = "D,/rw-iwp:resource-mgr/network-request/requests" + VM_REQUEST_XPATH = "D,/rw-iwp:resource-mgr/vm-request/requests" + + def __init__(self, dts, loop, log, resource_manager, cloud_account): + self._dts = dts + self._loop = loop + self._log = log + self._resource_manager = resource_manager + 
self._cloud_account = cloud_account + + self._network_reg = None + self._vm_reg = None + + self._network_reg_event = asyncio.Event(loop=self._loop) + self._vm_reg_event = asyncio.Event(loop=self._loop) + + @asyncio.coroutine + def wait_ready(self, timeout=5): + self._log.debug("Waiting for all request registrations to become ready.") + yield from asyncio.wait( + [self._network_reg_event.wait(), self._vm_reg_event.wait()], + timeout=timeout, loop=self._loop, + ) + + def register(self): + def on_network_request_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Got network request commit (xact_info: %s)", xact_info) + + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_request_ready(registration, status): + self._log.debug("Got request ready event (registration: %s) (status: %s)", + registration, status) + + if registration == self._network_reg: + self._network_reg_event.set() + elif registration == self._vm_reg: + self._vm_reg_event.set() + else: + self._log.error("Unknown registration ready event: %s", registration) + + @asyncio.coroutine + def on_network_request_prepare(xact_info, action, ks_path, request_msg): + self._log.debug( + "Got network request on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg + ) + + xpath = ks_path.to_xpath(RwIwpYang.get_schema()) + "/network-response" + + network_item = yield from self._resource_manager.reserve_network() + + network_response = RwIwpYang.NetworkResponse( + network_id=network_item.network_id, + network_pool=network_item.network_pool + ) + + self._log.debug("Responding with NetworkResponse at xpath %s: %s", + xpath, network_response) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, network_response) + + def on_vm_request_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Got vm request commit (xact_info: %s)", xact_info) + + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_vm_request_prepare(xact_info, action, ks_path, request_msg): + def get_vm_ip_address(vm_id): + rc, vm_info_item = self._cloud_account.cal.get_vm( + self._cloud_account.account, + vm_id + ) + + return vm_info_item.management_ip + + self._log.debug( + "Got vm request on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg + ) + + xpath = ks_path.to_xpath(RwIwpYang.get_schema()) + "/vm-response" + + vm_item = yield from self._resource_manager.reserve_vm() + + vm_ip = get_vm_ip_address(vm_item.vm_id) + + vm_response = RwIwpYang.VMResponse( + vm_id=vm_item.vm_id, + vm_pool=vm_item.vm_pool, + vm_ip=vm_ip, + ) + + self._log.debug("Responding with VMResponse at xpath %s: %s", + xpath, vm_response) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, vm_response) + + with self._dts.group_create() as group: + self._log.debug("Registering for Network Resource Request using xpath: %s", + ResourceRequestHandler.NETWORK_REQUEST_XPATH, + ) + + self._network_reg = group.register( + xpath=ResourceRequestHandler.NETWORK_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler( + on_ready=on_request_ready, + on_commit=on_network_request_commit, + on_prepare=on_network_request_prepare, + ), + flags=rwdts.Flag.PUBLISHER, + ) + + self._log.debug("Registering for VM Resource Request using xpath: %s", + ResourceRequestHandler.VM_REQUEST_XPATH, + ) + self._vm_reg = group.register( + xpath=ResourceRequestHandler.VM_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler( + on_ready=on_request_ready, + 
on_commit=on_vm_request_commit, + on_prepare=on_vm_request_prepare, + ), + flags=rwdts.Flag.PUBLISHER, + ) + + +class ResourceMgrDtsConfigHandler(object): + XPATH = "C,/rw-iwp:resource-mgr" + + def __init__(self, dts, log, resource_manager): + self._dts = dts + self._log = log + + self._resource_manager = resource_manager + self._res_mgr_cfg = RwIwpYang.ResourceManagerConfig() + + def register(self): + def on_apply(dts, acg, xact, action, _): + """Apply the resource manager configuration""" + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + self._log.debug("Got resource mgr apply config (xact: %s) (action: %s)", + xact, action) + + self._resource_manager.apply_config(self._res_mgr_cfg) + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + self._log.debug("Got resource manager configuration: %s", msg) + + mgmt_domain = msg.mgmt_domain + if mgmt_domain.has_field("name"): + self._res_mgr_cfg.mgmt_domain.name = mgmt_domain.name + + mission_control = msg.mission_control + if mission_control.has_field("mgmt_ip"): + self._res_mgr_cfg.mission_control.mgmt_ip = mission_control.mgmt_ip + + if msg.has_field("pools"): + self._res_mgr_cfg.pools.from_dict(msg.pools.as_dict()) + + acg.handle.prepare_complete_ok(xact_info.handle) + + self._log.debug("Registering for Resource Mgr config using xpath: %s", + ResourceMgrDtsConfigHandler.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply) + with self._dts.appconf_group_create(handler=acg_handler) as acg: + self._pool_reg = acg.register( + xpath=ResourceMgrDtsConfigHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + on_prepare=on_prepare + ) + + +class CloudAccountDtsHandler(object): + XPATH = "C,/rw-launchpad:cloud-account" + log_hdl = None + + def __init__(self, dts, log, cal_account): + self.dts = dts + self.log = log + self.cal_account = cal_account + self.reg = None + + def add_account(self, account): + self.log.info("adding cloud account: {}".format(account)) + self.cal_account.account = rwcal.CloudAccount.from_dict(account.as_dict()) + self.cal_account.cal = self.load_cal_plugin(account) + + def delete_account(self, account_id): + self.log.info("deleting cloud account: {}".format(account_id)) + self.cal_account.account = None + self.cal_account.cal = None + + def update_account(self, account): + self.log.info("updating cloud account: {}".format(account)) + self.cal_account.account = rwcal.CloudAccount.from_dict(account.as_dict()) + self.cal_account.cal = self.load_cal_plugin(account) + + def load_cal_plugin(self, account): + try: + plugin = rw_peas.PeasPlugin( + getattr(account, account.account_type).plugin_name, + 'RwCal-1.0' + ) + + except AttributeError as e: + raise PluginLoadingError(str(e)) + + engine, info, ext = plugin() + + # Initialize the CAL interface + cal = plugin.get_interface("Cloud") + cal.init(CloudAccountDtsHandler.log_hdl) + + return cal + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + self.log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action) + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. 
Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self.reg, + xact=xact, + key_name="name", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_account(cfg.name) + + # Handle Adds + for cfg in add_cfgs: + self.add_account(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_account(cfg) + + self.log.debug("Registering for Cloud Account config using xpath: %s", + CloudAccountDtsHandler.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=CloudAccountDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class CloudAccount(object): + def __init__(self): + self.cal = None + self.account = None + + +class IwpTasklet(rift.tasklets.Tasklet): + def __init__(self, *args, **kwargs): + super(IwpTasklet, self).__init__(*args, **kwargs) + + self._dts = None + + self._resource_manager = None + self._resource_mgr_config_hdl = None + + self._cloud_account = CloudAccount() + + def start(self): + super(IwpTasklet, self).start() + self.log.info("Starting IwpTasklet") + self.log.setLevel(logging.DEBUG) + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS( + self.tasklet_info, + RwLaunchpadYang.get_schema(), + self.loop, + self.on_dts_state_change + ) + + CloudAccountDtsHandler.log_hdl = self.log_hdl + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + def stop(self): + try: + self._dts.deinit() + except Exception: + print("Caught Exception in IWP stop:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def init(self): + self._resource_manager = ResourceManager( + self._log, + self._loop, + self._dts + ) + + self.log.debug("creating resource mgr config request handler") + self._resource_mgr_config_hdl = ResourceMgrDtsConfigHandler( + self._dts, + self.log, + self._resource_manager, + ) + self._resource_mgr_config_hdl.register() + + self.log.debug("creating resource request handler") + self._resource_req_hdl = ResourceRequestHandler( + self._dts, + self.loop, + self.log, + self._resource_manager, + self._cloud_account, + ) + self._resource_req_hdl.register() + + self.log.debug("creating cloud account handler") + self.account_handler = CloudAccountDtsHandler(self._dts, self.log, self._cloud_account) + yield from self.account_handler.register() + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/rwiwptasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rwiwptasklet.py new file mode 100755 index 0000000..f1401b9 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rwiwptasklet.py @@ -0,0 +1,30 @@ 
+ +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwiwptasklet + +class Tasklet(rift.tasklets.rwiwptasklet.IwpTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt new file mode 100644 index 0000000..5a0afe7 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwlaunchpad) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/archive.py + rift/tasklets/${TASKLET_NAME}/checksums.py + rift/tasklets/${TASKLET_NAME}/convert.py + rift/tasklets/${TASKLET_NAME}/datacenters.py + rift/tasklets/${TASKLET_NAME}/message.py + rift/tasklets/${TASKLET_NAME}/tasklet.py + rift/tasklets/${TASKLET_NAME}/uploader.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py new file mode 100644 index 0000000..2e19300 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .tasklet import LaunchpadTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/archive.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/archive.py new file mode 100644 index 0000000..470f554 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/archive.py @@ -0,0 +1,268 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import os +import re +import shutil +import tempfile + +from . import checksums +from . import convert +from . import message + + +class ArchiveError(Exception): + pass + + +class ArchiveInvalidPath(message.ErrorMessage): + def __init__(self, filename): + msg = "unable to match checksum filename {} to contents of archive" + super().__init__("archive-error", msg.format(filename)) + + +class LaunchpadArchive(object): + def __init__(self, tar, log): + self._descriptors = dict() + self._descriptors['images'] = list() + self._descriptors['pnfd'] = list() + self._descriptors['vnfd'] = list() + self._descriptors['vld'] = list() + self._descriptors['nsd'] = list() + self._descriptors['vnffgd'] = list() + self._descriptors['schema/libs'] = list() + self._descriptors['schema/yang'] = list() + self._descriptors['schema/fxs'] = list() + + self._checksums = dict() + self._manifest = None + + self.log = log + self.tarfile = tar + self.prefix = os.path.commonprefix(self.tarfile.getnames()) + + # There must be a checksums.txt file in the archive + if os.path.join(self.prefix, 'checksums.txt') not in tar.getnames(): + self.log.message(message.OnboardMissingChecksumsFile()) + raise ArchiveError() + + # Iterate through the paths in the checksums files and validate them. 
+ # Note that any file in the archive that is not included in the + # checksums file will be ignored. + fd = tar.extractfile(os.path.join(self.prefix, 'checksums.txt')) + archive_checksums = checksums.ArchiveChecksums.from_file_desc(fd) + + def validate_checksums(): + archive_files = {info.name for info in self.tarfile.getmembers() if info.isfile()} + + # Identify files in the checksums.txt file that cannot be located in + # the archive. + for filename in archive_checksums: + if os.path.join(self.prefix, filename) not in archive_files: + self.log.message(message.OnboardMissingFile(filename)) + raise ArchiveError() + + # Use the checksums to validate the remaining files in the archive + for filename in archive_checksums: + path = os.path.join(self.prefix, filename) + if checksums.checksum(self.tarfile.extractfile(path)) != archive_checksums[filename]: + self.log.message(message.OnboardChecksumMismatch(filename)) + raise ArchiveError() + + # Disable checksum validations for onboard performance issues + # validate_checksums() + + def assign_manifest(filename): + self._manifest = filename + + patterns = [ + (re.compile(r"images/([^/]+)"), self._descriptors["images"].append), + (re.compile(r"pnfd/([^/]+)"), self._descriptors["pnfd"].append), + (re.compile(r"vnfd/([^/]+)"), self._descriptors["vnfd"].append), + (re.compile(r"vld/([^/]+)"), self._descriptors["vld"].append), + (re.compile(r"nsd/([^/]+)"), self._descriptors["nsd"].append), + (re.compile(r"vnffgd/([^/]+)"), self._descriptors["vnffgd"].append), + (re.compile(r"schema/libs/([^/]+)"), self._descriptors["schema/libs"].append), + (re.compile(r"schema/yang/([^/]+)"), self._descriptors["schema/yang"].append), + (re.compile(r"schema/fxs/([^/]+)"), self._descriptors["schema/fxs"].append), + (re.compile(r"manifest.xml"), assign_manifest), + ] + + # Iterate through the recognized patterns and assign files accordingly + for filename in archive_checksums: + relname = os.path.relpath(filename) + for pattern, store in patterns: + if pattern.match(relname): + store(relname) + self._checksums[relname] = archive_checksums[filename] + break + + else: + raise message.MessageException(ArchiveInvalidPath(filename)) + + @property + def checksums(self): + """A dictionary of the file checksums""" + return self._checksums + + @property + def pnfds(self): + """A list of PNFDs in the archive""" + return self._descriptors['pnfd'] + + @property + def vnfds(self): + """A list of VNFDs in the archive""" + return self._descriptors['vnfd'] + + @property + def vlds(self): + """A list of VLDs in the archive""" + return self._descriptors['vld'] + + @property + def vnffgds(self): + """A list of VNFFGDs in the archive""" + return self._descriptors['vnffgd'] + + @property + def nsds(self): + """A list of NSDs in the archive""" + return self._descriptors['nsd'] + + @property + def images(self): + """A list of images in the archive""" + return self._descriptors['images'] + + @property + def filenames(self): + """A list of all the files in the archive""" + return self.pnfds + self.vnfds + self.vlds + self.vnffgds + self.nsds + self.images + + def extract(self, dest): + # Ensure that the destination directory exists + if not os.path.exists(dest): + os.makedirs(dest) + + for filename in self.filenames: + # Create the full name to perform the lookup for the TarInfo in the + # archive. + fullname = os.path.join(self.prefix, filename) + member = self.tarfile.getmember(fullname) + + # Make sure that any preceeding directories in the path have been + # created. 
+ dirname = os.path.dirname(filename) + if not os.path.exists(os.path.join(dest, dirname)): + os.makedirs(os.path.join(dest, dirname)) + + # Copy the contents of the file to the correct path + with open(os.path.join(dest, filename), 'wb') as dst: + src = self.tarfile.extractfile(member) + shutil.copyfileobj(src, dst, 10 * 1024 * 1024) + src.close() + +class PackageArchive(object): + def __init__(self): + self.images = dict() + self.vnfds = list() + self.nsds = list() + self.vlds = list() + self.checksums = dict() + + def add_image(self, image, chksum=None): + if image.name not in self.images: + if chksum is None: + with open(image.location, 'r+b') as fp: + self.checksums["images/" + image.name] = checksums.checksum(fp) + + else: + self.checksums["images/" + image.name] = chksum + + self.images[image.name] = image + + def add_vld(self, vld): + self.vlds.append(vld) + + def add_vnfd(self, vnfd): + self.vnfds.append(vnfd) + + def add_nsd(self, nsd): + self.nsds.append(nsd) + + def create_archive(self, archive_name, dest=None): + if dest is None: + dest = tempfile.gettempdir() + + if archive_name.endswith(".tar.gz"): + archive_name = archive_name[:-7] + + archive_path = os.path.join(dest, archive_name) + + if os.path.exists(archive_path): + shutil.rmtree(archive_path) + + os.makedirs(archive_path) + + def write_descriptors(descriptors, converter, name): + if descriptors: + os.makedirs(os.path.join(archive_path, name)) + + path = "{}/{{}}.xml".format(os.path.join(archive_path, name)) + for desc in descriptors: + xml = converter.to_xml_string(desc) + open(path.format(desc.id), 'w').write(xml) + + key = os.path.relpath(path.format(desc.id), archive_path) + self.checksums[key] = checksums.checksum_string(xml) + + def write_images(): + if self.images: + image_path = os.path.join(archive_path, "images") + os.makedirs(image_path) + + for image in self.images.values(): + shutil.copy2(image.location, image_path) + + def write_checksums(): + with open(os.path.join(archive_path, "checksums.txt"), "w") as fp: + for path, chksum in self.checksums.items(): + fp.write("{} {}\n".format(chksum, path)) + + # Start by writing the descriptors to the archive + write_descriptors(self.nsds, convert.NsdYangConverter(), "nsd") + write_descriptors(self.vlds, convert.VldYangConverter(), "vld") + write_descriptors(self.vnfds, convert.VnfdYangConverter(), "vnfd") + + # Copy the images to the archive + write_images() + + # Finally, write the checksums file + write_checksums() + + # Construct a tarball + cmd = "tar zcf {dest}/{name}.tar.gz.partial -C {dest} {name} &>/dev/null" + os.system(cmd.format(name=archive_name, dest=dest)) + + # Rename to final name + cmd = "mv {dest}/{name}.tar.gz.partial {dest}/{name}.tar.gz" + os.system(cmd.format(name=archive_name, dest=dest)) + + shutil.rmtree(archive_path) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/checksums.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/checksums.py new file mode 100644 index 0000000..93c1ce2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/checksums.py @@ -0,0 +1,65 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import hashlib +import re + +def checksum_string(s): + return hashlib.md5(s.encode('utf-8')).hexdigest() + + +def checksum(fd): + """ Calculate a md5 checksum of fd file handle + + Arguments: + fd: A file descriptor return from open() call + + Returns: + A md5 checksum of the file + + """ + current = hashlib.md5() + while True: + data = fd.read(2 ** 16) + if len(data) == 0: + return current.hexdigest() + current.update(data) + + +class ArchiveChecksums(dict): + @classmethod + def from_file_desc(cls, fd): + checksum_pattern = re.compile(r"(\S+)\s+(\S+)") + checksums = dict() + + for line in (line.decode('utf-8').strip() for line in fd if line): + + # Skip comments + if line.startswith('#'): + continue + + # Skip lines that do not contain the pattern we are looking for + result = checksum_pattern.search(line) + if result is None: + continue + + chksum, filepath = result.groups() + checksums[filepath] = chksum + + return cls(checksums) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert.py new file mode 100644 index 0000000..1b2ce63 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert.py @@ -0,0 +1,97 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
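+
+# Helpers for converting NSD/VNFD/VLD descriptors between their YANG-modeled
+# (GI) objects and XML or JSON strings. Each converter class lazily creates a
+# single shared libncx model for its YANG namespace and reuses it for every
+# conversion.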
+ + +# +# + +import gi +gi.require_version('RwYang', '1.0') +from gi.repository import ( + NsdYang, + RwYang, + VldYang, + VnfdYang, + ) + + +class GenericYangConverter(object): + model = None + + def __init__(self): + cls = self.__class__ + + if cls.model is None: + cls.model = RwYang.model_create_libncx() + cls.model.load_schema_ypbc(cls.yang_namespace().get_schema()) + + @classmethod + def yang_namespace(cls): + return cls.YANG_NAMESPACE + + @classmethod + def yang_class(cls): + return cls.YANG_CLASS + + def from_xml_string(self, xml): + cls = self.__class__ + obj = cls.yang_class()() + obj.from_xml_v2(cls.model, xml) + return obj + + def from_xml_file(self, filename): + with open(filename, 'r') as fp: + xml = fp.read() + + cls = self.__class__ + obj = cls.yang_class()() + obj.from_xml_v2(cls.model, xml) + return obj + + def to_xml_string(self, obj): + return obj.to_xml_v2(self.__class__.model) + + def from_json_string(self, json): + cls = self.__class__ + obj = cls.yang_class()() + obj.from_json(cls.model, json) + return obj + + def from_json_file(self, filename): + with open(filename, 'r') as fp: + json = fp.read() + + cls = self.__class__ + obj = cls.yang_class()() + obj.from_json(cls.model, json) + return obj + + def to_json_string(self, obj): + return obj.to_json(self.__class__.model) + + +class VnfdYangConverter(GenericYangConverter): + YANG_NAMESPACE = VnfdYang + YANG_CLASS = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd + + +class NsdYangConverter(GenericYangConverter): + YANG_NAMESPACE = NsdYang + YANG_CLASS = NsdYang.YangData_Nsd_NsdCatalog_Nsd + + +class VldYangConverter(GenericYangConverter): + YANG_NAMESPACE = VldYang + YANG_CLASS = VldYang.YangData_Vld_VldCatalog_Vld \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py new file mode 100644 index 0000000..a6d1950 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py @@ -0,0 +1,134 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio + +from gi.repository import ( + RwDts, + RwLaunchpadYang, +) + +import rift.openmano.openmano_client as openmano_client +import rift.tasklets + + +class DataCenterPublisher(object): + """ + This class is reponsible for exposing the data centers associated with an + openmano cloud account. 
+ """ + + XPATH = "D,/rw-launchpad:datacenters" + + def __init__(self, tasklet): + """Creates an instance of a DataCenterPublisher + + Arguments: + tasklet - the tasklet that this publisher is registered for + + """ + self.tasklet = tasklet + self.reg = None + + @property + def dts(self): + """The DTS instance used by this tasklet""" + return self.tasklet.dts + + @property + def log(self): + """The logger used by this tasklet""" + return self.tasklet.log + + @property + def loop(self): + """The event loop used by this tasklet""" + return self.tasklet.loop + + @property + def accounts(self): + """The known openmano cloud accounts""" + accounts = list() + for acc in self.tasklet.cloud_accounts: + if acc.account_type == "openmano": + accounts.append(acc.account_msg) + + return accounts + + @asyncio.coroutine + def register(self): + """Registers the publisher with DTS""" + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + try: + # Create a datacenters instance to hold all of the cloud + # account data. + datacenters = RwLaunchpadYang.DataCenters() + + # Iterate over the known openmano accounts and populate cloud + # account instances with the corresponding data center info + for account in self.accounts: + try: + cloud_account = RwLaunchpadYang.CloudAccount() + cloud_account.name = account.name + + # Create a client for this cloud account to query for + # the associated data centers + client = openmano_client.OpenmanoCliAPI( + self.log, + account.openmano.host, + account.openmano.port, + account.openmano.tenant_id, + ) + + # Populate the cloud account with the data center info + for uuid, name in client.datacenter_list(): + cloud_account.datacenters.append( + RwLaunchpadYang.DataCenter( + uuid=uuid, + name=name, + ) + ) + + datacenters.cloud_accounts.append(cloud_account) + + except Exception as e: + self.log.exception(e) + + xact_info.respond_xpath( + RwDts.XactRspCode.MORE, + 'D,/rw-launchpad:datacenters', + datacenters, + ) + + xact_info.respond_xpath(RwDts.XactRspCode.ACK) + + except Exception as e: + self.log.exception(e) + raise + + handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + + with self.dts.group_create() as group: + self.reg = group.register( + xpath=DataCenterPublisher.XPATH, + handler=handler, + flags=RwDts.Flag.PUBLISHER, + ) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py new file mode 100644 index 0000000..10df8f5 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py @@ -0,0 +1,346 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + +import logging +import time + + +class MessageException(Exception): + def __init__(self, msg): + self.msg = msg + + +class Message(object): + """ + Messages are events that describe stages of the onboarding process, and + any event that may occur during the onboarding process. + """ + + def __init__(self, level, name, text): + self._level = level + self._name = name + self._text = text + self._timestamp = time.time() + + def __repr__(self): + return "{} {}:{}:{}".format( + self.timestamp, + logging._levelNames.get(self.level, self.level), + self.name, + self.text, + ) + + @property + def level(self): + return self._level + + @property + def name(self): + return self._name + + @property + def text(self): + return self._text + + @property + def timestamp(self): + return self._timestamp + + def log(self, logger): + logger.log(self.level, self.text) + + +class WarningMessage(Message): + """ + A warning is a message that does not prevent the onboarding process for + continuing, but may not be the intention of the user when they initiated + the process. + """ + + def __init__(self, name, text): + super().__init__(logging.WARNING, name, text) + + +class ErrorMessage(Message): + """ + An error message alerts the user to an event that prevent the continuation + of the onboarding process. + """ + + def __init__(self, name, text): + super().__init__(logging.ERROR, name, text) + + +class StatusMessage(Message): + """ + A status message informs the user of an expected stage in the onboarding + process. + """ + + def __init__(self, name, text): + super().__init__(logging.INFO, name, text) + + def log(self, logger): + pass + + +class Logger(object): + """ + This class is used to augment a python logger class so that messages can be + passed to it. Messages are recorded so that the uploader application can + provide this information to the client, and the messages are also recorded + on the server via the standard logging facilities. 
+ """ + + def __init__(self, logger, messages): + self._rift_logger = logger + self._messages = messages + + @property + def messages(self): + return self._messages + + def message(self, msg): + msg.log(self._rift_logger) + self._messages.append(msg) + + def debug(self, msg): + self._rift_logger.debug(msg) + + def info(self, msg): + self._rift_logger.info(msg) + + def error(self, msg): + self._rift_logger.error(msg) + + def fatal(self, msg): + self._rift_logger.fatal(msg) + + def warn(self, msg): + self._rift_logger.warn(msg) + + def warning(self, msg): + self._rift_logger.warning(msg) + + def critical(self, msg): + self._rift_logger.critical(msg) + + def exception(self, exc): + self._rift_logger.exception(exc) + + +class OnboardStart(StatusMessage): + def __init__(self): + super().__init__("onboard-started", "onboarding process started") + + +class OnboardError(ErrorMessage): + def __init__(self, msg): + super().__init__("onboard-error", msg) + + +class OnboardWarning(ErrorMessage): + def __init__(self, msg): + super().__init__("onboard-warning", msg) + + +class OnboardPackageUpload(StatusMessage): + def __init__(self): + super().__init__("onboard-pkg-upload", "uploading package") + + +class OnboardImageUpload(StatusMessage): + def __init__(self): + super().__init__("onboard-img-upload", "uploading image") + + +class OnboardPackageValidation(StatusMessage): + def __init__(self): + super().__init__("onboard-pkg-validation", "package contents validation") + + +class OnboardDescriptorValidation(StatusMessage): + def __init__(self): + super().__init__("onboard-dsc-validation", "descriptor validation") + + +class OnboardDescriptorError(OnboardError): + def __init__(self, filename): + super().__init__("unable to onboard {}".format(filename)) + + +class OnboardDescriptorOnboard(StatusMessage): + def __init__(self): + super().__init__("onboard-dsc-onboard", "onboarding descriptors") + + +class OnboardSuccess(StatusMessage): + def __init__(self): + super().__init__("onboard-success", "onboarding process successfully completed") + + +class OnboardFailure(StatusMessage): + def __init__(self): + super().__init__("onboard-failure", "onboarding process failed") + + +class OnboardMissingContentType(OnboardError): + def __init__(self): + super().__init__("missing content-type header") + + +class OnboardUnsupportedMediaType(OnboardError): + def __init__(self): + super().__init__("multipart/form-data required") + + +class OnboardMissingContentBoundary(OnboardError): + def __init__(self): + super().__init__("missing content boundary") + + +class OnboardMissingTerminalBoundary(OnboardError): + def __init__(self): + super().__init__("Unable to find terminal content boundary") + + +class OnboardUnreadableHeaders(OnboardError): + def __init__(self): + super().__init__("Unable to read message headers") + + +class OnboardUnreadablePackage(OnboardError): + def __init__(self): + super().__init__("Unable to read package") + + +class OnboardMissingChecksumsFile(OnboardError): + def __init__(self): + super().__init__("Package does not contain checksums.txt") + + +class OnboardChecksumMismatch(OnboardError): + def __init__(self, filename): + super().__init__("checksum mismatch for {}".format(filename)) + + +class OnboardMissingAccount(OnboardError): + def __init__(self): + super().__init__("no account information available") + + +class OnboardMissingFile(OnboardWarning): + def __init__(self, filename): + super().__init__("{} is not in the archive".format(filename)) + + +class OnboardInvalidPath(OnboardWarning): + def 
__init__(self, filename): + super().__init__("{} is not a valid package path".format(filename)) + + +class ExportStart(StatusMessage): + def __init__(self): + super().__init__("export-started", "export process started") + + +class ExportSuccess(StatusMessage): + def __init__(self): + super().__init__("export-success", "export process successfully completed") + + +class ExportFailure(StatusMessage): + def __init__(self): + super().__init__("export-failure", "export process failed") + + + + +class UpdateError(ErrorMessage): + def __init__(self, msg): + super().__init__("update-error", msg) + + +class UpdateMissingAccount(UpdateError): + def __init__(self): + super().__init__("no account information available") + +class UpdateMissingContentType(UpdateError): + def __init__(self): + super().__init__("missing content-type header") + + +class UpdateUnsupportedMediaType(UpdateError): + def __init__(self): + super().__init__("multipart/form-data required") + + +class UpdateMissingContentBoundary(UpdateError): + def __init__(self): + super().__init__("missing content boundary") + + +class UpdateStart(StatusMessage): + def __init__(self): + super().__init__("update-started", "update process started") + + +class UpdateSuccess(StatusMessage): + def __init__(self): + super().__init__("update-success", "updating process successfully completed") + + +class UpdateFailure(StatusMessage): + def __init__(self): + super().__init__("update-failure", "updating process failed") + + +class UpdatePackageUpload(StatusMessage): + def __init__(self): + super().__init__("update-pkg-upload", "uploading package") + + +class UpdateDescriptorError(UpdateError): + def __init__(self, filename): + super().__init__("unable to update {}".format(filename)) + + +class UpdateDescriptorUpdated(StatusMessage): + def __init__(self): + super().__init__("update-dsc-updated", "updated descriptors") + + +class UpdateUnreadableHeaders(UpdateError): + def __init__(self): + super().__init__("Unable to read message headers") + + +class UpdateUnreadablePackage(UpdateError): + def __init__(self): + super().__init__("Unable to read package") + + +class UpdateChecksumMismatch(UpdateError): + def __init__(self, filename): + super().__init__("checksum mismatch for {}".format(filename)) + + +class UpdateNewDescriptor(UpdateError): + def __init__(self, filename): + super().__init__("{} contains a new descriptor".format(filename)) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py new file mode 100644 index 0000000..caa4e15 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py @@ -0,0 +1,511 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
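As a quick illustration of how the message classes and Logger wrapper above fit together, here is a minimal usage sketch (it assumes the module is importable as rift.tasklets.rwlaunchpad.message; the logger name and message text are illustrative only):

    import logging
    from rift.tasklets.rwlaunchpad import message

    events = list()
    log = message.Logger(logging.getLogger("uploader-example"), events)

    log.message(message.OnboardStart())               # recorded for the client; StatusMessage.log() is a no-op
    log.message(message.OnboardError("bad package"))  # recorded and also logged at ERROR on the server
    log.debug("plain debug text")                     # forwarded to the underlying Python logger only

    # Each recorded Message carries a name, text and timestamp that the REST
    # state handlers later in this patch serialize back to the client.
    for evt in events:
        print(evt.name, evt.text, evt.timestamp)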
+ + +# +# + +import asyncio +import logging +import math +import mmap +import os +import re +import tarfile +import tempfile +import sys + +import tornado +import tornado.httputil +import tornado.httpserver +import tornado.platform.asyncio + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwTypes', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwLaunchpadYang as rwlaunchpad, + RwcalYang as rwcal, + RwTypes, +) + +import rift.tasklets +import rift.mano.cloud + +from . import uploader +from . import datacenters + + +def get_add_delete_update_cfgs(dts_member_reg, xact, key_name): + # Unfortunately, it is currently difficult to figure out exactly what has + # changed in this xact without Pbdelta support (RIFT-4916) + # As a workaround, we can fetch the pre and post xact elements and + # perform a comparison to figure out adds/deletes/updates + xact_cfgs = list(dts_member_reg.get_xact_elements(xact)) + curr_cfgs = list(dts_member_reg.elements) + + xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs} + curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs} + + # Find Adds + added_keys = set(xact_key_map) - set(curr_key_map) + added_cfgs = [xact_key_map[key] for key in added_keys] + + # Find Deletes + deleted_keys = set(curr_key_map) - set(xact_key_map) + deleted_cfgs = [curr_key_map[key] for key in deleted_keys] + + # Find Updates + updated_keys = set(curr_key_map) & set(xact_key_map) + updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]] + + return added_cfgs, deleted_cfgs, updated_cfgs + + +class CatalogDtsHandler(object): + def __init__(self, tasklet, app): + self.app = app + self.reg = None + self.tasklet = tasklet + + @property + def log(self): + return self.tasklet.log + + @property + def dts(self): + return self.tasklet.dts + + +class VldCatalogDtsHandler(CatalogDtsHandler): + XPATH = "C,/vld:vld-catalog/vld:vld" + + def add_vld(self, vld): + self.log.debug('vld-catalog-handler:add:{}'.format(vld.id)) + if vld.id not in self.tasklet.vld_catalog: + self.tasklet.vld_catalog[vld.id] = vld + else: + self.log.error("vld already in catalog: {}".format(vld.id)) + + def update_vld(self, vld): + self.log.debug('vld-catalog-handler:update:{}'.format(vld.id)) + if vld.id in self.tasklet.vld_catalog: + self.tasklet.vld_catalog[vld.id] = vld + else: + self.log.error("unrecognized VLD: {}".format(vld.id)) + + def delete_vld(self, vld_id): + self.log.debug('vld-catalog-handler:delete:{}'.format(vld_id)) + if vld_id in self.tasklet.vld_catalog: + del self.tasklet.vld_catalog[vld_id] + else: + self.log.error("unrecognized VLD: {}".format(vld_id)) + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actually persist data, this never has any data, so + # skip this for now. + self.log.debug("No xact handle.
Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self.reg, + xact=xact, + key_name="id", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_vld(cfg.id) + + # Handle Adds + for cfg in add_cfgs: + self.add_vld(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_vld(cfg) + + self.log.debug("Registering for VLD catalog") + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=VldCatalogDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class NsdCatalogDtsHandler(CatalogDtsHandler): + XPATH = "C,/nsd:nsd-catalog/nsd:nsd" + + def add_nsd(self, nsd): + self.log.debug('nsd-catalog-handler:add:{}'.format(nsd.id)) + if nsd.id not in self.tasklet.nsd_catalog: + self.tasklet.nsd_catalog[nsd.id] = nsd + else: + self.log.error("nsd already in catalog: {}".format(nsd.id)) + + def update_nsd(self, nsd): + self.log.debug('nsd-catalog-handler:update:{}'.format(nsd.id)) + if nsd.id in self.tasklet.nsd_catalog: + self.tasklet.nsd_catalog[nsd.id] = nsd + else: + self.log.error("unrecognized NSD: {}".format(nsd.id)) + + def delete_nsd(self, nsd_id): + self.log.debug('nsd-catalog-handler:delete:{}'.format(nsd_id)) + if nsd_id in self.tasklet.nsd_catalog: + del self.tasklet.nsd_catalog[nsd_id] + else: + self.log.error("unrecognized NSD: {}".format(nsd_id)) + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. 
Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self.reg, + xact=xact, + key_name="id", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_nsd(cfg.id) + + # Handle Adds + for cfg in add_cfgs: + self.add_nsd(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_nsd(cfg) + + self.log.debug("Registering for NSD catalog") + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=NsdCatalogDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class VnfdCatalogDtsHandler(CatalogDtsHandler): + XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd" + + def add_vnfd(self, vnfd): + self.log.debug('vnfd-catalog-handler:add:{}'.format(vnfd.id)) + if vnfd.id not in self.tasklet.vnfd_catalog: + self.tasklet.vnfd_catalog[vnfd.id] = vnfd + + else: + self.log.error("VNFD already in catalog: {}".format(vnfd.id)) + + def update_vnfd(self, vnfd): + self.log.debug('vnfd-catalog-handler:update:{}'.format(vnfd.id)) + if vnfd.id in self.tasklet.vnfd_catalog: + self.tasklet.vnfd_catalog[vnfd.id] = vnfd + + else: + self.log.error("unrecognized VNFD: {}".format(vnfd.id)) + + def delete_vnfd(self, vnfd_id): + self.log.debug('vnfd-catalog-handler:delete:{}'.format(vnfd_id)) + if vnfd_id in self.tasklet.vnfd_catalog: + del self.tasklet.vnfd_catalog[vnfd_id] + + else: + self.log.error("unrecognized VNFD: {}".format(vnfd_id)) + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self.reg, + xact=xact, + key_name="id", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_vnfd(cfg.id) + + # Handle Adds + for cfg in add_cfgs: + self.add_vnfd(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_vnfd(cfg) + + self.log.debug("Registering for VNFD catalog") + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=VnfdCatalogDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class LaunchpadConfigDtsHandler(object): + XPATH = "C,/rw-launchpad:launchpad-config" + + def __init__(self, dts, log, launchpad): + self.dts = dts + self.log = log + self.task = launchpad + self.reg = None + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. 
Skipping apply config") + return + + cfg = list(self.reg.get_xact_elements(xact))[0] + self.task.set_mode(cfg.operational_mode) + + self.log.debug("Registering for Launchpad Config") + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=LaunchpadConfigDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class CloudAccountHandlers(object): + def __init__(self, dts, log, log_hdl, loop, app): + self._log = log + self._log_hdl = log_hdl + self._dts = dts + self._loop = loop + self._app = app + + self._log.debug("creating cloud account config handler") + self.cloud_cfg_handler = rift.mano.cloud.CloudAccountConfigSubscriber( + self._dts, self._log, self._log_hdl, + rift.mano.cloud.CloudAccountConfigCallbacks( + on_add_apply=self.on_cloud_account_added, + on_delete_apply=self.on_cloud_account_deleted, + ) + ) + + self._log.debug("creating cloud account opdata handler") + self.cloud_operdata_handler = rift.mano.cloud.CloudAccountDtsOperdataHandler( + self._dts, self._log, self._loop, + ) + + def on_cloud_account_deleted(self, account_name): + self._log.debug("cloud account deleted") + self._app.accounts = list(self.cloud_cfg_handler.accounts.values()) + self.cloud_operdata_handler.delete_cloud_account(account_name) + + def on_cloud_account_added(self, account): + self._log.debug("cloud account added") + self._app.accounts = list(self.cloud_cfg_handler.accounts.values()) + self._log.debug("accounts: %s", self._app.accounts) + self.cloud_operdata_handler.add_cloud_account(account) + + @asyncio.coroutine + def register(self): + self.cloud_cfg_handler.register() + yield from self.cloud_operdata_handler.register() + + +class LaunchpadTasklet(rift.tasklets.Tasklet): + UPLOAD_MAX_BODY_SIZE = 1e10 + UPLOAD_PORT = "4567" + + def __init__(self, *args, **kwargs): + super(LaunchpadTasklet, self).__init__(*args, **kwargs) + self.app = None + self.server = None + + self.account_handler = None + self.config_handler = None + self.nsd_catalog_handler = None + self.vld_catalog_handler = None + self.vnfd_catalog_handler = None + self.cloud_handler = None + self.datacenter_handler = None + + self.nsd_catalog = dict() + self.vld_catalog = dict() + self.vnfd_catalog = dict() + + self.mode = rwlaunchpad.OperationalMode.STANDALONE + + @property + def cloud_accounts(self): + if self.cloud_handler is None: + return list() + + return list(self.cloud_handler.cloud_cfg_handler.accounts.values()) + + def start(self): + super(LaunchpadTasklet, self).start() + self.log.info("Starting LaunchpadTasklet") + self.log.setLevel(logging.DEBUG) + + self.log.debug("Registering with dts") + self.dts = rift.tasklets.DTS( + self.tasklet_info, + rwlaunchpad.get_schema(), + self.loop, + self.on_dts_state_change + ) + + self.log.debug("Created DTS Api GI Object: %s", self.dts) + + def stop(self): + try: + self.server.stop() + self.dts.deinit() + except Exception: + print("Caught Exception in LP stop:", sys.exc_info()[0]) + raise + def set_mode(self, mode): + """ Sets the mode of this launchpad""" + self.mode = mode + + @asyncio.coroutine + def init(self): + io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop) + self.app = uploader.UploaderApplication(self) + + manifest = self.tasklet_info.get_pb_manifest() + ssl_cert = manifest.bootstrap_phase.rwsecurity.cert + ssl_key = manifest.bootstrap_phase.rwsecurity.key + ssl_options = { + "certfile" : ssl_cert, + "keyfile" : ssl_key, + } + + if 
manifest.bootstrap_phase.rwsecurity.use_ssl: + self.server = tornado.httpserver.HTTPServer( + self.app, + max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE, + io_loop=io_loop, + ssl_options=ssl_options, + ) + + else: + self.server = tornado.httpserver.HTTPServer( + self.app, + max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE, + io_loop=io_loop, + ) + + self.log.debug("creating VLD catalog handler") + self.vld_catalog_handler = VldCatalogDtsHandler(self, self.app) + yield from self.vld_catalog_handler.register() + + self.log.debug("creating NSD catalog handler") + self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self.app) + yield from self.nsd_catalog_handler.register() + + self.log.debug("creating VNFD catalog handler") + self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self.app) + yield from self.vnfd_catalog_handler.register() + + self.log.debug("creating launchpad config handler") + self.lp_config_handler = LaunchpadConfigDtsHandler(self.dts, self.log, self) + yield from self.lp_config_handler.register() + + self.log.debug("creating datacenter handler") + self.datacenter_handler = datacenters.DataCenterPublisher(self) + yield from self.datacenter_handler.register() + + self.cloud_handler = CloudAccountHandlers( + self.dts, self.log, self.log_hdl, self.loop, self.app + ) + yield from self.cloud_handler.register() + + @asyncio.coroutine + def run(self): + self.server.listen(LaunchpadTasklet.UPLOAD_PORT) + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Handle DTS state change + + Take action according to current DTS state to transition application + into the corresponding application state + + Arguments + state - current dts state + + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self.dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py new file mode 100644 index 0000000..d400cc4 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py @@ -0,0 +1,1379 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
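The uploader module that follows implements the REST endpoints served by the tasklet above on UPLOAD_PORT (4567). A client-side sketch of onboarding a package, assuming SSL is disabled and using an illustrative host name, file name and credentials (the multipart field must be named "descriptor" for extract_package to find the archive):

    import requests

    base = "http://launchpad.example.com:4567"

    # Stream a descriptor package to the launchpad.
    with open("ping_pong_nsd.tar.gz", "rb") as pkg:
        resp = requests.post(
            base + "/api/upload",
            files={"descriptor": pkg},
            auth=("admin", "admin"),
        )
    transaction_id = resp.json()["transaction_id"]

    # Poll the per-transaction state until it is no longer "pending".
    state = requests.get(
        "{}/api/upload/{}/state".format(base, transaction_id),
        auth=("admin", "admin"),
    ).json()
    print(state["status"], state["events"], state["errors"])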
+ + +# +# + +import collections +import mmap +import os +import shutil +import tarfile +import tempfile +import threading +import uuid +import xml.etree.ElementTree as ET +import json + +import requests +import tornado +import tornado.escape +import tornado.ioloop +import tornado.web +import tornado.httputil + +import gi +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwcalYang', '1.0') + +from gi.repository import ( + RwLaunchpadYang as rwlaunchpad, + RwYang, + RwcalYang as rwcal, + NsdYang, + VnfdYang, + ) +import rift.mano.cloud + +from . import archive +from . import checksums +from . import convert +from . import message +from .message import ( + ExportFailure, + ExportStart, + ExportSuccess, + MessageException, + OnboardDescriptorError, + OnboardDescriptorOnboard, + OnboardDescriptorValidation, + OnboardFailure, + OnboardImageUpload, + OnboardInvalidPath, + OnboardMissingAccount, + OnboardMissingContentBoundary, + OnboardMissingContentType, + OnboardMissingTerminalBoundary, + OnboardPackageUpload, + OnboardPackageValidation, + OnboardStart, + OnboardSuccess, + OnboardUnreadableHeaders, + OnboardUnreadablePackage, + OnboardUnsupportedMediaType, + UpdateChecksumMismatch, + UpdateDescriptorError, + UpdateDescriptorUpdated, + UpdateFailure, + UpdateMissingAccount, + UpdateMissingContentBoundary, + UpdateMissingContentType, + UpdateNewDescriptor, + UpdatePackageUpload, + UpdateStart, + UpdateSuccess, + UpdateUnreadableHeaders, + UpdateUnreadablePackage, + UpdateUnsupportedMediaType, + ) + + +class UnreadableHeadersError(Exception): + pass + + +class UnreadablePackageError(Exception): + pass + + +class HttpMessageError(Exception): + def __init__(self, code, msg): + self.code = code + self.msg = msg + + +class OnboardError(Exception): + def __init__(self, msg): + self.msg = msg + + +class UpdateError(Exception): + def __init__(self, msg): + self.msg = msg + + +class RequestHandler(tornado.web.RequestHandler): + def options(self, *args, **kargs): + pass + + def set_default_headers(self): + self.set_header('Access-Control-Allow-Origin', '*') + self.set_header('Access-Control-Allow-Headers', 'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization') + self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE') + + +def boundary_search(fd, boundary): + """ + Use the Boyer-Moore-Horpool algorithm to find matches to a message boundary + in a file-like object. + + Arguments: + fd - a file-like object to search + boundary - a string defining a message boundary + + Returns: + An array of indices corresponding to matches in the file + + """ + # It is easier to work with a search pattern that is reversed with this + # algorithm + needle = ''.join(reversed(boundary)).encode() + + # Create a lookup to efficiently determine how far we can skip through the + # file based upon the characters in the pattern. + lookup = dict() + for i, c in enumerate(needle[1:], start=1): + if c not in lookup: + lookup[c] = i + + blength = len(boundary) + indices = list() + + # A buffer that same length as the pattern is used to read characters from + # the file. Note that characters are added from the left to make for a + # straight forward comparison with the reversed orientation of the needle. + buffer = collections.deque(maxlen=blength) + buffer.extendleft(fd.read(blength)) + + # Iterate through the file and construct an array of the indices where + # matches to the boundary occur. 
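+ # For example, with boundary "--abc" the reversed needle is b"cba--" and + # blength is 5; the lookup table then maps 'b' to 1, 'a' to 2 and '-' to 3, + # so any other trailing byte (including 'c') allows a full skip of + # blength bytes.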
+ index = 0 + while True: + tail = buffer[0] + + # If the "tail" of the buffer matches the first character in the + # needle, perform a character-by-character check. + if tail == needle[0]: + for x, y in zip(buffer, needle): + if x != y: + break + + else: + # Success! Record the index of the beginning of the match + indices.append(index) + + # Determine how far to skip based upon the "tail" character + skip = lookup.get(tail, blength) + chunk = fd.read(skip) + + if chunk == b'': + break + + # Push the chunk into the buffer and update the index + buffer.extendleft(chunk) + index += skip + + return indices + + +def extract_package(log, fd, boundary, pkgfile): + """Extract tarball from multipart message on disk + + Arguments: + fd - A file object that the package can be read from + boundary - a string defining the boundary of different parts in the + multipart message. + + """ + log.debug("extracting archive from data") + + # Find the indices of the message boundaries + boundaries = boundary_search(fd, boundary) + if not boundaries: + raise UnreadablePackageError() + + # check that the message has a terminal boundary + fd.seek(boundaries[-1]) + terminal = fd.read(len(boundary) + 2) + if terminal != boundary.encode() + b'--': + raise OnboardError(OnboardMissingTerminalBoundary()) + + log.debug("search for part containing archive") + # find the part of the message that contains the descriptor + for alpha, bravo in zip(boundaries[:-1], boundaries[1:]): + # Move to the beginning of the message part (and trim the + # boundary) + fd.seek(alpha) + fd.readline() + + # Extract the headers from the beginning of the message + headers = tornado.httputil.HTTPHeaders() + while fd.tell() < bravo: + line = fd.readline() + if not line.strip(): + break + + headers.parse_line(line.decode('utf-8')) + + else: + raise UnreadableHeadersError() + + # extract the content disposition and options or move on to the next + # part of the message. + try: + content_disposition = headers['content-disposition'] + disposition, options = tornado.httputil._parse_header(content_disposition) + except KeyError: + continue + + # If this is not form-data, it is not what we are looking for + if disposition != 'form-data': + continue + + # If there is no descriptor in the options, this data does not + # represent a descriptor archive. + if options.get('name', '') != 'descriptor': + continue + + # Write the archive section to disk + with open(pkgfile + ".partial", 'wb') as tp: + log.debug("writing archive ({}) to filesystem".format(pkgfile)) + + remaining = bravo - fd.tell() + while remaining > 0: + length = min(remaining, 1024) + tp.write(fd.read(length)) + remaining -= length + + tp.flush() + + # If the data ends with carriage-return and line-feed + # characters, this can cause gzip to issue warnings or errors. Here, + # we check the last two bytes of the data and remove them if they + # correspond to '\r\n'.
+ with open(pkgfile + ".partial", "rb+") as tp: + tp.seek(-2, 2) + if tp.read(2) == b"\r\n": + tp.seek(-2, 2) + tp.truncate() + + log.debug("finished writing archive") + + # Strip the ".partial" suffix from the basename + shutil.move(pkgfile + ".partial", pkgfile) + + return + + raise UnreadablePackageError() + + +@tornado.web.stream_request_body +class StreamingUploadHandler(RequestHandler): + def initialize(self, log, loop): + """Initialize the handler + + Arguments: + log - the logger that this handler should use + loop - the tasklet's ioloop + + """ + self.transaction_id = str(uuid.uuid4()) + + self.loop = loop + self.log = self.application.get_logger(self.transaction_id) + + self.package_name = None + self.package_fp = None + self.boundary = None + + self.log.debug('created handler (transaction_id = {})'.format(self.transaction_id)) + + def msg_missing_account(self): + raise NotImplementedError() + + def msg_missing_content_type(self): + raise NotImplementedError() + + def msg_unsupported_media_type(self): + raise NotImplementedError() + + def msg_missing_content_boundary(self): + raise NotImplementedError() + + def msg_start(self): + raise NotImplementedError() + + def msg_success(self): + raise NotImplementedError() + + def msg_failure(self): + raise NotImplementedError() + + def msg_package_upload(self): + raise NotImplementedError() + + @tornado.gen.coroutine + def prepare(self): + """Prepare the handler for a request + + The prepare function is the first part of a request transaction. It + creates a temporary file that uploaded data can be written to. + + """ + if self.request.method != "POST": + return + + self.log.message(self.msg_start()) + + try: + # Retrieve the content type and parameters from the request + content_type = self.request.headers.get('content-type', None) + if content_type is None: + raise HttpMessageError(400, self.msg_missing_content_type()) + + content_type, params = tornado.httputil._parse_header(content_type) + + if "multipart/form-data" != content_type.lower(): + raise HttpMessageError(415, self.msg_unsupported_media_type()) + + if "boundary" not in params: + raise HttpMessageError(400, self.msg_missing_content_boundary()) + + self.boundary = params["boundary"] + self.package_fp = tempfile.NamedTemporaryFile( + prefix="pkg-", + delete=False, + ) + + self.package_name = self.package_fp.name + + self.log.debug('writing to {}'.format(self.package_name)) + + except HttpMessageError as e: + self.log.message(e.msg) + self.log.message(self.msg_failure()) + + raise tornado.web.HTTPError(e.code, e.msg.name) + + except Exception as e: + self.log.exception(e) + self.log.message(self.msg_failure()) + + @tornado.gen.coroutine + def data_received(self, data): + """Write data to the current file + + Arguments: + data - a chunk of data to write to file + + """ + self.package_fp.write(data) + + def post(self): + """Handle a post request + + The function is called after any data associated with the body of the + request has been received. + + """ + self.package_fp.close() + self.log.message(self.msg_package_upload()) + + +class UploadHandler(StreamingUploadHandler): + """ + This handler is used to upload archives that contain VNFDs, NSDs, and PNFDs + to the launchpad. This is a streaming handler that writes uploaded archives + to disk without loading them all into memory.
+ """ + + def msg_missing_account(self): + return OnboardMissingAccount() + + def msg_missing_content_type(self): + return OnboardMissingContentType() + + def msg_unsupported_media_type(self): + return OnboardUnsupportedMediaType() + + def msg_missing_content_boundary(self): + return OnboardMissingContentBoundary() + + def msg_start(self): + return OnboardStart() + + def msg_success(self): + return OnboardSuccess() + + def msg_failure(self): + return OnboardFailure() + + def msg_package_upload(self): + return OnboardPackageUpload() + + def post(self): + """Handle a post request + + The function is called after any data associated with the body of the + request has been received. + + """ + super().post() + + filesize = os.stat(self.package_name).st_size + self.log.debug('wrote {} bytes to {}'.format(filesize, self.package_name)) + + self.application.onboard( + self.package_name, + self.boundary, + self.transaction_id, + auth=self.request.headers.get('authorization', None), + ) + + self.set_status(200) + self.write(tornado.escape.json_encode({ + "transaction_id": self.transaction_id, + })) + + +class UpdateHandler(StreamingUploadHandler): + def msg_missing_account(self): + return UpdateMissingAccount() + + def msg_missing_content_type(self): + return UpdateMissingContentType() + + def msg_unsupported_media_type(self): + return UpdateUnsupportedMediaType() + + def msg_missing_content_boundary(self): + return UpdateMissingContentBoundary() + + def msg_start(self): + return UpdateStart() + + def msg_success(self): + return UpdateSuccess() + + def msg_failure(self): + return UpdateFailure() + + def msg_package_upload(self): + return UpdatePackageUpload() + + def post(self): + """Handle a post request + + The function is called after any data associated with the body of the + request has been received. 
+ + """ + super().post() + + filesize = os.stat(self.package_name).st_size + self.log.debug('wrote {} bytes to {}'.format(filesize, self.package_name)) + + self.application.update( + self.package_name, + self.boundary, + self.transaction_id, + auth=self.request.headers.get('authorization', None), + ) + + self.set_status(200) + self.write(tornado.escape.json_encode({ + "transaction_id": self.transaction_id, + })) + + +class StateHandler(RequestHandler): + def initialize(self, log, loop): + self.log = log + self.loop = loop + + def success(self, messages): + success = self.__class__.SUCCESS + return any(isinstance(msg, success) for msg in messages) + + def failure(self, messages): + failure = self.__class__.FAILURE + return any(isinstance(msg, failure) for msg in messages) + + def started(self, messages): + started = self.__class__.STARTED + return any(isinstance(msg, started) for msg in messages) + + def status(self, messages): + if self.failure(messages): + return "failure" + elif self.success(messages): + return "success" + return "pending" + + def notifications(self, messages): + notifications = { + "errors": list(), + "events": list(), + "warnings": list(), + } + + for msg in messages: + if isinstance(msg, message.StatusMessage): + notifications["events"].append({ + 'value': msg.name, + 'text': msg.text, + 'timestamp': msg.timestamp, + }) + continue + + elif isinstance(msg, message.WarningMessage): + notifications["warnings"].append({ + 'value': msg.text, + 'timestamp': msg.timestamp, + }) + continue + + elif isinstance(msg, message.ErrorMessage): + notifications["errors"].append({ + 'value': msg.text, + 'timestamp': msg.timestamp, + }) + continue + + self.log.warning('unrecognized message: {}'.format(msg)) + + return notifications + + def get(self, transaction_id): + if transaction_id not in self.application.messages: + raise tornado.web.HTTPError(404, "unrecognized transaction ID") + + messages = self.application.messages[transaction_id] + messages.sort(key=lambda m: m.timestamp) + + if not self.started(messages): + raise tornado.web.HTTPError(404, "unrecognized transaction ID") + + notifications = self.notifications(messages) + notifications["status"] = self.status(messages) + + self.write(tornado.escape.json_encode(notifications)) + + +class ExportStateHandler(StateHandler): + STARTED = ExportStart + SUCCESS = ExportSuccess + FAILURE = ExportFailure + + +class UploadStateHandler(StateHandler): + STARTED = OnboardStart + SUCCESS = OnboardSuccess + FAILURE = OnboardFailure + + +class UpdateStateHandler(StateHandler): + STARTED = UpdateStart + SUCCESS = UpdateSuccess + FAILURE = UpdateFailure + + +class UpdatePackage(threading.Thread): + def __init__(self, log, app, accounts, filename, boundary, pkg_id, auth, use_ssl , ssl_cert, ssl_key): + super().__init__() + self.app = app + self.log = log + self.auth = auth + self.pkg_id = pkg_id + self.accounts = accounts + self.filename = filename + self.boundary = boundary + self.updates_dir = os.path.join( + os.environ['RIFT_ARTIFACTS'], + "launchpad/updates", + ) + self.pkg_dir = os.path.join( + self.updates_dir, + self.pkg_id, + ) + self.use_ssl = use_ssl + self.ssl_cert = ssl_cert + self.ssl_key = ssl_key + + # Get the IO loop from the import main thred + self.io_loop = tornado.ioloop.IOLoop.current() + + def run(self): + try: + arch = self.extract_package() + self.validate_images(arch) + self.validate_descriptors(arch) + + try: + self.update_images(arch) + finally: + self.remove_images(arch) + + self.update_descriptors(arch) + + 
self.log.message(UpdateSuccess()) + + except UpdateError as e: + self.log.message(e.msg) + self.log.message(UpdateFailure()) + + except Exception as e: + self.log.exception(e) + if str(e): + self.log.message(message.UpdateError(str(e))) + self.log.message(UpdateFailure()) + + finally: + self.remove_images(arch) + os.remove(self.filename) + + def validate_images(self, arch): + for filename in arch.images: + with open(os.path.join(self.pkg_dir, filename), 'r+b') as fp: + chksum = checksums.checksum(fp) + + if chksum != arch.checksums[filename]: + raise UpdateError(UpdateChecksumMismatch(filename)) + + def remove_images(self, arch): + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/packages', self.pkg_id) + for image in arch.images: + try: + os.remove(os.path.join(pkg_dir, image)) + except OSError: + pass + + def validate_descriptors(self, arch): + self.validate_descriptor_checksums(arch) + self.validate_descriptor_existence(arch) + + def validate_descriptor_checksums(self, arch): + def checksum_comparison(filename): + with open(os.path.join(self.pkg_dir, filename), 'r+b') as fp: + chksum = checksums.checksum(fp) + + if chksum != arch.checksums[filename]: + raise UpdateError(UpdateChecksumMismatch(filename)) + + for filename in arch.vnfds: + checksum_comparison(filename) + + for filename in arch.nsds: + checksum_comparison(filename) + + def validate_descriptor_existence(self, arch): + def validate_descriptor_existence_vnfd(): + converter = convert.VnfdYangConverter() + + descriptor_ids = set() + for desc in self.app.tasklet.vnfd_catalog_handler.reg.elements: + self.log.debug("validating descriptor: {}".format(desc.id)) + descriptor_ids.add(desc.id) + + for filename in arch.vnfds: + # Read the XML/JSON from file + filepath = os.path.join(self.pkg_dir, filename) + with open(filepath) as fp: + data = fp.read() + + # Construct the VNFD descriptor object from the XML/JSON data. We + # use this to determine the ID of the VNFD, which is a + # necessary part of the URL. + if 'xml' in filename: + vnfd = converter.from_xml_string(data) + elif 'json' in filename: + vnfd = converter.from_json_string(data) + + if vnfd.id not in descriptor_ids: + raise UpdateError(UpdateNewDescriptor(filename)) + + def validate_descriptor_existence_nsd(): + converter = convert.NsdYangConverter() + + descriptor_ids = set() + for desc in self.app.tasklet.nsd_catalog_handler.reg.elements: + self.log.debug("validating descriptor: {}".format(desc.id)) + descriptor_ids.add(desc.id) + + for filename in arch.nsds: + # Read the XML/JSON from file + filepath = os.path.join(self.pkg_dir, filename) + with open(filepath) as fp: + data = fp.read() + + # Construct the NSD descriptor object from the XML data. We use + # this to determine the ID of the NSD, which is a necessary + # part of the URL. 
+ if 'xml' in filename: + vnfd = converter.from_xml_string(data) + elif 'json' in filename: + vnfd = converter.from_json_string(data) + + if vnfd.id not in descriptor_ids: + raise UpdateError(UpdateNewDescriptor(filename)) + + done = threading.Condition() + error = None + + # Define a callback that can be executed in the main thread in order to + # safely interact with the tasklet + def callback(): + nonlocal error + + done.acquire() + + try: + validate_descriptor_existence_vnfd() + validate_descriptor_existence_nsd() + + except UpdateError as e: + error = e + + except Exception as e: + error = UpdateError(str(e)) + + finally: + done.notify() + done.release() + + self.io_loop.add_callback(callback) + + done.acquire() + done.wait() + done.release() + + if error is not None: + raise error + + def update_images(self, arch): + if not arch.images: + return + + self.log.debug("cloud accounts: {}".format(self.accounts)) + + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/packages', self.pkg_id) + + account_images = {} + for account in self.accounts: + self.log.debug("getting image list for account {}".format(account.name)) + account_images[account] = [] + try: + account_images[account] = account.get_image_list() + except rift.mano.cloud.CloudAccountCalError as e: + self.log.warning("could not get image list for account {}".format(account.name)) + continue + + for filename in arch.images: + self.log.debug('uploading image: {}'.format(filename)) + + image = rwcal.ImageInfoItem() + image.name = os.path.basename(filename) + image.location = os.path.join(pkg_dir, filename) + image.checksum = arch.checksums[filename] + + for account in self.accounts: + # Find images on the cloud account which have the same name + matching_images = [i for i in account_images[account] if i.name == image.name] + matching_checksum = [i for i in matching_images if i.checksum == image.checksum] + if len(matching_checksum) > 0: + self.log.debug("found matching image with checksum, not uploading to {}".format(account.name)) + continue + + self.log.debug("uploading to account {}: {}".format(account.name, image)) + try: + image.id = account.create_image(filename) + except rift.mano.cloud.CloudAccountCalError as e: + self.log.error("error when uploading image {} to cloud account: {}".format( + filename, str(e))) + else: + self.log.debug('uploaded image to account{}: {}'.format(account.name, filename)) + + self.log.message(OnboardImageUpload()) + + def update_descriptors(self, arch): + self.update_descriptors_vnfd(arch) + self.update_descriptors_nsd(arch) + + self.log.message(UpdateDescriptorUpdated()) + self.log.debug("update complete") + + def update_descriptors_vnfd(self, arch): + converter = convert.VnfdYangConverter() + + auth = ('admin', 'admin') + + for filename in arch.vnfds: + # Read the XML/JSON from file + filepath = os.path.join(self.pkg_dir, filename) + with open(filepath) as fp: + data = fp.read() + + # Construct the VNFD descriptor object from the XML/JSON data. We use + # this to determine the ID of the VNFD, which is a necessary part + # of the URL. 
+ if 'xml' in filename: + vnfd = converter.from_xml_string(data) + + # Remove the top-level element of the XML (the 'catalog' element) + tree = ET.fromstring(data) + data = ET.tostring(tree.getchildren()[0]) + headers = {"content-type": "application/vnd.yang.data+xml"} + elif 'json' in filename: + vnfd = converter.from_json_string(data) + + # Remove the top-level element of the JSON (the 'catalog' element) + key = "vnfd:vnfd-catalog" + if key in data: + newdict = json.loads(data) + if (key in newdict): + data = json.dumps(newdict[key]) + headers = {"content-type": "application/vnd.yang.data+json"} + + # Add authorization header if it has been specified + if self.auth is not None: + headers['authorization'] = self.auth + + # Send request to restconf + + if self.use_ssl: + url = "https://127.0.0.1:8008/api/config/vnfd-catalog/vnfd/{}" + response = requests.put( + url.format(vnfd.id), + data=data, + headers=headers, + auth=auth, + verify=False, + cert=(self.ssl_cert, self.ssl_key), + ) + else: + url = "http://127.0.0.1:8008/api/config/vnfd-catalog/vnfd/{}" + response = requests.put( + url.format(vnfd.id), + data=data, + headers=headers, + auth=auth, + ) + + if not response.ok: + self.log.error(response.text) + raise UpdateError(UpdateDescriptorError(filename)) + + self.log.debug('successfully updated: {}'.format(filename)) + + def update_descriptors_nsd(self, arch): + converter = convert.NsdYangConverter() + + auth = ('admin', 'admin') + + for filename in arch.nsds: + # Read the XML/JSON from file + filepath = os.path.join(self.pkg_dir, filename) + with open(filepath) as fp: + data = fp.read() + + # Construct the NSD descriptor object from the XML/JSON data. We use + # this to determine the ID of the NSD, which is a necessary part + # of the URL. + if 'xml' in filename: + nsd = converter.from_xml_string(data) + + # Remove the top-level element of the XML (the 'catalog' element) + tree = ET.fromstring(data) + data = ET.tostring(tree.getchildren()[0]) + headers = {"content-type": "application/vnd.yang.data+xml"} + elif 'json' in filename: + nsd = converter.from_json_string(data) + + # Remove the top-level element of the JSON (the 'catalog' element) + key = "nsd:nsd-catalog" + if key in data: + newdict = json.loads(data) + if (key in newdict): + data = json.dumps(newdict[key]) + headers = {"content-type": "application/vnd.yang.data+json"} + + # Add authorization header if it has been specified + if self.auth is not None: + headers['authorization'] = self.auth + + # Send request to restconf + + if self.use_ssl: + url = "https://127.0.0.1:8008/api/config/nsd-catalog/nsd/{}" + response = requests.put( + url.format(nsd.id), + data=data, + headers=headers, + auth=auth, + verify=False, + cert=(self.ssl_cert, self.ssl_key), + ) + else: + url = "http://127.0.0.1:8008/api/config/nsd-catalog/nsd/{}" + response = requests.put( + url.format(nsd.id), + data=data, + headers=headers, + auth=auth, + ) + + if not response.ok: + self.log.error(response.text) + raise UpdateError(UpdateDescriptorError(filename)) + + self.log.debug('successfully updated: {}'.format(filename)) + + def extract_package(self): + """Extract tarball from multipart message on disk + + The tarball contained in the message may be very large; Too large to + load into memory without possibly affecting the behavior of the + webserver. So the message is memory mapped and parsed in order to + extract just the tarball, and then to extract the contents of the + tarball. 
+ + Arguments: + filename - The name of a file that contains a multipart message + boundary - a string defining the boundary of different parts in the + multipart message. + + """ + # Ensure the updates directory exists + try: + os.makedirs(self.updates_dir, exist_ok=True) + except FileExistsError as e: + pass + + try: + pkgpath = os.path.join(self.updates_dir, self.pkg_id) + pkgfile = pkgpath + ".tar.gz" + with open(self.filename, 'r+b') as fp: + # A memory mapped representation of the file is used to reduce + # the memory footprint of the running application. + mapped = mmap.mmap(fp.fileno(), 0) + extract_package( + self.log, + mapped, + self.boundary, + pkgfile, + ) + + # Process the package archive + tar = tarfile.open(pkgfile, mode="r:gz") + arc = archive.LaunchpadArchive(tar, self.log) + self.log.debug("archive extraction complete") + + arc.extract(pkgpath) + + return arc + + except MessageException as e: + raise OnboardError(e.msg) + + except UnreadableHeadersError: + raise UpdateError(UpdateUnreadableHeaders()) + + except UnreadablePackageError: + raise UpdateError(UpdateUnreadablePackage()) + + +class OnboardPackage(threading.Thread): + def __init__(self, log, app, accounts, filename, boundary, pkg_id, auth, use_ssl, ssl_cert, ssl_key): + super().__init__() + self.app = app + self.log = log + self.auth = auth + self.pkg_id = pkg_id + self.accounts = accounts + self.filename = filename + self.boundary = boundary + self.io_loop = tornado.ioloop.IOLoop.current() + self.use_ssl = use_ssl + self.ssl_cert = ssl_cert + self.ssl_key = ssl_key + + def run(self): + try: + arch = self.extract_package() + + try: + self.upload_images(arch) + finally: + self.remove_images(arch) + + self.onboard_descriptors(arch) + + self.log.message(OnboardSuccess()) + + except OnboardError as e: + self.log.message(e.msg) + self.log.message(OnboardFailure()) + + except Exception as e: + self.log.exception(e) + if str(e): + self.log.message(message.OnboardError(str(e))) + self.log.message(OnboardFailure()) + + finally: + os.remove(self.filename) + + def remove_images(self, arch): + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/packages', self.pkg_id) + for image in arch.images: + try: + os.remove(os.path.join(pkg_dir, image)) + except OSError: + pass + + def upload_images(self, arch): + if not arch.images: + return + + self.log.debug("cloud accounts: {}".format(self.accounts)) + + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/packages', self.pkg_id) + + account_images = {} + for account in self.accounts: + self.log.debug("getting image list for account {}".format(account.name)) + account_images[account] = [] + try: + account_images[account] = account.get_image_list() + except rift.mano.cloud.CloudAccountCalError as e: + self.log.warning("could not get image list for account {}".format(account.name)) + continue + + for filename in arch.images: + self.log.debug('uploading image: {}'.format(filename)) + + image = rwcal.ImageInfoItem() + image.name = os.path.basename(filename) + image.location = os.path.join(pkg_dir, filename) + image.checksum = arch.checksums[filename] + + for account in self.accounts: + # Find images on the cloud account which have the same name + matching_images = [i for i in account_images[account] if i.name == image.name] + matching_checksum = [i for i in matching_images if i.checksum == image.checksum] + if len(matching_checksum) > 0: + self.log.debug("found matching image with checksum, not uploading to {}".format(account.name)) + continue + + 
self.log.debug("uploading to account {}: {}".format(account.name, image)) + try: + image.id = account.create_image(image) + except rift.mano.cloud.CloudAccountCalError as e: + self.log.error("error when uploading image {} to cloud account: {}".format( + filename, str(e))) + else: + self.log.debug('uploaded image to account{}: {}'.format(account.name, filename)) + + self.log.message(OnboardImageUpload()) + + def onboard_descriptors(self, arch): + + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], "launchpad/packages", self.pkg_id) + + def post(url, data, headers): + auth = ('admin', 'admin') + + if self.use_ssl: + response = requests.post(url, data=data, headers=headers, auth=auth, verify=False, cert=(self.ssl_cert, self.ssl_key)) + else: + response = requests.post(url, data=data, headers=headers, auth=auth) + if not response.ok: + self.log.error(response.text) + raise OnboardError(OnboardDescriptorError(filename)) + + self.log.debug('successfully uploaded: {}'.format(filename)) + + self.log.message(OnboardDescriptorValidation()) + + def prepare_xml(filename): + # Read the uploaded XML + with open(filename, 'r') as fp: + data = fp.read() + + # Remove the top-level element of the XML (the 'catalog' element) + tree = ET.fromstring(data) + data = ET.tostring(tree.getchildren()[0]) + + return data + + json_toplevel_keys = ["vnfd:vnfd-catalog", "nsd:nsd-catalog"] + + def prepare_json(filename): + # Read the uploaded JSON + with open(filename, 'r') as fp: + data = fp.read() + # Remove the top-level element of the JSON (the 'catalog' element) + for key in json_toplevel_keys: + if key in data: + newdict = json.loads(data) + if (key in newdict): + newstr = json.dumps(newdict[key]) + return newstr + + return data + + endpoints = ( + ("vnfd-catalog", arch.vnfds), + ("pnfd-catalog", arch.pnfds), + ("vld-catalog", arch.vlds), + ("nsd-catalog", arch.nsds), + ("vnffgd-catalog", arch.vnffgds), + ) + + if self.use_ssl: + url = "https://127.0.0.1:8008/api/config/{catalog}" + else: + url = "http://127.0.0.1:8008/api/config/{catalog}" + + try: + for catalog, filenames in endpoints: + for filename in filenames: + path = os.path.join(pkg_dir, filename) + if 'xml' in filename: + data = prepare_xml(path) + headers = {"content-type": "application/vnd.yang.data+xml"} + elif 'json' in filename: + data = prepare_json(path) + headers = {"content-type": "application/vnd.yang.data+json"} + + # Add authorization header if it has been specified + if self.auth is not None: + headers['authorization'] = self.auth + + post(url.format(catalog=catalog), data, headers) + + self.log.message(OnboardDescriptorOnboard()) + self.log.debug("onboard complete") + + except Exception: + # TODO: At this point we need to roll back all of the descriptors + # that were successfully onboarded. + self.log.error("Unable to onboard {}".format(filename)) + raise + + def extract_package(self): + """Extract tarball from multipart message on disk + + The tarball contained in the message may be very large; Too large to + load into memory without possibly affecting the behavior of the + webserver. So the message is memory mapped and parsed in order to + extract just the tarball, and then to extract the contents of the + tarball. + + Arguments: + filename - The name of a file that contains a multipart message + boundary - a string defining the boundary of different parts in the + multipart message. 
+ + """ + # Ensure the packages directory exists + packages = os.path.join(os.environ["RIFT_ARTIFACTS"], "launchpad/packages") + try: + os.makedirs(packages, exist_ok=True) + except FileExistsError as e: + pass + + try: + pkgpath = os.path.join(packages, self.pkg_id) + pkgfile = pkgpath + ".tar.gz" + with open(self.filename, 'r+b') as fp: + # A memory mapped representation of the file is used to reduce + # the memory footprint of the running application. + mapped = mmap.mmap(fp.fileno(), 0) + extract_package( + self.log, + mapped, + self.boundary, + pkgfile, + ) + + # Process the package archive + tar = tarfile.open(pkgfile, mode="r:gz") + arc = archive.LaunchpadArchive(tar, self.log) + self.log.debug("archive extraction complete") + + arc.extract(pkgpath) + + return arc + + except MessageException as e: + raise OnboardError(e.msg) + + except UnreadableHeadersError: + raise OnboardError(OnboardUnreadableHeaders()) + + except UnreadablePackageError: + raise OnboardError(OnboardUnreadablePackage()) + + +class ExportHandler(RequestHandler): + def initialize(self, log, loop): + self.loop = loop + self.transaction_id = str(uuid.uuid4()) + self.log = message.Logger( + log, + self.application.messages[self.transaction_id], + ) + + def get(self): + self.log.message(ExportStart()) + + # Parse the IDs + ids_query = self.get_query_argument("ids") + ids = [id.strip() for id in ids_query.split(',')] + + # Retrieve the list of the descriptors + descriptors = list() + for id in ids: + if id in self.application.vnfd_catalog: + descriptors.append(self.application.vnfd_catalog[id]) + continue + + if id in self.application.nsd_catalog: + descriptors.append(self.application.nsd_catalog[id]) + continue + + raise tornado.web.HTTPError(400, "unknown descriptor: {}".format(id)) + + pkg = archive.PackageArchive() + + # Add the VNFDs to the package + for desc in descriptors: + if isinstance(desc, VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd): + pkg.add_vnfd(desc) + + # Add any NSDs to the package + for desc in descriptors: + if isinstance(desc, NsdYang.YangData_Nsd_NsdCatalog_Nsd): + pkg.add_nsd(desc) + + # Create a closure to create the actual package and run it in a + # separate thread + def run(): + pkg.create_archive( + self.transaction_id, + dest=self.application.export_dir, + ) + + self.application.tasklet.loop.run_in_executor(None, run) + + self.log.message(ExportSuccess()) + + self.write(tornado.escape.json_encode({ + "transaction_id": self.transaction_id, + })) + + +class UploaderApplication(tornado.web.Application): + def __init__(self, tasklet): + self.tasklet = tasklet + self.accounts = [] + self.messages = collections.defaultdict(list) + self.export_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/exports') + + manifest = tasklet.tasklet_info.get_pb_manifest() + self.use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl + self.ssl_cert = manifest.bootstrap_phase.rwsecurity.cert + self.ssl_key = manifest.bootstrap_phase.rwsecurity.key + + attrs = dict(log=self.log, loop=self.loop) + + super(UploaderApplication, self).__init__([ + (r"/api/update", UpdateHandler, attrs), + (r"/api/upload", UploadHandler, attrs), + (r"/api/export", ExportHandler, attrs), + (r"/api/upload/([^/]+)/state", UploadStateHandler, attrs), + (r"/api/update/([^/]+)/state", UpdateStateHandler, attrs), + (r"/api/export/([^/]+)/state", ExportStateHandler, attrs), + (r"/api/export/([^/]+.tar.gz)", tornado.web.StaticFileHandler, { + "path": self.export_dir, + }) + ]) + + @property + def log(self): + return self.tasklet.log + + 
@property + def loop(self): + return self.tasklet.loop + + def get_logger(self, transaction_id): + return message.Logger(self.log, self.messages[transaction_id]) + + def onboard(self, package, boundary, transaction_id, auth=None): + log = message.Logger(self.log, self.messages[transaction_id]) + + pkg_id = str(uuid.uuid1()) + OnboardPackage( + log, + self, + self.accounts, + package, + boundary, + pkg_id, + auth, + self.use_ssl, + self.ssl_cert, + self.ssl_key, + ).start() + + def update(self, package, boundary, transaction_id, auth=None): + log = message.Logger(self.log, self.messages[transaction_id]) + + pkg_id = str(uuid.uuid1()) + UpdatePackage( + log, + self, + self.accounts, + package, + boundary, + pkg_id, + auth, + self.use_ssl, + self.ssl_cert, + self.ssl_key, + ).start() + + @property + def vnfd_catalog(self): + return self.tasklet.vnfd_catalog + + @property + def nsd_catalog(self): + return self.tasklet.nsd_catalog + + @property + def vld_catalog(self): + return self.tasklet.vld_catalog + + def get_vlds(self, vld_ids): + vlds = list() + for id in vld_ids: + vlds.append(self.vld_catalog[id]) + + return vlds \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py new file mode 100755 index 0000000..a8cb871 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwlaunchpad + +class Tasklet(rift.tasklets.rwlaunchpad.LaunchpadTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt new file mode 100644 index 0000000..9abde92 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Creation Date: 2015/10/30 +# + +include(rift_plugin) + +set(TASKLET_NAME rwmonitor) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
+rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/core.py + rift/tasklets/${TASKLET_NAME}/tasklet.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py new file mode 100644 index 0000000..1f96a0e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .tasklet import MonitorTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py new file mode 100644 index 0000000..4d653b2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py @@ -0,0 +1,542 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
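The Makefile above locates Makefile.top by searching each parent directory in turn. The same upward search, expressed in Python purely as a sketch of what the make helper does (the function name and usage are illustrative):

    import os

    def find_upward(filename, start="."):
        # Walk from start toward the filesystem root and return the first
        # directory entry matching filename, or None once the root is reached.
        path = os.path.abspath(start)
        while True:
            candidate = os.path.join(path, filename)
            if os.path.exists(candidate):
                return candidate
            if path == os.path.dirname(path):
                return None
            path = os.path.dirname(path)

    # find_upward("Makefile.top") mirrors $(call find_upward, "Makefile.top")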
+ + +# +# + +import asyncio +import collections +import time + +import gi +gi.require_version('RwVnfrYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwDts', '1.0') +from gi.repository import ( + RwVnfrYang, + RwNsrYang, + RwDts, + ) + +import rift.tasklets + + +class NfviMetricsAggregator(object): + def __init__(self, + tasklet, + cloud_account=None, + nfvi_monitor=None, + ): + """Create an instance of NfviMetricsAggregator + + Arguments: + tasklet - a tasklet object that provides access to DTS, + logging, the asyncio ioloop, and monitoring state + cloud_account - a cloud account + nfvi_monitor - an NFVI monitor plugin + + """ + self.tasklet = tasklet + self.nfvi_monitor = nfvi_monitor + self.cloud_account = cloud_account + + @property + def dts(self): + return self.tasklet.dts + + @property + def log(self): + return self.tasklet.log + + @property + def loop(self): + return self.tasklet.loop + + @property + def records(self): + return self.tasklet.records + + @property + def polling_period(self): + return self.tasklet.polling_period + + @asyncio.coroutine + def request_vdu_metrics(self, vdur): + try: + # self.log.debug('request_vdu_metrics: {}'.format(vdur.vim_id)) + + # Create uninitialized metric structure + vdu_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + + # No metrics can be collected if the monitor has not been + # specified. + if self.nfvi_monitor is None: + return vdu_metrics + + # Retrieve the NFVI metrics for this VDU + try: + _, metrics = yield from self.loop.run_in_executor( + self.tasklet.executor, + self.nfvi_monitor.nfvi_metrics, + self.cloud_account, + vdur.vim_id, + ) + + except Exception as e: + self.log.exception(e) + return vdu_metrics + + # VCPU + vdu_metrics.vcpu.total = vdur.vm_flavor.vcpu_count + vdu_metrics.vcpu.utilization = metrics.vcpu.utilization + + # Memory (in bytes) + vdu_metrics.memory.used = metrics.memory.used + vdu_metrics.memory.total = 1e6 * vdur.vm_flavor.memory_mb + vdu_metrics.memory.utilization = 100 * vdu_metrics.memory.used / vdu_metrics.memory.total + + # Storage + vdu_metrics.storage.used = metrics.storage.used + vdu_metrics.storage.total = 1e9 * vdur.vm_flavor.storage_gb + vdu_metrics.storage.utilization = 100 * vdu_metrics.storage.used / vdu_metrics.storage.total + + # Network (incoming) + vdu_metrics.network.incoming.packets = metrics.network.incoming.packets + vdu_metrics.network.incoming.packet_rate = metrics.network.incoming.packet_rate + vdu_metrics.network.incoming.bytes = metrics.network.incoming.bytes + vdu_metrics.network.incoming.byte_rate = metrics.network.incoming.byte_rate + + # Network (outgoing) + vdu_metrics.network.outgoing.packets = metrics.network.outgoing.packets + vdu_metrics.network.outgoing.packet_rate = metrics.network.outgoing.packet_rate + vdu_metrics.network.outgoing.bytes = metrics.network.outgoing.bytes + vdu_metrics.network.outgoing.byte_rate = metrics.network.outgoing.byte_rate + + # External ports + vdu_metrics.external_ports.total = len(vdur.external_interface) + + # Internal ports + vdu_metrics.internal_ports.total = len(vdur.internal_interface) + + # TODO publish the metrics at the VDU-level + + return vdu_metrics + + except Exception as e: + self.log.exception(e) + raise + + @asyncio.coroutine + def request_vnf_metrics(self, vnfr_id): + try: + # self.log.debug('request_vnf_metrics: {}'.format(vnfr_id)) + + # For each VDU contained within the VNF, create a task to + # retrieve the NFVI metrics associated with that VDU. 
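request_vdu_metrics() above converts the flavor's memory (MB) and storage (GB) to bytes and reports utilization as used / total * 100. The same arithmetic in isolation, with a guard for a zero total added here only for illustration (the original divides directly):

def utilization_pct(used, total):
    # Percentage utilization; 0.0 when the total is unknown or zero.
    return 100.0 * used / total if total else 0.0

memory_total_bytes = 1e6 * 2048           # a hypothetical 2048 MB flavor
storage_total_bytes = 1e9 * 20            # a hypothetical 20 GB flavor
print(utilization_pct(512e6, memory_total_bytes))    # 25.0
print(utilization_pct(5e9, storage_total_bytes))     # 25.0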
+ tasks = list() + for vdu in self.records.vdurs(vnfr_id): + task = self.loop.create_task(self.request_vdu_metrics(vdu)) + tasks.append(task) + + vnf_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_NfviMetrics() + + # If there is no pending data, early out + if not tasks: + return vnf_metrics + + # Wait for the tasks to complete. Aggregate the results and + # return them. + yield from asyncio.wait(tasks, loop=self.loop) + + # TODO aggregated the metrics + for task in tasks: + vdu_metrics = task.result() + + # VCPU + vnf_metrics.vcpu.total += vdu_metrics.vcpu.total + vnf_metrics.vcpu.utilization += vdu_metrics.vcpu.total * vdu_metrics.vcpu.utilization + + # Memory (in bytes) + vnf_metrics.memory.used += vdu_metrics.memory.used + vnf_metrics.memory.total += vdu_metrics.memory.total + vnf_metrics.memory.utilization += vdu_metrics.memory.used + + # Storage + vnf_metrics.storage.used += vdu_metrics.storage.used + vnf_metrics.storage.total += vdu_metrics.storage.total + vnf_metrics.storage.utilization += vdu_metrics.storage.used + + # Network (incoming) + vnf_metrics.network.incoming.packets += vdu_metrics.network.incoming.packets + vnf_metrics.network.incoming.packet_rate += vdu_metrics.network.incoming.packet_rate + vnf_metrics.network.incoming.bytes += vdu_metrics.network.incoming.bytes + vnf_metrics.network.incoming.byte_rate += vdu_metrics.network.incoming.byte_rate + + # Network (outgoing) + vnf_metrics.network.outgoing.packets += vdu_metrics.network.outgoing.packets + vnf_metrics.network.outgoing.packet_rate += vdu_metrics.network.outgoing.packet_rate + vnf_metrics.network.outgoing.bytes += vdu_metrics.network.outgoing.bytes + vnf_metrics.network.outgoing.byte_rate += vdu_metrics.network.outgoing.byte_rate + + # External ports + vnf_metrics.external_ports.total += vdu_metrics.external_ports.total + + # Internal ports + vnf_metrics.internal_ports.total += vdu_metrics.internal_ports.total + + + # TODO find out the correct way to determine the number of + # active and inactive VMs in a VNF + vnf_metrics.vm.active_vm = len(tasks) + vnf_metrics.vm.inactive_vm = 0 + + # VCPU (note that VCPU utilization if a weighted average) + if vnf_metrics.vcpu.total > 0: + vnf_metrics.vcpu.utilization /= vnf_metrics.vcpu.total + + # Memory (in bytes) + if vnf_metrics.memory.total > 0: + vnf_metrics.memory.utilization *= 100.0 / vnf_metrics.memory.total + + # Storage + if vnf_metrics.storage.total > 0: + vnf_metrics.storage.utilization *= 100.0 / vnf_metrics.storage.total + + # TODO publish the VNF-level metrics + + return vnf_metrics + + except Exception as e: + self.log.exception(e) + raise + + @asyncio.coroutine + def request_ns_metrics(self, ns_instance_config_ref): + try: + # self.log.debug('request_ns_metrics: {}'.format(ns_instance_config_ref)) + + # Create a task for each VNFR to retrieve the NFVI metrics + # associated with that VNFR. + vnfrs = self.records.vnfr_ids(ns_instance_config_ref) + tasks = list() + for vnfr in vnfrs: + task = self.loop.create_task(self.request_vnf_metrics(vnfr)) + tasks.append(task) + + ns_metrics = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_NfviMetrics() + + # If there are any VNFR tasks, wait for them to finish + # before beginning the next iteration. 
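At the VNF level the VCPU utilization is a weighted average: each VDU contributes its utilization weighted by its VCPU count, and the sum is normalized by the total count once all tasks have completed. The same computation on plain dictionaries (hypothetical values):

def aggregate_vcpu(vdu_metrics):
    total = sum(m["total"] for m in vdu_metrics)
    weighted = sum(m["total"] * m["utilization"] for m in vdu_metrics)
    return {"total": total, "utilization": weighted / total if total else 0.0}

print(aggregate_vcpu([{"total": 4, "utilization": 50.0},
                      {"total": 2, "utilization": 80.0}]))
# {'total': 6, 'utilization': 60.0}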
+ if tasks: + yield from asyncio.wait(tasks, loop=self.loop) + + # Aggregate the VNFR metrics + for task in tasks: + vnf_metrics = task.result() + + ns_metrics.vm.active_vm += vnf_metrics.vm.active_vm + ns_metrics.vm.inactive_vm += vnf_metrics.vm.inactive_vm + + # VCPU + ns_metrics.vcpu.total += vnf_metrics.vcpu.total + ns_metrics.vcpu.utilization += vnf_metrics.vcpu.total * vnf_metrics.vcpu.utilization + + # Memory (in bytes) + ns_metrics.memory.used += vnf_metrics.memory.used + ns_metrics.memory.total += vnf_metrics.memory.total + ns_metrics.memory.utilization += vnf_metrics.memory.used + + # Storage + ns_metrics.storage.used += vnf_metrics.storage.used + ns_metrics.storage.total += vnf_metrics.storage.total + ns_metrics.storage.utilization += vnf_metrics.storage.used + + # Network (incoming) + ns_metrics.network.incoming.packets += vnf_metrics.network.incoming.packets + ns_metrics.network.incoming.packet_rate += vnf_metrics.network.incoming.packet_rate + ns_metrics.network.incoming.bytes += vnf_metrics.network.incoming.bytes + ns_metrics.network.incoming.byte_rate += vnf_metrics.network.incoming.byte_rate + + # Network (outgoing) + ns_metrics.network.outgoing.packets += vnf_metrics.network.outgoing.packets + ns_metrics.network.outgoing.packet_rate += vnf_metrics.network.outgoing.packet_rate + ns_metrics.network.outgoing.bytes += vnf_metrics.network.outgoing.bytes + ns_metrics.network.outgoing.byte_rate += vnf_metrics.network.outgoing.byte_rate + + # External ports + ns_metrics.external_ports.total += vnf_metrics.external_ports.total + + # Internal ports + ns_metrics.internal_ports.total += vnf_metrics.internal_ports.total + + # VCPU (note that VCPU utilization if a weighted average) + if ns_metrics.vcpu.total > 0: + ns_metrics.vcpu.utilization /= ns_metrics.vcpu.total + + # Memory (in bytes) + if ns_metrics.memory.total > 0: + ns_metrics.memory.utilization *= 100.0 / ns_metrics.memory.total + + # Storage + if ns_metrics.storage.total > 0: + ns_metrics.storage.utilization *= 100.0 / ns_metrics.storage.total + + return ns_metrics + + except Exception as e: + self.log.exception(e) + raise + + @asyncio.coroutine + def publish_nfvi_metrics(self, ns_instance_config_ref): + nfvi_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr[nsr:ns-instance-config-ref='{}']/rw-nsr:nfvi-metrics" + nfvi_xpath = nfvi_xpath.format(ns_instance_config_ref) + + registration_handle = yield from self.dts.register( + xpath=nfvi_xpath, + handler=rift.tasklets.DTS.RegistrationHandler(), + flags=(RwDts.Flag.PUBLISHER | RwDts.Flag.NO_PREP_READ), + ) + + self.log.debug('preparing to publish NFVI metrics for {}'.format(ns_instance_config_ref)) + + try: + # Create the initial metrics element + ns_metrics = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_NfviMetrics() + registration_handle.create_element(nfvi_xpath, ns_metrics) + + prev = time.time() + while True: + # Use a simple throttle to regulate the frequency that the + # VDUs are sampled at. 
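request_vnf_metrics() and request_ns_metrics() share the same fan-out shape: schedule one task per child record, wait for all of them, then read each task's result. Condensed into a reusable sketch in the module's Python 3.4 coroutine style (the helper name is not part of the module):

import asyncio

@asyncio.coroutine
def gather_metrics(loop, fetch, items):
    # One task per item; wait for them all, then collect the results.
    tasks = [loop.create_task(fetch(item)) for item in items]
    if not tasks:
        return []
    yield from asyncio.wait(tasks, loop=loop)
    return [task.result() for task in tasks]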
+ curr = time.time() + + if curr - prev < self.polling_period: + pause = self.polling_period - (curr - prev) + yield from asyncio.sleep(pause, loop=self.loop) + + prev = time.time() + + # Retrieve the NS NFVI metrics + ns_metrics = yield from self.request_ns_metrics(ns_instance_config_ref) + + # Check that that NSR still exists + if not self.records.has_nsr(ns_instance_config_ref): + break + + # Publish the NSR metrics + registration_handle.update_element(nfvi_xpath, ns_metrics) + + except Exception as e: + self.log.exception(e) + raise + + finally: + # Make sure that the NFVI metrics are removed from the operational + # data + yield from registration_handle.delete_element(nfvi_xpath) + self.log.debug('deleted: {}'.format(nfvi_xpath)) + + # Now that we are done with the registration handle, tell DTS to + # deregister it + registration_handle.deregister() + + self.log.debug('finished publishing NFVI metrics for {}'.format(ns_instance_config_ref)) + + +class RecordManager(object): + """ + There are two mappings that this class is reponsible for maintaining. The + first is a mapping from the set of NSR IDs to the VNFR IDs contained within + the network service, + + nsr-id + |-- vnfr-id-1 + |-- vnfr-id-2 + |-- ... + \-- vnfr-id-n + + The second, maps the set of VNFR IDs to the VDUR structures contains within + those network functions, + + vnfr-id + |-- vdur-1 + |-- vdur-2 + |-- ... + \-- vdur-m + + + Note that the VDURs can be identified by the vim-id contained in the VDUR + structure. + + It is important to understand that the current model of the system does not + have a direct connection from an NSR to a VNFR or VDUR. This means that the + NSR structure does not contain any VNFR/VDUR information, and it would be + necessary to query DTS in order to retrieve VNFR/VDUR information. This + class manages the two mappings to keep track of the NSRs and VNFRs so that + it is unnecessary to query DTS in order to determine which VNFRs/VDURs are + contained within a given NSR. On the other hand, a VNFR does in fact + contain VDUR information. + + Finally, note that it is necessary to retain the mapping from the VNFR to + the VDUR because NFVI metric aggregation needs to publish aggregate + information at both the NS ans VNF levels. 
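The publishing loop above throttles itself so that samples are taken no more often than self.polling_period seconds apart. Factored out as a small helper in the same Python 3.4 style (a sketch with an illustrative name; the loop above keeps this logic inline):

import asyncio
import time

@asyncio.coroutine
def wait_for_next_sample(loop, period, prev):
    # Sleep off whatever remains of `period` since the previous sample,
    # then return the new reference time.
    elapsed = time.time() - prev
    if elapsed < period:
        yield from asyncio.sleep(period - elapsed, loop=loop)
    return time.time()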
+ + """ + + def __init__(self): + self._nsr_to_vnfrs = dict() + self._vnfr_to_vdurs = dict() + + # A mapping from the VDURs VIM ID to the VDUR structure + self._vdurs = dict() + + def add_nsr(self, nsr): + """Add an NSR to the manager + + Arguments: + nsr - an NSR structure + + """ + if nsr.constituent_vnfr_ref: + if nsr.ns_instance_config_ref not in self._nsr_to_vnfrs: + self._nsr_to_vnfrs[nsr.ns_instance_config_ref] = set() + + mapping = self._nsr_to_vnfrs[nsr.ns_instance_config_ref] + mapping.update(nsr.constituent_vnfr_ref) + + def add_vnfr(self, vnfr): + """Add a VNFR to the manager + + Arguments: + vnfr - a VNFR structure + + """ + # Create a list of VDURs filtering out the VDURs that have not been + # assigned a vim-id + vdurs = [vdur for vdur in vnfr.vdur if vdur.vim_id is not None] + + # There are no valid VDURs, early out now + if not vdurs: + return + + # Create a set for the VNFR if necessary + if vnfr.id not in self._vnfr_to_vdurs: + self._vnfr_to_vdurs[vnfr.id] = set() + + # Update the vnfr-id mapping + mapping = self._vnfr_to_vdurs[vnfr.id] + mapping.update(vdur.vim_id for vdur in vdurs) + + # Update the vdur mapping + self._vdurs.update((vdur.vim_id, vdur) for vdur in vdurs) + + def has_nsr(self, nsr_id): + """Returns True if the specified NSR ID is in the record manager + + Arguments: + nsr_id - the ID of the NSR to check + + Returns: + a boolean indicating whether the record manager contains the NSR + + """ + return nsr_id in self._nsr_to_vnfrs + + def has_vnfr(self, vnfr_id): + """Returns True if the specified VNFR ID is in the record manager + + Arguments: + vnfr_id - the ID of the VNFR to check + + Returns: + a boolean indicating whether the record manager contains the VNFR + + """ + return vnfr_id in self._vnfr_to_vdurs + + def remove_vnfr(self, vnfr_id): + """Remove the specified VNFR + + The VNFR will be removed along with any of the associated VDURs. + + Arguments: + vnfr_id - the ID of the VNFR to remove + + """ + if vnfr_id not in self._vnfr_to_vdurs: + return + + # Construct a set of VDURs to be deleted from the dict of vdurs + vdur_ids = self._vnfr_to_vdurs[vnfr_id] + vdur_ids &= set(self._vdurs.keys()) + + # Remove the VDUR structures + for vdur_id in vdur_ids: + del self._vdurs[vdur_id] + + # Remove the mapping from the VNFR to the VDURs + del self._vnfr_to_vdurs[vnfr_id] + + def remove_nsr(self, nsr_id): + """Removes the specified NSR + + Note that none of the VNFRs associated with the NSR are removed; This + is related to the separation between the NSR and VNFR in the yang model + (see above). The removal of VNFRs is assumed to be a separate action. 
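A short usage sketch of RecordManager, using namedtuples as hypothetical stand-ins for the NSR/VNFR/VDUR messages (only the attribute names the class reads are modelled):

import collections

Nsr = collections.namedtuple("Nsr", "ns_instance_config_ref constituent_vnfr_ref")
Vnfr = collections.namedtuple("Vnfr", "id vdur")
Vdur = collections.namedtuple("Vdur", "vim_id")

records = RecordManager()
records.add_nsr(Nsr("nsr-1", ["vnfr-a"]))
records.add_vnfr(Vnfr("vnfr-a", [Vdur("vm-1"), Vdur(None)]))   # no vim-id -> skipped

assert records.has_nsr("nsr-1")
assert records.vdur_ids("vnfr-a") == ["vm-1"]
assert records.vnfr_ids("nsr-1") == ["vnfr-a"]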
+ + Arguments: + nsr_id - the ID of the NSR to remove + + """ + del self._nsr_to_vnfrs[nsr_id] + + def vdurs(self, vnfr_id): + """Return a list of the VDURs associated with a VNFR + + Arguments: + vnfr_id - the ID of the VNFR + + Returns: + a list of VDURs + + """ + vdurs = self._vnfr_to_vdurs.get(vnfr_id, set()) + return [self._vdurs[id] for id in vdurs] + + def vdur_ids(self, vnfr_id): + """Return a list of the VDUR IDs associated with a VNFR + + Arguments: + vnfr_id - the ID of the VNFR + + Returns: + a list of VDUR IDs + + """ + return list(self._vnfr_to_vdurs.get(vnfr_id, list())) + + def vnfr_ids(self, nsr_id): + """Return a list of the VNFR IDs associated with a NSR + + Arguments: + nsr_id - the ID of the NSR + + Returns: + a list of VNFR IDs + + """ + return list(self._nsr_to_vnfrs.get(nsr_id, list())) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py new file mode 100644 index 0000000..3ed50e1 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py @@ -0,0 +1,443 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
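The subscribers defined in tasklet.py below route DTS query actions through a small dispatch table with an ignore() fallback. The pattern in isolation, with plain strings standing in for the rwdts.QueryAction values:

def make_dispatcher(on_create, on_update, on_delete):
    dispatch = {
        "CREATE": on_create,
        "UPDATE": on_update,
        "DELETE": on_delete,
    }
    def handle(action, msg):
        # Unknown actions fall through to a no-op, like ignore() below.
        dispatch.get(action, lambda _msg: None)(msg)
    return handle

handle = make_dispatcher(print, print, print)
handle("CREATE", {"id": "nsr-1"})     # routed to the create handler
handle("RECOVER", {"id": "nsr-1"})    # silently ignored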
+ + +# +# + +import asyncio +import collections +import concurrent.futures +import os +import time +import uuid +import sys + +import gi +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwLog', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwMonitorYang', '1.0') +gi.require_version('RwmonYang', '1.0') +gi.require_version('RwNsdYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfrYang', '1.0') +gi.require_version('RwTypes', '1.0') +gi.require_version('RwYang', '1.0') +from gi.repository import ( + NsrYang, + RwBaseYang, + RwDts as rwdts, + RwLaunchpadYang, + RwLog as rwlog, + RwcalYang as rwcal, + RwMonitorYang as rwmonitor, + RwmonYang as rwmon, + RwNsdYang as rwnsd, + RwTypes, + RwYang, + VnfrYang, +) + +import rift.tasklets +import rift.mano.cloud + +import rw_peas + +from .core import (NfviMetricsAggregator, RecordManager) + + +class DtsHandler(object): + def __init__(self, tasklet): + self.reg = None + self.tasklet = tasklet + + @property + def log(self): + return self.tasklet.log + + @property + def log_hdl(self): + return self.tasklet.log_hdl + + @property + def dts(self): + return self.tasklet.dts + + @property + def loop(self): + return self.tasklet.loop + + @property + def classname(self): + return self.__class__.__name__ + + +class NsInstanceOpdataSubscriber(DtsHandler): + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr" + + @asyncio.coroutine + def register(self): + def handle_create(msg): + self.tasklet.records.add_nsr(msg) + self.tasklet.start_ns_monitor(msg) + + def handle_update(msg): + self.tasklet.records.add_nsr(msg) + + def handle_delete(msg): + self.tasklet.records.remove_nsr(msg.ns_instance_config_ref) + + def ignore(msg): + pass + + dispatch = { + rwdts.QueryAction.CREATE: handle_create, + rwdts.QueryAction.UPDATE: handle_update, + rwdts.QueryAction.DELETE: handle_delete, + } + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + try: + # Disabling the following comments since they are too frequent + # self.log.debug("{}:on_prepare:msg {}".format(self.classname, msg)) + + if msg is not None: + dispatch.get(action, ignore)(msg) + + except Exception as e: + self.log.exception(e) + + finally: + # Disabling the following comments since they are too frequent + # self.log.debug("{}:on_prepare complete".format(self.classname)) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + handler = rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare, + ) + + with self.dts.group_create() as group: + group.register( + xpath=NsInstanceOpdataSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + handler=handler, + ) + + +class VnfrCatalogSubscriber(DtsHandler): + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr" + + @asyncio.coroutine + def register(self): + def handle_create(msg): + self.log.debug("{}:handle_create:{}".format(self.classname, msg)) + self.tasklet.records.add_vnfr(msg) + + def handle_update(msg): + self.log.debug("{}:handle_update:{}".format(self.classname, msg)) + self.tasklet.records.add_vnfr(msg) + + def handle_delete(msg): + self.tasklet.records.remove_vnfr(msg) + + def ignore(msg): + pass + + dispatch = { + rwdts.QueryAction.CREATE: handle_create, + rwdts.QueryAction.UPDATE: handle_update, + rwdts.QueryAction.DELETE: handle_delete, + } + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + try: + self.log.debug("{}:on_prepare".format(self.classname)) + self.log.debug("{}:on_preparef:msg 
{}".format(self.classname, msg)) + + xpath = ks_path.to_xpath(VnfrYang.get_schema()) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath) + + dispatch.get(action, ignore)(msg) + + except Exception as e: + self.log.exception(e) + + finally: + self.log.debug("{}:on_prepare complete".format(self.classname)) + + handler = rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare, + ) + + with self.dts.group_create() as group: + group.register( + xpath=VnfrCatalogSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + handler=handler, + ) + + +class NfviPollingPeriodSubscriber(DtsHandler): + XPATH = "C,/nsr:ns-instance-config" + + @asyncio.coroutine + def register(self): + def on_apply(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. Skipping apply config") + return + + xact_config = list(self.reg.get_xact_elements(xact)) + for config in xact_config: + if config.nfvi_polling_period is not None: + self.tasklet.polling_period = config.nfvi_polling_period + self.log.debug("new polling period: {}".format(self.tasklet.polling_period)) + + self.log.debug( + "Registering for NFVI polling period config using xpath: %s", + NfviPollingPeriodSubscriber.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=on_apply, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=NfviPollingPeriodSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class CloudAccountDtsHandler(DtsHandler): + def __init__(self, tasklet): + super().__init__(tasklet) + self._cloud_cfg_subscriber = None + + def on_account_added_apply(self, account): + self.log.info("adding cloud account: {}".format(account)) + self.tasklet.cloud_accounts[account.name] = account.cal_account_msg + self.tasklet.account_nfvi_monitors[account.name] = self.load_nfvi_monitor_plugin(account.cal_account_msg) + + def on_account_deleted_apply(self, account_name): + self.log.info("deleting cloud account: {}".format(account_name)) + if account_name in self.tasklet.cloud_accounts: + del self.tasklet.cloud_accounts[account_name] + + if account_name in self.tasklet.account_nfvi_monitors: + del self.tasklet.account_nfvi_monitors[account_name] + + @asyncio.coroutine + def on_account_updated_prepare(self, account): + raise NotImplementedError("Monitor does not support updating cloud account") + + def load_nfvi_monitor_plugin(self, cloud_account): + if cloud_account.account_type == "openstack": + self.log.debug('loading ceilometer plugin for NFVI metrics') + plugin = rw_peas.PeasPlugin( + "rwmon_ceilometer", + 'RwMon-1.0', + ) + + else: + self.log.debug('loading mock plugin for NFVI metrics') + plugin = rw_peas.PeasPlugin( + "rwmon_mock", + 'RwMon-1.0', + ) + + impl = plugin.get_interface("Monitoring") + impl.init(self.log_hdl) + + # Check that the plugin is available on this platform + _, available = impl.nfvi_metrics_available(cloud_account) + if not available: + self.log.warning('NFVI monitoring unavailable on this host') + return None + + return impl + + def register(self): + self.log.debug("creating cloud account config handler") + self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber( + self.dts, self.log, self.log_hdl, + rift.mano.cloud.CloudAccountConfigCallbacks( + on_add_apply=self.on_account_added_apply, + on_delete_apply=self.on_account_deleted_apply, + 
on_update_prepare=self.on_account_updated_prepare, + ) + ) + self._cloud_cfg_subscriber.register() + + +class MonitorTasklet(rift.tasklets.Tasklet): + """ + The MonitorTasklet is responsible for sampling NFVI mettrics (via a CAL + plugin) and publishing the aggregate information. + """ + + DEFAULT_POLLING_PERIOD = 1.0 + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.nsr_subscriber = NsInstanceOpdataSubscriber(self) + self.vnfr_subscriber = VnfrCatalogSubscriber(self) + self.cloud_cfg_subscriber = CloudAccountDtsHandler(self) + self.poll_period_subscriber = NfviPollingPeriodSubscriber(self) + self.cloud_account_handler = CloudAccountDtsHandler(self) + + self.vnfrs = collections.defaultdict(list) + self.vdurs = collections.defaultdict(list) + + self.monitors = dict() + self.cloud_accounts = {} + self.account_nfvi_monitors = {} + + self.records = RecordManager() + self.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD + self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=16) + + def start(self): + super().start() + self.log.info("Starting MonitoringTasklet") + + self.log.debug("Registering with dts") + self.dts = rift.tasklets.DTS( + self.tasklet_info, + rwmonitor.get_schema(), + self.loop, + self.on_dts_state_change + ) + + self.log.debug("Created DTS Api GI Object: %s", self.dts) + + def stop(self): + try: + self.dts.deinit() + except Exception: + print("Caught Exception in RWMON stop:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def init(self): + self.log.debug("creating cloud account handler") + self.cloud_cfg_subscriber.register() + + self.log.debug("creating NFVI poll period subscriber") + yield from self.poll_period_subscriber.register() + + self.log.debug("creating network service record subscriber") + yield from self.nsr_subscriber.register() + + self.log.debug("creating vnfr subscriber") + yield from self.vnfr_subscriber.register() + + def on_cloud_account_created(self, cloud_account): + pass + + def on_cloud_account_deleted(self, cloud_account): + pass + + @asyncio.coroutine + def run(self): + pass + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Handle DTS state change + + Take action according to current DTS state to transition application + into the corresponding application state + + Arguments + state - current dts state + + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self.dts.handle.set_state(next_state) + + def start_ns_monitor(self, ns_instance_opdata_msg): + ns_instance_config_ref = ns_instance_opdata_msg.ns_instance_config_ref + nsr_cloud_account = ns_instance_opdata_msg.cloud_account + + if nsr_cloud_account not in self.cloud_accounts: + self.log.error("cloud account %s has not been configured", nsr_cloud_account) + return + + if nsr_cloud_account not in self.account_nfvi_monitors: + self.log.warning("No NFVI monitoring available for cloud account %s", + nsr_cloud_account) + return + + cloud_account = self.cloud_accounts[nsr_cloud_account] + nfvi_monitor = self.account_nfvi_monitors[nsr_cloud_account] + + try: + if 
ns_instance_config_ref not in self.monitors: + aggregator = NfviMetricsAggregator( + tasklet=self, + cloud_account=cloud_account, + nfvi_monitor=nfvi_monitor, + ) + + # Create a task to run the aggregator independently + coro = aggregator.publish_nfvi_metrics(ns_instance_config_ref) + task = self.loop.create_task(coro) + self.monitors[ns_instance_config_ref] = task + + msg = 'started monitoring NFVI metrics for {}' + self.log.info(msg.format(ns_instance_config_ref)) + + except Exception as e: + self.log.exception(e) + raise + + def stop_ns_monitor(self, ns_instance_config_ref): + if ns_instance_config_ref not in self.monitors: + msg = "Trying the destroy non-existent monitor for {}" + self.log.error(msg.format(ns_instance_config_ref)) + + else: + self.monitors[ns_instance_config_ref].cancel() + del self.monitors[ns_instance_config_ref] \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rwmonitor.py b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rwmonitor.py new file mode 100755 index 0000000..24b37db --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rwmonitor.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwmonitor + +class Tasklet(rift.tasklets.rwmonitor.MonitorTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwnsm/CMakeLists.txt new file mode 100644 index 0000000..9265ce1 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/CMakeLists.txt @@ -0,0 +1,38 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwnsmtasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
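start_ns_monitor() and stop_ns_monitor() above keep exactly one long-running publishing task per NS instance and cancel it on teardown. That bookkeeping reduced to a sketch (class and method names here are illustrative):

class MonitorRegistry(object):
    def __init__(self, loop):
        self.loop = loop
        self.tasks = {}

    def start(self, ns_ref, coro):
        # At most one monitoring task per NS instance reference.
        if ns_ref not in self.tasks:
            self.tasks[ns_ref] = self.loop.create_task(coro)

    def stop(self, ns_ref):
        task = self.tasks.pop(ns_ref, None)
        if task is None:
            return False              # nothing was being monitored
        task.cancel()
        return True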
+rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/rwnsm_conman.py + rift/tasklets/${TASKLET_NAME}/rwnsmplugin.py + rift/tasklets/${TASKLET_NAME}/openmano_nsm.py + rift/tasklets/${TASKLET_NAME}/cloud.py + rift/tasklets/${TASKLET_NAME}/config_value_pool.py + rift/tasklets/${TASKLET_NAME}/publisher.py + rift/tasklets/${TASKLET_NAME}/xpath.py + rift/tasklets/${TASKLET_NAME}/rwnsmconfigplugin.py + rift/tasklets/${TASKLET_NAME}/rwnsm_conagent.py + rift/tasklets/${TASKLET_NAME}/jujuconf_nsm.py + rift/tasklets/${TASKLET_NAME}/juju_intf.py + rift/tasklets/${TASKLET_NAME}/rwvnffgmgr.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwnsm/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py new file mode 100644 index 0000000..3fd29de --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwnsmtasklet import NsmTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py new file mode 100644 index 0000000..48c0770 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py @@ -0,0 +1,211 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +from gi.repository import ( + RwDts as rwdts, + RwcalYang as rwcal, + RwTypes, + ProtobufC, + ) + +import rift.tasklets +import rift.mano.cloud + +from . import openmano_nsm +from . import rwnsmplugin + + +class CloudAccountNotFoundError(Exception): + pass + + +class RwNsPlugin(rwnsmplugin.NsmPluginBase): + """ + RW Implentation of the NsmPluginBase + """ + def __init__(self, dts, log, loop, publisher, cloud_account): + self._dts = dts + self._log = log + self._loop = loop + + def create_nsr(self, nsr_msg, nsd): + """ + Create Network service record + """ + pass + + @asyncio.coroutine + def deploy(self, nsr): + pass + + @asyncio.coroutine + def instantiate_ns(self, nsr, xact): + """ + Instantiate NSR with the passed nsr id + """ + yield from nsr.instantiate(xact) + + @asyncio.coroutine + def instantiate_vnf(self, nsr, vnfr, xact): + """ + Instantiate NSR with the passed nsr id + """ + yield from vnfr.instantiate(nsr, xact) + + @asyncio.coroutine + def instantiate_vl(self, nsr, vlr, xact): + """ + Instantiate NSR with the passed nsr id + """ + yield from vlr.instantiate(xact) + + @asyncio.coroutine + def terminate_ns(self, nsr, xact): + """ + Terminate the network service + """ + pass + + @asyncio.coroutine + def terminate_vnf(self, vnfr, xact): + """ + Terminate the network service + """ + yield from vnfr.terminate(xact) + + @asyncio.coroutine + def terminate_vl(self, vlr, xact): + """ + Terminate the virtual link + """ + yield from vlr.terminate(xact) + + +class NsmPlugins(object): + """ NSM Plugins """ + def __init__(self): + self._plugin_classes = { + "openmano": openmano_nsm.OpenmanoNsPlugin, + } + + @property + def plugins(self): + """ Plugin info """ + return self._plugin_classes + + def __getitem__(self, name): + """ Get item """ + print("%s", self._plugin_classes) + return self._plugin_classes[name] + + def register(self, plugin_name, plugin_class, *args): + """ Register a plugin to this Nsm""" + self._plugin_classes[plugin_name] = plugin_class + + def deregister(self, plugin_name, plugin_class, *args): + """ Deregister a plugin to this Nsm""" + if plugin_name in self._plugin_classes: + del self._plugin_classes[plugin_name] + + def class_by_plugin_name(self, name): + """ Get class by plugin name """ + return self._plugin_classes[name] + + +class CloudAccountNsmPluginSelector(object): + def __init__(self, dts, log, log_hdl, loop, records_publisher): + self._dts = dts + self._log = log + self._log_hdl = log_hdl + self._loop = loop + self._records_publisher = records_publisher + + self._nsm_plugins = NsmPlugins() + + self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber( + self._dts, + self._log, + self._log_hdl, + rift.mano.cloud.CloudAccountConfigCallbacks( + on_add_apply=self._on_cloud_account_added, + on_delete_apply=self._on_cloud_account_deleted, + ) + ) + + self._cloud_plugins = {} + self._plugin_instances = {} + + def _on_cloud_account_added(self, cloud_account): + self._log.debug("Got nsm plugin cloud account: %s", cloud_account) + try: + nsm_cls = self._nsm_plugins.class_by_plugin_name( + cloud_account.account_type + ) + except KeyError as e: + self._log.debug( + "Cloud account nsm plugin not found: %s. 
Using standard rift nsm.", + cloud_account.name + ) + nsm_cls = RwNsPlugin + + # Check to see if the plugin was already instantiated + if nsm_cls in self._plugin_instances: + self._log.debug("Cloud account nsm plugin already instantiated. Using existing.") + self._cloud_plugins[cloud_account.name] = self._plugin_instances[nsm_cls] + return + + # Otherwise, instantiate a new plugin using the cloud account + self._log.debug("Instantiating new cloud account using class: %s", nsm_cls) + nsm_instance = nsm_cls(self._dts, self._log, self._loop, + self._records_publisher, cloud_account.account_msg) + self._plugin_instances[nsm_cls] = nsm_instance + + self._cloud_plugins[cloud_account.name] = nsm_instance + + def _on_cloud_account_deleted(self, account_name): + del self._cloud_plugins[account_name] + + def get_cloud_account_plugin_instance(self, account_name): + if account_name not in self._cloud_plugins: + msg = "Account %s was not configured" % account_name + self._log.error(msg) + raise CloudAccountNotFoundError(msg) + + instance = self._cloud_plugins[account_name] + self._log.debug("Got NSM plugin instance for account %s: %s", + account_name, instance) + + return instance + + def get_cloud_account_sdn_name(self, account_name): + if account_name in self._cloud_sub.accounts: + self._log.debug("Cloud accnt msg is %s",self._cloud_sub.accounts[account_name].account_msg) + if self._cloud_sub.accounts[account_name].account_msg.has_field("sdn_account"): + sdn_account = self._cloud_sub.accounts[account_name].account_msg.sdn_account + self._log.info("SDN associated with Cloud name %s is %s", account_name, sdn_account) + return sdn_account + else: + self._log.debug("No SDN Account associated with Cloud name %s", account_name) + return None + + + @asyncio.coroutine + def register(self): + self._cloud_sub.register() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py new file mode 100644 index 0000000..8134a18 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py @@ -0,0 +1,152 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
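CloudAccountNsmPluginSelector above picks an NSM plugin class from the cloud account's account_type, falls back to RwNsPlugin when the type is unknown, and reuses an existing instance when that class was already constructed. The selection logic condensed into a sketch (the stub classes here are empty placeholders, not the real plugins):

class RwNsPluginStub(object):
    pass

class OpenmanoPluginStub(object):
    pass

PLUGIN_CLASSES = {"openmano": OpenmanoPluginStub}
_instances = {}

def plugin_for(account_type):
    cls = PLUGIN_CLASSES.get(account_type, RwNsPluginStub)    # fallback
    if cls not in _instances:                                  # construct once
        _instances[cls] = cls()
    return _instances[cls]

assert plugin_for("openmano") is plugin_for("openmano")
assert isinstance(plugin_for("openstack"), RwNsPluginStub)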
+ +import os +import pickle +import uuid + + +class ParameterValueError(Exception): + pass + + +class ParameterValuePool(object): + def __init__(self, log, name, value_iter): + self._log = log + self._name = name + + self._used_pool_values = [] + self._available_pool_values = list(value_iter) + + self._backing_filepath = os.path.join( + os.environ["RIFT_ARTIFACTS"], + "parameter_pools", + self._name + ) + + self._read_used_pool_values() + + def _save_used_pool_values(self): + dir_path = os.path.dirname(self._backing_filepath) + if not os.path.exists(dir_path): + try: + os.makedirs(dir_path, exist_ok=True) + except OSError as e: + self._log.error("Could not create directory for save used pool: %s", str(e)) + + try: + with open(self._backing_filepath, "wb") as hdl: + pickle.dump(self._used_pool_values, hdl) + except OSError as e: + self._log.error( + "Could not open the parameter value pool file: %s", + str(e)) + except pickle.PickleError as e: + self._log.error( + "Could not pickle the used parameter value pool: %s", + str(e)) + + def _read_used_pool_values(self): + try: + with open(self._backing_filepath, 'rb') as hdl: + self._used_pool_values = pickle.load(hdl) + + except (OSError, EOFError): + self._log.warning("Could not read from backing file: %s", + self._backing_filepath) + self._used_pool_values = [] + + except pickle.PickleError as e: + self._log.warning("Could not unpickle the used parameter value pool from %s: %s", + self._backing_filepath, str(e)) + self._used_pool_values = [] + + for value in self._used_pool_values: + self._available_pool_values.remove(value) + + def get_next_unused_value(self): + if len(self._available_pool_values) == 0: + raise ParameterValueError("Not more parameter values to to allocate") + + next_value = self._available_pool_values[0] + self._log.debug("Got next value for parameter pool %s: %s", self._name, next_value) + + return next_value + + def add_used_value(self, value): + value = int(value) + + if len(self._available_pool_values) == 0: + raise ParameterValueError("Not more parameter values to to allocate") + + if value in self._used_pool_values: + raise ParameterValueError( + "Primitive value of {} was already used for pool name: {}".format( + value, + self._name, + ) + ) + + if value != self._available_pool_values[0]: + raise ParameterValueError("Parameter value not the next in the available list: %s", value) + + self._available_pool_values.pop(0) + self._used_pool_values.append(value) + self._save_used_pool_values() + + def remove_used_value(self, value): + if value not in self._used_pool_values: + self._log.warning("Primitive value of %s was never allocated for pool name: %s", + value, self._name + ) + return + + self._used_pool_values.remove(value) + self._available_pool_values.insert(0, value) + self._save_used_pool_values() + + +if __name__ == "__main__": + import logging + logging.basicConfig(level=logging.DEBUG) + logger = logging.getLogger("config_value_pool.py") + name = str(uuid.uuid4()) + param_pool = ParameterValuePool(logger, name, range(1000, 2000)) + + a = param_pool.get_next_unused_value() + assert a == 1000 + + param_pool.add_used_value(a) + + a = param_pool.get_next_unused_value() + assert a == 1001 + param_pool.add_used_value(a) + + param_pool = ParameterValuePool(logger, name, range(1000, 2000)) + a = param_pool.get_next_unused_value() + assert a == 1002 + + try: + param_pool.add_used_value(1004) + except ParameterValueError: + pass + else: + assert False + + a = param_pool.get_next_unused_value() + assert a == 1002 + 
param_pool.add_used_value(1002) + + param_pool = ParameterValuePool(logger, name, range(1005, 2000)) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/juju_intf.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/juju_intf.py new file mode 100644 index 0000000..07efb52 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/juju_intf.py @@ -0,0 +1,634 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +# Part of the code taken from +# https://github.com/chuckbutler/juju_action_api_class/blob/master/juju_actions.py + +import asyncio +import jujuclient +import os +import ssl +import sys +import time + + +class Action(object): + def __init__(self, data): + # I am undecided if we need this + # model_id = "" + self.uuid = data['action']['tag'] + self.data = data # straight from juju api + self.juju_status = data['status'] + + @classmethod + def from_data(cls, data): + o = cls(data=data) + return o + + +def get_service_units(status): + results = {} + services = status.get('Services', {}) + for svc_name, svc_data in services.items(): + units = svc_data['Units'] or {} + sub_to = svc_data['SubordinateTo'] + if not units and sub_to: + for sub in sub_to: + for unit_name, unit_data in \ + (services[sub].get('Units') or {}).items(): + for sub_name, sub_data in \ + (unit_data['Subordinates'] or {}).items(): + if sub_name.startswith(svc_name): + units[sub_name] = sub_data + results[svc_name] = units + return results + + +class ApiEnvironment(jujuclient.Environment): + def actions_available(self, service=None): + args = { + "Type": 'Action', + "Request": 'ServicesCharmActions', + "Params": { + "Entities": [] + } + } + + services = self.status().get('Services', {}) + service_names = [service] if service else services + for name in service_names: + args['Params']['Entities'].append( + { + "Tag": 'service-' + name + } + ) + + return self._rpc(args) + + def actions_list_all(self, service=None): + args = { + "Type": 'Action', + "Request": 'ListAll', + "Params": { + "Entities": [] + } + } + + service_units = get_service_units(self.status()) + service_names = [service] if service else service_units.keys() + units = [] + + for name in service_names: + units += service_units[name].keys() + + for unit in set(units): + args['Params']['Entities'].append( + { + "Tag": "unit-%s" % unit.replace('/', '-'), + } + ) + + return self._rpc(args) + + def actions_enqueue(self, action, receivers, params=None): + args = { + "Type": "Action", + "Request": "Enqueue", + "Params": { + "Actions": [] + } + } + + for receiver in receivers: + args['Params']['Actions'].append({ + "Receiver": receiver, + "Name": action, + "Parameters": params or {}, + }) + + return self._rpc(args) + + def actions_cancel(self, uuid): + return self._rpc({ + 'Type': 'Action', + 'Request': 'Cancel', + "Params": { + "Entities": [{'Tag': 'action-' + uuid}] + } + }) + + +def 
_parse_action_specs(api_results): + results = {} + + r = api_results['results'] + for service in r: + servicetag = service['servicetag'] + service_name = servicetag[8:] # remove 'service-' prefix + specs = {} + if service['actions']['ActionSpecs']: + for spec_name, spec_def in \ + service['actions']['ActionSpecs'].items(): + specs[spec_name] = ActionSpec(spec_name, spec_def) + results[service_name] = specs + return results + + +def _parse_action_properties(action_properties_dict): + results = {} + + d = action_properties_dict + for prop_name, prop_def in d.items(): + results[prop_name] = ActionProperty(prop_name, prop_def) + return results + + +class Dict(dict): + def __getattr__(self, name): + return self[name] + + +class ActionSpec(Dict): + def __init__(self, name, data_dict): + params = data_dict['Params'] + super(ActionSpec, self).__init__( + name=name, + title=params['title'], + description=params['description'], + properties=_parse_action_properties(params['properties']) + ) + + +class ActionProperty(Dict): + types = { + 'string': str, + 'integer': int, + 'boolean': bool, + 'number': float, + } + type_checks = { + str: 'string', + int: 'integer', + bool: 'boolean', + float: 'number', + } + + def __init__(self, name, data_dict): + super(ActionProperty, self).__init__( + name=name, + description=data_dict.get('description', ''), + default=data_dict.get('default', ''), + type=data_dict.get( + 'type', self._infer_type(data_dict.get('default'))), + ) + + def _infer_type(self, default): + if default is None: + return 'string' + for _type in self.type_checks: + if isinstance(default, _type): + return self.type_checks[_type] + return 'string' + + def to_python(self, value): + f = self.types.get(self.type) + return f(value) if f else value + + +class JujuApi(object): + + def __init__ (self, log, server, port, user, secret, loop): + ''' Connect to the juju host ''' + self.log = log + self.server = server + self.user = user + self.port = port + self.secret = secret + self.loop = loop + endpoint = 'wss://%s:%d' % (server.split()[0], int(port)) + self.endpoint = endpoint + self.env = ApiEnvironment(endpoint) + self.env.login(secret, user=user) + self.deploy_timeout = 600 + # Check python version and setup up SSL + if sys.version_info >= (3,4): + # This is needed for python 3.4 above as by default certificate + # validation is enabled + ssl._create_default_https_context = ssl._create_unverified_context + + def reconnect(self): + ''' Reconnect on error cases''' + self.log.info("Juju: try reconnect to endpoint {}". + format(self.endpoint)) + try: + self.env.close() + del self.env + except Exception as e: + self.log.debug("Juju: env close threw e {}". + format(e)) + self.log.exception(e) + + try: + self.env = ApiEnvironment(self.endpoint) + self.env.login(self.secret, user=self.user) + self.log.info("Juju: reconnected to endpoint {}". + format(self.endpoint)) + except Exception as e: + self.log.error("Juju: exception in reconnect e={}".format(e)) + self.log.exception(e) + + + def get_status(self): + try: + status = self.env.status() + return status + except Exception as e: + self.log.error("Juju: exception while getting status: {}".format(e)) + self.log.exception(e) + self.reconnect() + return None + + def get_annotations(self, services): + ''' + Return dict of (servicename: annotations) for each servicename + in `services`. 
+ ''' + if not services: + return None + + d = {} + for s in services: + d[s] = self.env.get_annotation(s, 'service')['Annotations'] + return d + + def get_actions(self, service=None): + return self.env.actions_list_all(service) + + def get_action_status(self, action_tag): + ''' + responds with the action status, which is one of three values: + + - completed + - pending + - failed + + @param action_tag - the action UUID return from the enqueue method + eg: action-3428e20d-fcd7-4911-803b-9b857a2e5ec9 + ''' + try: + receiver = self.get_actions() + except Exception as e: + self.log.error("Juju: exception is get actions: {}".format(e)) + self.log.exception(e) + + try: + for receiver in receiver['actions']: + if 'actions' in receiver.keys(): + for action_record in receiver['actions']: + if 'action' in action_record.keys(): + if action_record['action']['tag'] == action_tag: + return action_record['status'] + except Exception as e: + self.log.error("Juju: exception in get action status {}".format(e)) + self.log.exception(e) + + def cancel_action(self, uuid): + return self.env.actions_cancel(uuid) + + def get_service_units(self): + return get_service_units(self.get_status()) + + def get_action_specs(self): + results = self.env.actions_available() + return _parse_action_specs(results) + + def enqueue_action(self, action, receivers, params): + try: + result = self.env.actions_enqueue(action, receivers, params) + resp = Action.from_data(result['results'][0]) + return resp + except Exception as e: + self.log.error("Juju: Exception enqueing action {} on units {} with params {}: {}". + format(action, receivers, params, e)) + self.log.exception(e) + return None + + @asyncio.coroutine + def is_deployed(self, service): + return self._is_deployed(service) + + def _is_deployed(self, service, status=None): + status = self.get_service_status(service, status=status) + if status not in ['terminated', 'NA']: + return True + + return False + + def get_service_status(self, service, status=None): + ''' Get service status: + maintenance : The unit is not yet providing services, but is actively doing stuff. + unknown : Service has finished an event but the charm has not called status-set yet. + waiting : Service is unable to progress to an active state because of dependency. + blocked : Service needs manual intervention to get back to the Running state. + active : Service correctly offering all the services. + None : Service is not deployed + *** Make sure this is NOT a asyncio coroutine function *** + ''' + try: + #self.log.debug ("In get service status for service %s, %s" % (service, services)) + if status is None: + status = self.get_status() + if status: + srv_status = status['Services'][service]['Status']['Status'] + return srv_status + except KeyError as e: + self.log.info("Juju: Did not find service {}, e={}".format(service, e)) + except Exception as e: + self.log.error("Juju: exception checking service status for {}, e {}". 
+ format(service, e)) + + return 'NA' + + def is_service_active(self, service): + if self.get_service_status(service) == 'active': + self.log.debug("Juju: service is active for %s " % service) + return True + + return False + + def is_service_blocked(self, service): + if self.get_service_status(service) == 'blocked': + return True + + return False + + def is_service_up(self, service): + if self.get_service_status in ['active', 'blocked']: + return True + + return False + + def is_service_in_error(self, service): + if self.get_service_status == 'error': + self.log.debug("Juju: service is in error state for %s" % service) + + def wait_for_service(self, service): + # Check if the agent for the unit is up, wait for units does not wait for service to be up + # TBD: Should add a timeout, so we do not wait endlessly + waiting = True + delay = 5 # seconds + print ("In wait for service %s" % service) + while waiting: + if self.is_service_up(service): + return + else: + yield from asyncio.sleep(delay, loop=self.loop) + + @asyncio.coroutine + def apply_config(self, service, config): + return self._apply_config(service, config) + + def _apply_config(self, service,config): + if config is None or len(config) == 0: + self.log.warn("Juju: Empty config passed for service %s" % service) + return False + if not self._is_deployed(service): + self.log.warn("Juju: Charm service %s not deployed" % (service)) + return False + self.log.debug("Juju: Config for {} updated to: {}".format(service, config)) + try: + # Try to fix error on service, most probably due to config issue + if self.is_service_in_error: + self.resolve_error(service) + self.env.set_config(service, config) + return True + except Exception as e: + self.log.error("Juju: exception setting config for {} with {}, e {}". + format(service, config, e)) + self.reconnect() + return False + + @asyncio.coroutine + def set_parameter(self, service, parameter, value): + return self.apply_config(service, {parameter : value}) + + @asyncio.coroutine + def deploy_service(self, charm, service, config=None, wait=False): + self._deploy_service(charm, service, config=config, wait=wait) + + def _deploy_service(self, charm, service, config=None, wait=False): + self.log.debug("Juju: Deploy service for charm %s with service %s" % + (charm, service)) + if self._is_deployed(service): + self.log.info("Juju: Charm service %s already deployed" % (service)) + if config: + self._apply_config(service, config) + return 'deployed' + series = "trusty" + deploy_to = "lxc:0" + directory = "usr/rift/charms/%s/%s" % (series, charm) + prefix='' + try: + prefix=os.environ.get('RIFT_INSTALL') + except KeyError: + self.log.info("Juju: RIFT_INSTALL not set in environemnt") + directory = "%s/%s" % (prefix, directory) + + try: + self.log.debug("Juju: Local charm settings: dir=%s, series=%s" % + (directory, series)) + result = self.env.add_local_charm_dir(directory, series) + url = result['CharmURL'] + + except Exception as e: + self.log.critical('Juju: Error setting local charm directory {}: {}'. + format(service, e)) + self.log.exception(e) + self.reconnect() + return 'error' + + try: + self.log.debug("Juju: Deploying using: service={}, url={}, to={}, config={}". 
+ format(service, url, deploy_to, config)) + if config: + self.env.deploy(service, url, machine_spec=deploy_to, config=config) + else: + self.env.deploy(service, url, machine_spec=deploy_to) + except Exception as e: + self.log.warn('Juju: Error deploying {}: {}'.format(service, e)) + if not self._is_deployed(service): + self.log.critical ("Juju: Service {} is not deployed" % service) + self.reconnect() + return 'error' + + if wait: + # Wait for the deployed units to start + try: + self.log.debug("Juju: Waiting for charm %s to come up" % service) + self.env.wait_for_units(timeout=self.deploy_timeout) + except Exception as e: + self.log.critical('Juju: Error starting all units for {}: {}'. + format(service, e)) + self.log.exception(e) + self.reconnect() + return 'error' + + self.wait_for_service(service) + return 'deploying' + + @asyncio.coroutine + def execute_actions(self, service, action, params, wait=False, bail=False): + return self._execute_actions(service, action, params, wait=wait, bail=bail) + + def _execute_actions(self, service, action, params, wait=False, bail=False): + tags = [] + try: + services = get_service_units(self.env.status()) + depl_units = services[service] + except KeyError as e: + self.log.error("Juju: Unable to get service units for {}, e={}". + format(services, e)) + return tags + except Exception as e: + self.log.error("Juju: Error on getting service details for service {}, e={}". + format(service, e)) + self.log.exception(e) + self.reconnect() + return tags + + # Go through each unit deployed and apply the actions to the unit + for unit, status in depl_units.items(): + self.log.debug("Juju: Execute on unit {} with {}". + format(unit, status)) + idx = int(unit[unit.index('/')+1:]) + self.log.debug("Juju: Unit index is %d" % idx) + + unit_name = "unit-%s-%d" % (service, idx) + self.log.debug("Juju: Sending action: {}, {}, {}". + format(action, unit_name, params)) + try: + result = self.enqueue_action(action, [unit_name], params) + if result: + tags.append(result.uuid) + else: + self.log.error("Juju: Error applying the action {} on {} with params {}". + format(action, unit, params)) + except Exception as e: + self.log.error("Juju: Error applying the action {} on {} with params {}, e={}" % + format(action, unit, params, e)) + self.log.exception(e) + self.reconnect() + + # act_status = 'pending' + # #self.log.debug("Juju: Action %s status is %s on %s" % (action, act_status, unit)) + # while wait and ((act_status == 'pending') or (act_status == 'running')): + # act_status = self.get_action_status(result.uuid) + # self.log.debug("Juju: Action %s status is %s on %s" % (action, act_status, unit)) + # if bail and (act_status == 'failed'): + # self.log.error("Juju: Error applying action %s on %s with %s" % (action, unit, params)) + # raise RuntimeError("Juju: Error applying action %s on %s with %s" % (action, unit, params)) + # yield from asyncio.sleep(1, loop=self.loop) + + return tags + + def get_service_units_status(self, service, status): + units_status = {} + if status is None: + return units_status + try: + units = get_service_units(status)[service] + for name, data in units.items(): + # Action rpc require unit name as unit-service-index + # while resolved API require unit name as service/index + #idx = int(name[name.index('/')+1:]) + #unit = "unit-%s-%d" % (service, idx) + units_status.update({name : data['Workload']['Status']}) + except KeyError: + pass + except Exception as e: + self.log.error("Juju: service unit status for service {}, e={}". 
+ format(service, e)) + self.log.exception(e) + self.log.debug("Juju: service unit status for service {}: {}". + format(service, units_status)) + return units_status + + def resolve_error(self, service, status=None): + if status is None: + status = self.get_status() + + if status is None: + return + + srv_status = self.get_service_status(service, status) + if srv_status and srv_status not in ['terminated', 'NA']: + units = self.get_service_units_status(service, status) + for unit, ustatus in units.items(): + if ustatus == 'error': + self.log.info("Juju: Found unit %s with status %s" % + (unit, ustatus)) + try: + # Takes the unit name as service_name/idx unlike action + self.env.resolved(unit) + except Exception as e: + self.log.debug("Juju: Exception when running resolve on unit {}: {}". + format(unit, e)) + self.log.exception(e) + + + @asyncio.coroutine + def destroy_service(self, service): + self._destroy_service(service) + + def _destroy_service(self, service): + ''' Destroy juju service + *** Do NOT add aysncio yield on this function, run in separate thread *** + ''' + self.log.debug("Juju: Destroy charm service: %s" % service) + status = self.get_status() + if status is None: + return + + srv_status = self.get_service_status(service, status) + count = 0 + while srv_status and srv_status not in ['terminated', 'NA']: + count += 1 + self.log.debug("Juju: service %s is in %s state, count %d" % + (service, srv_status, count)) + if count > 25: + self.log.error("Juju: Not able to destroy service %s, status %s after %d tries" % + (service, srv_status, count)) + break + + self.resolve_error(service, status) + + try: + self.env.destroy_service(service) + except Exception as e: + self.log.debug("Juju: Exception when running destroy on service {}: {}". + format(service, e)) + self.log.exception(e) + self.reconnect() + + time.sleep(3) + status = self.get_status() + if status is None: + return + srv_status = self.get_service_status(service, status) + + self.log.debug("Destroyed service %s (%s)" % (service, srv_status)) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/jujuconf_nsm.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/jujuconf_nsm.py new file mode 100644 index 0000000..9654cda --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/jujuconf_nsm.py @@ -0,0 +1,726 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import asyncio +import concurrent.futures +import re +import tempfile +import yaml + +from gi.repository import ( + RwDts as rwdts, +) + +from . import juju_intf +from . import rwnsmconfigplugin + + +# Charm service name accepts only a to z and -. 
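+# Digits are remapped to letters ('0' -> 'a', '1' -> 'b', ...) and any other
+# non-alphabetic character becomes '-', then the result is lower-cased.
+# Illustrative example (hypothetical argument values):
+#   get_vnf_unique_name("ns1", "ping_vnfd", 2)  ->  "nsb-ping-vnfd-c"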
+def get_vnf_unique_name(nsr_name, vnfr_short_name, member_vnf_index): + name = "{}-{}-{}".format(nsr_name, vnfr_short_name, member_vnf_index) + new_name = '' + for c in name: + if c.isdigit(): + c = chr(97 + int(c)) + elif not c.isalpha(): + c = "-" + new_name += c + return new_name.lower() + +class JujuExecuteHelper(object): + ''' Run Juju API calls that dwe do not need to wait for response ''' + def __init__(self, log, loop): + self._log = log + self._loop = loop + self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + + @property + def loop(self): + return self._loop + + @property + def log(self): + return self._log + + @property + def executor(self): + return self._executor + + @asyncio.coroutine + def deploy_service(self, api, charm, service): + self.log.debug("Deploying service using %s as %s" % (charm, service)) + try: + rc = yield from self.loop.run_in_executor( + self.executor, + api._deploy_service, + charm, service + ) + self.log.info("Deploy service {} returned {}".format(service, rc)) + except Exception as e: + self.log.error("Error deploying service {}, e={}". + format(service, e)) + self.log.debug("Deployed service using %s as %s " % (charm, service)) + + @asyncio.coroutine + def destroy_service(self, api, service): + self.log.debug("Destroying service %s" % (service)) + rc = yield from self.loop.run_in_executor( + self.executor, + api._destroy_service, + service + ) + self.log.debug("Destroyed service {} ({})".format(service, rc)) + + +class JujuNsmConfigPlugin(rwnsmconfigplugin.NsmConfigPluginBase): + """ + Juju implementation of the NsmConfPluginBase + """ + def __init__(self, dts, log, loop, publisher, account): + rwnsmconfigplugin.NsmConfigPluginBase.__init__(self, dts, log, loop, publisher, account) + self._name = account.name + self._ip_address = account.juju.ip_address + self._port = account.juju.port + self._user = account.juju.user + self._secret = account.juju.secret + self._juju_vnfs = {} + self._helper = JujuExecuteHelper(log, loop) + self._tasks = {} + + @asyncio.coroutine + def _get_api(self): + # Create an juju api instance + try: + self._log.debug("Juju config agent: Create API for {}:{}". + format(self._ip_address, self._port)) + api = yield from self._loop.run_in_executor( + None, + juju_intf.JujuApi, + self._log, self._ip_address, + self._port, self._user, self._secret, + self.loop + ) + if not isinstance(api, juju_intf.JujuApi): + self._log.error("Juju config agent: Did not get JujuApi instance: {}". + format(api)) + api = None + except Exception as e: + self._log.critical("Juju config agent: Instantiate API exception: {}". + format(e)) + self._log.exception(e) + api = None + + return api + + def _get_api_blocking(self): + # Create an juju api instance + try: + self._log.debug("Juju config agent: Blocking create API for {}:{}". + format(self._ip_address, self._port)) + api = juju_intf.JujuApi(self._log, self._ip_address, + self._port, self._user, self._secret, + self.loop) + if not isinstance(api, juju_intf.JujuApi): + self._log.error("Juju config agent: Did not get JujuApi instance blocking: {}". + format(api)) + api = None + except Exception as e: + self._log.critical("Juju config agent: Instantiate API exception blocking: {}". 
+ format(e)) + self._log.exception(e) + api = None + + return api + + # TBD: Do a better, similar to config manager + def xlate(self, tag, tags): + # TBD + if tag is None: + return tag + val = tag + if re.search('<.*>', tag): + self._log.debug("Juju config agent: Xlate value %s" % tag) + try: + if tag == '': + val = tags['rw_mgmt_ip'] + except KeyError as e: + self._log.info("Juju config agent: Did not get a value for tag %s, e=%s" % (tag, e)) + return val + + @asyncio.coroutine + def notify_create_nsr(self, nsr, nsd): + """ + Notification of create Network service record + """ + pass + + + @asyncio.coroutine + def notify_create_vls(self, nsr, vld, vlr): + """ + Notification of create VL record + """ + pass + + @asyncio.coroutine + def notify_create_vnfr(self, nsr, vnfr): + """ + Notification of create Network VNF record + Returns True if configured using config_agent + """ + # Deploy the charm if specified for the vnf + self._log.debug("Juju config agent: create vnfr nsr=%s vnfr=%s" %(nsr, vnfr.name)) + self._log.debug("Juju config agent: Const = %s" %(vnfr._const_vnfd)) + try: + vnf_config = vnfr._const_vnfd.vnf_configuration + self._log.debug("Juju config agent: vnf_configuration = %s", vnf_config) + if vnf_config.config_type != 'juju': + return False + charm = vnf_config.juju.charm + self._log.debug("Juju config agent: charm = %s", charm) + except Exception as e: + self._log.debug("Juju config agent: vnf_configuration error for vnfr {}: {}". + format(vnfr.name, e)) + return False + + # Prepare unique name for this VNF + vnf_unique_name = get_vnf_unique_name(vnfr._nsr_name, vnfr.vnfd.name, vnfr.member_vnf_index) + if vnf_unique_name in self._juju_vnfs: + self._log.warn("Juju config agent: Service %s already deployed" % (vnf_unique_name)) + + self._juju_vnfs.update({vnfr.id: {'name': vnf_unique_name, 'charm': charm, + 'nsr_id': nsr, 'member_vnf_index': vnfr.member_vnf_index, + 'xpath': vnfr.xpath, 'tags': {}, + 'active': False, 'config': vnf_config, + 'vnfr_name' : vnfr.name}}) + self._log.debug("Juju config agent: Charm %s for vnf %s to be deployed as %s" % + (charm, vnfr.name, vnf_unique_name)) + + try: + if vnf_unique_name not in self._tasks: + self._tasks[vnf_unique_name] = {} + api = yield from self._get_api() + if api: + self._tasks[vnf_unique_name]['deploy'] = self.loop.create_task( + self._helper.deploy_service(api, charm, vnf_unique_name) + ) + self._log.debug("Juju config agent: Deployed service %s" % vnf_unique_name) + else: + self._log.error("Juju config agent: Unable to get API for deploy") + except Exception as e: + self._log.critical("Juju config agent: Unable to deploy service {} for charm {}: {}". 
+ format(vnf_unique_name, charm, e)) + self.log.exception(e) + + return True + + @asyncio.coroutine + def notify_instantiate_ns(self, nsr): + """ + Notification of NSR instantiationwith the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_instantiate_vnf(self, nsr, vnfr, xact): + """ + Notification of Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_instantiate_vl(self, nsr, vlr, xact): + """ + Notification of Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_nsr_active(self, nsr, vnfrs): + """ Notify instantiate of the virtual link""" + for vnf in vnfrs.values(): + self._log.debug("Juju config agent: ns active VNF %s" % vnf.name) + try: + if vnf.id in self._juju_vnfs.keys(): + #self._log.debug("Juju config agent: Fetching VNF: %s in NS %s", vnf.name, nsr) + # vnfr = yield from self.fetch_vnfr(vnf.xpath) + + # Check if the deploy is done + if self.check_task_status(self._juju_vnfs[vnf.id]['name'], 'deploy'): + # apply initial config for the vnfr + yield from self.apply_initial_config(vnf.id, vnf) + else: + self._log.info("Juju config agent: service not yet deployed for %s" % vnf.name) + except Exception as e: + self._log.error("Juju config agent: ns active VNF {}, e {}".format(vnf.name, e)) + self._log.exception(e) + + @asyncio.coroutine + def notify_terminate_ns(self, nsr): + """ + Notification of Terminate the network service + """ + pass + + @asyncio.coroutine + def notify_terminate_vnf(self, nsr, vnfr, xact): + """ + Notification of Terminate the network service + """ + self._log.debug("Juju config agent: Terminate VNFr {}, current vnfrs={}". + format(vnfr.name, self._juju_vnfs)) + try: + api = yield from self._get_api() + vnf = self._juju_vnfs[vnfr.id] + service = vnf['name'] + if api: + self._log.debug ("Juju config agent: Terminating VNFr %s, %s" % + (vnfr.name, service)) + self._tasks[service]['destroy'] = self.loop.create_task( + self._helper.destroy_service(api, service) + ) + else: + self._log.error("Juju: Unable to get API for terminate") + del self._juju_vnfs[vnfr.id] + self._log.debug ("Juju config agent: current vnfrs={}". + format(self._juju_vnfs)) + if service in self._tasks: + tasks = [] + for action in self._tasks[service].keys(): + #if self.check_task_status(service, action): + tasks.append(action) + del tasks + except KeyError as e: + self._log.debug ("Juju config agent: Termiating charm service for VNFr {}, e={}". + format(vnfr.name, e)) + except Exception as e: + self._log.error("Juju config agent: Exception terminating charm service for VNFR {}: {}". + format(vnfr.name, e)) + + @asyncio.coroutine + def notify_terminate_vl(self, nsr, vlr, xact): + """ + Notification of Terminate the virtual link + """ + pass + + def check_task_status(self, service, action): + #self.log.debug("Juju config agent: check task status for %s, %s" % (service, action)) + try: + task = self._tasks[service][action] + if task.done(): + self.log.debug("Juju config agent: Task for %s, %s done" % (service, action)) + e = task.exception() + if e: + self.log.error("Juju config agent: Error in task for {} and {} : {}". + format(service, action, e)) + r= task.result() + if r: + self.log.debug("Juju config agent: Task for {} and {}, returned {}". + format(service, action,r)) + return True + else: + self.log.debug("Juju config agent: task {}, {} not done". + format(service, action)) + return False + except KeyError as e: + self.log.error("Juju config agent: KeyError for task for {} and {}: {}". 
+ format(service, action, e)) + except Exception as e: + self.log.error("Juju config agent: Error for task for {} and {}: {}". + format(service, action, e)) + return True + + @asyncio.coroutine + def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output): + self._log.debug("Juju config agent: VNF config primititve {} for nsr {}, vnfr_id {}". + format(primitive, nsr_id, vnfr_id)) + output.execution_status = "failed" + output.execution_id = '' + api = None + try: + vnfr = self._juju_vnfs[vnfr_id] + except KeyError: + self._log.error("Juju config agent: Did not find VNFR %s in juju plugin" % vnfr_id) + return + + try: + service = vnfr['name'] + vnf_config = vnfr['config'] + self._log.debug("VNF config %s" % vnf_config) + configs = vnf_config.config_primitive + for config in configs: + if config.name == primitive.name: + self._log.debug("Juju config agent: Found the config primitive %s" % config.name) + params = {} + for parameter in primitive.parameter: + if parameter.value: + val = self.xlate(parameter.value, vnfr['tags']) + # TBD do validation of the parameters + data_type = 'string' + found = False + for ca_param in config.parameter: + if ca_param.name == parameter.name: + data_type = ca_param.data_type + found = True + break + if data_type == 'integer': + val = int(parameter.value) + if not found: + self._log.warn("Juju config agent: Did not find parameter {} for {}". + format(parameter, config.name)) + params.update({parameter.name: val}) + if config.name == 'config': + if len(params): + self._log.debug("Juju config agent: applying config with params {} for service {}". + format(params, service)) + if api is None: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: No API handle present for {}". + format(vnfr['name'])) + return + + rc = yield from self._loop.run_in_executor( + None, + api._apply_config, + service, + params + ) + if rc: + output.execution_status = "completed" + self._log.debug("Juju config agent: applied config {} on {}". + format(params, service)) + # Removing this as clearwater has fixed its config hook + # Sleep for sometime for the config to take effect + # self._log.debug("Juju config agent: Wait sometime for config to take effect") + # yield from self._loop.run_in_executor( + # None, + # time.sleep, + # 30 + # ) + # self._log.debug("Juju config agent: Wait over for config to take effect") + else: + output.execution_status = 'failed' + self._log.error("Juju config agent: Error applying config {} on service {}". + format(params, service)) + else: + self._log.warn("Juju config agent: Did not find valid paramaters for config : {}". + format(primitive.parameter)) + else: + self._log.debug("Juju config agent: Execute action {} on service {} with params {}". + format(config.name, service, params)) + if api is None: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: No API handle present for {}". + format(vnfr['name'])) + return + tags = yield from self._loop.run_in_executor( + None, + api._execute_actions, + service, config.name, params + ) + if len(tags): + output.execution_id = tags[0] + output.execution_status = api.get_action_status(tags[0]) + self._log.debug("Juju config agent: excute action {} on service {} returned {}". + format(config.name, service, output.execution_status)) + else: + self._log.error("Juju config agent: error executing action {} for {} with {}". 
+ format(config.name, service, params)) + output.execution_id = '' + output.execution_status = 'failed' + break + except KeyError as e: + self._log.info("VNF %s does not have config primititves, e=%s" % (vnfr_id, e)) + + @asyncio.coroutine + def apply_config(self, rpc_ip, nsr, vnfrs): + """Hook: Runs the user defined script. Feeds all the necessary data + for the script thro' yaml file. + + Args: + rpc_ip (YangInput_Nsr_ExecNsConfigPrimitive): The input data. + nsr (NetworkServiceRecord): Description + vnfrs (dict): VNFR ID => VirtualNetworkFunctionRecord + """ + def get_meta(vnfrs): + unit_names, initial_params, vnfr_index_map = {}, {}, {} + + for vnfr_id, juju_vnf in self._juju_vnfs.items(): + # Only add vnfr info for vnfs in this particular nsr + if vnfr_id not in nsr.vnfrs: + continue + # Vnfr -> index ref + vnfr_index_map[vnfr_id] = juju_vnf['member_vnf_index'] + + # Unit name + unit_names[vnfr_id] = juju_vnf['name'] + + # Flatten the data for simplicity + param_data = {} + for primitive in juju_vnf['config'].initial_config_primitive: + for parameter in primitive.parameter: + value = self.xlate(parameter.value, juju_vnf['tags']) + param_data[parameter.name] = value + + initial_params[vnfr_id] = param_data + + + return unit_names, initial_params, vnfr_index_map + + for vnfr_id, vnf in self._juju_vnfs.items(): + print (vnf['config'].as_dict()) + + unit_names, init_data, vnfr_index_map = get_meta(vnfrs) + + # The data consists of 4 sections + # 1. Account data + # 2. The input passed. + # 3. Juju unit names (keyed by vnfr ID). + # 4. Initial config data (keyed by vnfr ID). + data = dict() + data['config_agent'] = dict( + name=self._name, + host=self._ip_address, + port=self._port, + user=self._user, + secret=self._secret + ) + data["rpc_ip"] = rpc_ip.as_dict() + data["unit_names"] = unit_names + data["init_config"] = init_data + data["vnfr_index_map"] = vnfr_index_map + + tmp_file = None + with tempfile.NamedTemporaryFile(delete=False) as tmp_file: + tmp_file.write(yaml.dump(data, default_flow_style=True) + .encode("UTF-8")) + + self._log.debug("Juju config agent: Creating a temp file: {} with input data".format( + tmp_file.name)) + + cmd = "{} {}".format(rpc_ip.user_defined_script, tmp_file.name) + self._log.debug("Juju config agent: Running the CMD: {}".format(cmd)) + + coro = asyncio.create_subprocess_shell(cmd, loop=self._loop) + process = yield from coro + task = self._loop.create_task(process.wait()) + + return task + + @asyncio.coroutine + def fetch_vnfr(self, vnfr_path): + """ Fetch VNFR record """ + vnfr = None + self._log.debug("Juju config agent: Fetching VNFR with key %s", vnfr_path) + res_iter = yield from self._dts.query_read(vnfr_path, rwdts.Flag.MERGE) + + for ent in res_iter: + res = yield from ent + vnfr = res.result + + return vnfr + + @asyncio.coroutine + def apply_initial_config(self, vnf_id, vnf): + """ + Apply the initial configuration + Expect config directives mostly, not actions + Actions in initial config may not work based on charm design + """ + try: + tags = [] + vnfr = self._juju_vnfs[vnf_id] + api = None + + vnf_cat = yield from self.fetch_vnfr(vnf.xpath) + if vnf_cat and vnf_cat.mgmt_interface.ip_address: + vnfr['tags'].update({'rw_mgmt_ip': vnf_cat.mgmt_interface.ip_address}) + config = {} + try: + for primitive in vnfr['config'].initial_config_primitive: + self._log.debug("Initial config %s" % (primitive)) + if primitive.name == 'config': + for param in primitive.parameter: + if vnfr['tags']: + val = self.xlate(param.value, vnfr['tags']) + 
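+                            # xlate() resolves '<...>' style placeholders against the
+                            # values recorded in vnfr['tags'] (currently only the VNF
+                            # management IP, stored under 'rw_mgmt_ip').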
config.update({param.name: val}) + except KeyError as e: + self._log.exception("Juju config agent: Initial config error: config=%s" % config) + config = None + + self._log.debug("Juju config agent: Applying initial config for {} as {}". + format(vnfr['name'], config)) + if config: + if api is None: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: No API available for apply initial config") + return + yield from self._loop.run_in_executor( + None, + api._apply_config, + vnfr['name'], + config + ) + + # Apply any actions specified as part of initial config + for primitive in vnfr['config'].initial_config_primitive: + self._log.debug("Juju config agent: Initial config %s" % (primitive)) + if primitive.name != 'config': + action = primitive.name + params = {} + for param in primitive.parameter: + val = self.xlate(param.value, self._juju_vnfs[vnf_id]['tags']) + params.update({param.name: val}) + + self._log.debug("Juju config agent: Action %s with params %s" % (action, params)) + if api is None: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: No API available for apply initial config actions") + return + tag = yield from self._loop.run_in_executor( + None, + api._execute_actions, + vnfr['name'], + action, params + ) + tags.append(tag) + + except KeyError as e: + self._log.info("Juju config agent: VNFR %s not managed by Juju" % (vnf_id)) + except Exception as e: + self._log.exception("Juju config agent: Exception juju apply_initial_config for VNFR {}: {}". + format(vnf_id, e)) + return tags + + def is_vnfr_managed(self, vnfr_id): + try: + if vnfr_id in self._juju_vnfs: + return True + except Exception as e: + self._log.debug("Juju config agent: Is VNFR {} managed: {}". + format(vnfr_id, e)) + return False + + def is_service_active(self, service): + """ Is the juju service active """ + resp = False + try: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: Unable to get API for checking service is active") + return resp + + for vnf in self._juju_vnfrs: + if vnf['name'] == service and api: + # Check if deploy is over + if self.check_task_status(service, 'deploy'): + resp = yield from self._loop.run_in_executor( + None, + api.is_service_active, + service + ) + self._log.debug("Juju config agent: Is the service %s active? %s", service, resp) + return resp + except KeyError: + self._log.error("Juju config agent: Check active unknown service ", service) + except Exception as e: + self._log.error("Juju config agent: Caught exception when checking for service is active: ", e) + self._log.exception(e) + return resp + + @asyncio.coroutine + def is_configured(self, vnfr_id): + try: + if self._juju_vnfs[vnfr_id]['active']: + return True + + service = self._juju_vnfs[vnfr_id]['name'] + resp = self.is_service_active(service) + self._juju_vnfs[vnfr_id]['active'] = resp + self._log.debug("Juju config agent: Service state for {} is {}". + format(service, resp)) + return resp + except KeyError: + self._log.debug("Juju config agent: VNFR id {} not found in config agent". + format(vnfr_id)) + return True + except Exception as e: + self._log.error("Juju config agent: VNFR id {} is_configured: {}". 
+ format(vnfr_id, e)) + return False + + @asyncio.coroutine + def get_status(self, vnfr_id): + resp = 'unknown' + try: + vnfr = self._juju_vnfs[vnfr_id] + if vnfr['active']: + return 'configured' + + service = vnfr['name'] + # Check if deploy is over + if self.check_task_status(service, 'deploy'): + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: API not created for get status") + return 'failed' + + resp = yield from self._loop.run_in_executor( + None, + api.get_service_status, + service + ) + self._log.debug("Juju config agent: Service status for {} is {}". + format(service, resp)) + status = 'configuring' + if resp in ['active', 'blocked']: + vnfr['active'] = True + status = 'configured' + elif resp in ['error', 'NA']: + status = 'failed' + return status + except KeyError as e: + self._log.debug("Juju config agent: VNFR id {} not found in config agent, e={}". + format(vnfr_id, e)) + return 'configured' + except Exception as e: + self._log.error("Juju config agent: VNFR id {} gt_status, e={}". + format(vnfr_id, e)) + self._log.exception(e) + return resp + + def get_action_status(self, execution_id): + ''' Get the action status for an execution ID + *** Make sure this is NOT a asyncio coroutine function *** + ''' + api = self._get_api_blocking() + if api is None: + self._log.error("Juju config agent: Unable to get API in get_action_status") + return None + try: + return api.get_action_status(execution_id) + except Exception as e: + self._log.exception("Juju config agent: Error fetching execution status for %s", + execution_id) + self._log.exception(e) + return None \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py new file mode 100644 index 0000000..56b0346 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py @@ -0,0 +1,573 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import os +import sys +import time +import yaml + +import rift.openmano.rift2openmano as rift2openmano +import rift.openmano.openmano_client as openmano_client +from . 
import rwnsmplugin + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +DUMP_OPENMANO_DIR = os.path.join( + os.environ["RIFT_ARTIFACTS"], + "openmano_descriptors" + ) + + +def dump_openmano_descriptor(name, descriptor_str): + filename = "{}_{}.yaml".format( + time.strftime("%Y%m%d-%H%M%S"), + name + ) + + filepath = os.path.join( + DUMP_OPENMANO_DIR, + filename + ) + + try: + if not os.path.exists(DUMP_OPENMANO_DIR): + os.makedirs(DUMP_OPENMANO_DIR) + + with open(filepath, 'w') as hdl: + hdl.write(descriptor_str) + + except OSError as e: + print("Failed to dump openmano descriptor: %s" % str(e)) + + return filepath + + +class OpenmanoVnfr(object): + def __init__(self, log, loop, cli_api, vnfr): + self._log = log + self._loop = loop + self._cli_api = cli_api + self._vnfr = vnfr + self._vnfd_id = vnfr.vnfd.id + + self._vnf_id = None + + self._created = False + + @property + def vnfd(self): + return rift2openmano.RiftVNFD(self._vnfr.vnfd) + + @property + def vnfr(self): + return self._vnfr + + @property + def rift_vnfd_id(self): + return self._vnfd_id + + @property + def openmano_vnfd_id(self): + return self._vnf_id + + @property + def openmano_vnfd(self): + self._log.debug("Converting vnfd %s from rift to openmano", self.vnfd.id) + openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd) + return openmano_vnfd + + @property + def openmano_vnfd_yaml(self): + return yaml.safe_dump(self.openmano_vnfd, default_flow_style=False) + + @asyncio.coroutine + def create(self): + self._log.debug("Creating openmano vnfd") + openmano_vnfd = self.openmano_vnfd + name = openmano_vnfd["vnf"]["name"] + + # If the name already exists, get the openmano vnfd id + name_uuid_map = yield from self._loop.run_in_executor( + None, + self._cli_api.vnf_list, + ) + + if name in name_uuid_map: + vnf_id = name_uuid_map[name] + self._log.debug("Vnf already created. Got existing openmano vnfd id: %s", vnf_id) + self._vnf_id = vnf_id + return + + self._vnf_id, _ = yield from self._loop.run_in_executor( + None, + self._cli_api.vnf_create, + self.openmano_vnfd_yaml, + ) + + fpath = dump_openmano_descriptor( + "{}_vnf".format(name), + self.openmano_vnfd_yaml + ) + + self._log.debug("Dumped Openmano VNF descriptor to: %s", fpath) + + self._created = True + + @asyncio.coroutine + def delete(self): + if not self._created: + return + + self._log.debug("Deleting openmano vnfd") + if self._vnf_id is None: + self._log.warning("Openmano vnf id not set. 
Cannot delete.") + return + + yield from self._loop.run_in_executor( + None, + self._cli_api.vnf_delete, + self._vnf_id, + ) + + +class OpenmanoNsr(object): + TIMEOUT_SECS = 120 + + def __init__(self, log, loop, publisher, cli_api, http_api, nsd_msg, nsr_config_msg): + self._log = log + self._loop = loop + self._publisher = publisher + self._cli_api = cli_api + self._http_api = http_api + + self._nsd_msg = nsd_msg + self._nsr_config_msg = nsr_config_msg + + self._vnfrs = [] + + self._nsd_uuid = None + self._nsr_uuid = None + + self._created = False + + self._monitor_task = None + + @property + def nsd(self): + return rift2openmano.RiftNSD(self._nsd_msg) + + @property + def vnfds(self): + return {v.rift_vnfd_id: v.vnfd for v in self._vnfrs} + + @property + def vnfrs(self): + return self._vnfrs + + @property + def openmano_nsd_yaml(self): + self._log.debug("Converting nsd %s from rift to openmano", self.nsd.id) + openmano_nsd = rift2openmano.rift2openmano_nsd(self.nsd, self.vnfds) + return yaml.safe_dump(openmano_nsd, default_flow_style=False) + + @asyncio.coroutine + def add_vnfr(self, vnfr): + vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr) + yield from vnfr.create() + self._vnfrs.append(vnfr) + + @asyncio.coroutine + def delete(self): + if not self._created: + self._log.debug("NSD wasn't created. Skipping delete.") + return + + self._log.debug("Deleting openmano nsr") + + yield from self._loop.run_in_executor( + None, + self._cli_api.ns_delete, + self._nsd_uuid, + ) + + self._log.debug("Deleting openmano vnfrs") + for vnfr in self._vnfrs: + yield from vnfr.delete() + + @asyncio.coroutine + def create(self): + self._log.debug("Creating openmano scenario") + name_uuid_map = yield from self._loop.run_in_executor( + None, + self._cli_api.ns_list, + ) + + if self._nsd_msg.name in name_uuid_map: + self._log.debug("Found existing openmano scenario") + self._nsd_uuid = name_uuid_map[self._nsd_msg.name] + return + + + # Use the nsd uuid as the scenario name to rebind to existing + # scenario on reload or to support muliple instances of the name + # nsd + self._nsd_uuid, _ = yield from self._loop.run_in_executor( + None, + self._cli_api.ns_create, + self.openmano_nsd_yaml, + self._nsd_msg.name + ) + fpath = dump_openmano_descriptor( + "{}_nsd".format(self._nsd_msg.name), + self.openmano_nsd_yaml, + ) + + self._log.debug("Dumped Openmano NS descriptor to: %s", fpath) + + self._created = True + + @asyncio.coroutine + def instance_monitor_task(self): + self._log.debug("Starting Instance monitoring task") + + vnfr_uuid_map = {} + start_time = time.time() + active_vnfs = [] + + while True: + yield from asyncio.sleep(1, loop=self._loop) + + try: + instance_resp_json = yield from self._loop.run_in_executor( + None, + self._http_api.get_instance, + self._nsr_uuid, + ) + + self._log.debug("Got instance response: %s for NSR ID %s", + instance_resp_json, + self._nsr_uuid) + + except openmano_client.InstanceStatusError as e: + self._log.error("Could not get NS instance status: %s", str(e)) + continue + + def all_vms_active(vnf): + for vm in vnf["vms"]: + vm_status = vm["status"] + vm_uuid = vm["uuid"] + if vm_status != "ACTIVE": + self._log.debug("VM is not yet active: %s (status: %s)", vm_uuid, vm_status) + return False + + return True + + def any_vm_active_nomgmtip(vnf): + for vm in vnf["vms"]: + vm_status = vm["status"] + vm_uuid = vm["uuid"] + if vm_status != "ACTIVE": + self._log.debug("VM is not yet active: %s (status: %s)", vm_uuid, vm_status) + return False + + return True + + def 
any_vms_error(vnf): + for vm in vnf["vms"]: + vm_status = vm["status"] + vm_vim_info = vm["vim_info"] + vm_uuid = vm["uuid"] + if vm_status == "ERROR": + self._log.error("VM Error: %s (vim_info: %s)", vm_uuid, vm_vim_info) + return True + + return False + + def get_vnf_ip_address(vnf): + if "ip_address" in vnf: + return vnf["ip_address"].strip() + return None + + def get_ext_cp_info(vnf): + cp_info_list = [] + for vm in vnf["vms"]: + if "interfaces" not in vm: + continue + + for intf in vm["interfaces"]: + if "external_name" not in intf: + continue + + if not intf["external_name"]: + continue + + ip_address = intf["ip_address"] + if ip_address is None: + ip_address = "0.0.0.0" + + cp_info_list.append((intf["external_name"], ip_address)) + + return cp_info_list + + def get_vnf_status(vnfr): + # When we create an openmano descriptor we use __ + # to come up with openmano constituent VNF name. Use this + # knowledge to map the vnfr back. + openmano_vnfr_suffix = "__{}".format( + vnfr.vnfr.vnfr.member_vnf_index_ref + ) + + for vnf in instance_resp_json["vnfs"]: + if vnf["vnf_name"].endswith(openmano_vnfr_suffix): + return vnf + + self._log.warning("Could not find vnf status with name that ends with: %s", + openmano_vnfr_suffix) + return None + + for vnfr in self._vnfrs: + if vnfr in active_vnfs: + # Skipping, so we don't re-publish the same VNF message. + continue + + vnfr_msg = vnfr.vnfr.vnfr.deep_copy() + vnfr_msg.operational_status = "init" + + try: + vnf_status = get_vnf_status(vnfr) + self._log.debug("Found VNF status: %s", vnf_status) + if vnf_status is None: + self._log.error("Could not find VNF status from openmano") + vnfr_msg.operational_status = "failed" + yield from self._publisher.publish_vnfr(None, vnfr_msg) + return + + # If there was a VNF that has a errored VM, then just fail the VNF and stop monitoring. + if any_vms_error(vnf_status): + self._log.debug("VM was found to be in error state. Marking as failed.") + vnfr_msg.operational_status = "failed" + yield from self._publisher.publish_vnfr(None, vnfr_msg) + return + + if all_vms_active(vnf_status): + vnf_ip_address = get_vnf_ip_address(vnf_status) + + if vnf_ip_address is None: + self._log.warning("No IP address obtained " + "for VNF: {}, will retry.".format( + vnf_status['vnf_name'])) + continue + + self._log.debug("All VMs in VNF are active. Marking as running.") + vnfr_msg.operational_status = "running" + + self._log.debug("Got VNF ip address: %s", vnf_ip_address) + vnfr_msg.mgmt_interface.ip_address = vnf_ip_address + vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = vnf_ip_address + + # Add connection point information for the config manager + cp_info_list = get_ext_cp_info(vnf_status) + for (cp_name, cp_ip) in cp_info_list: + cp = vnfr_msg.connection_point.add() + cp.name = cp_name + cp.short_name = cp_name + cp.ip_address = cp_ip + + yield from self._publisher.publish_vnfr(None, vnfr_msg) + active_vnfs.append(vnfr) + + if (time.time() - start_time) > OpenmanoNsr.TIMEOUT_SECS: + self._log.error("NSR timed out before reaching running state") + vnfr_msg.operational_status = "failed" + yield from self._publisher.publish_vnfr(None, vnfr_msg) + return + + except Exception as e: + vnfr_msg.operational_status = "failed" + yield from self._publisher.publish_vnfr(None, vnfr_msg) + self._log.exception("Caught exception publishing vnfr info: %s", str(e)) + return + + if len(active_vnfs) == len(self._vnfrs): + self._log.info("All VNF's are active. 
Exiting NSR monitoring task") + return + + @asyncio.coroutine + def deploy(self): + if self._nsd_uuid is None: + raise ValueError("Cannot deploy an uncreated nsd") + + self._log.debug("Deploying openmano scenario") + + name_uuid_map = yield from self._loop.run_in_executor( + None, + self._cli_api.ns_instance_list, + ) + + + openmano_datacenter = None + if self._nsr_config_msg.has_field("om_datacenter"): + openmano_datacenter = self._nsr_config_msg.om_datacenter + + if self._nsr_config_msg.name in name_uuid_map: + self._log.debug("Found existing instance with nsr name: %s", self._nsr_config_msg.name) + self._nsr_uuid = name_uuid_map[self._nsr_config_msg.name] + + else: + self._nsr_uuid = yield from self._loop.run_in_executor( + None, + self._cli_api.ns_instantiate, + self._nsd_uuid, + self._nsr_config_msg.name, + openmano_datacenter + ) + + self._monitor_task = asyncio.ensure_future( + self.instance_monitor_task(), loop=self._loop + ) + + @asyncio.coroutine + def terminate(self): + if self._nsr_uuid is None: + self._log.warning("Cannot terminate an un-instantiated nsr") + return + + if self._monitor_task is not None: + self._monitor_task.cancel() + self._monitor_task = None + + self._log.debug("Terminating openmano nsr") + yield from self._loop.run_in_executor( + None, + self._cli_api.ns_terminate, + self._nsr_uuid, + ) + + +class OpenmanoNsPlugin(rwnsmplugin.NsmPluginBase): + """ + RW Implentation of the NsmPluginBase + """ + def __init__(self, dts, log, loop, publisher, cloud_account): + self._dts = dts + self._log = log + self._loop = loop + self._publisher = publisher + + self._cli_api = None + self._http_api = None + self._openmano_nsrs = {} + + self._set_cloud_account(cloud_account) + + def _set_cloud_account(self, cloud_account): + self._log.debug("Setting openmano plugin cloud account: %s", cloud_account) + self._cli_api = openmano_client.OpenmanoCliAPI( + self.log, + cloud_account.openmano.host, + cloud_account.openmano.port, + cloud_account.openmano.tenant_id, + ) + + self._http_api = openmano_client.OpenmanoHttpAPI( + self.log, + cloud_account.openmano.host, + cloud_account.openmano.port, + cloud_account.openmano.tenant_id, + ) + + def create_nsr(self, nsr_config_msg, nsd_msg): + """ + Create Network service record + """ + openmano_nsr = OpenmanoNsr( + self._log, + self._loop, + self._publisher, + self._cli_api, + self._http_api, + nsd_msg, + nsr_config_msg + ) + self._openmano_nsrs[nsr_config_msg.id] = openmano_nsr + + @asyncio.coroutine + def deploy(self, nsr_msg): + openmano_nsr = self._openmano_nsrs[nsr_msg.ns_instance_config_ref] + yield from openmano_nsr.create() + yield from openmano_nsr.deploy() + + @asyncio.coroutine + def instantiate_ns(self, nsr, xact): + """ + Instantiate NSR with the passed nsr id + """ + yield from nsr.instantiate(xact) + + @asyncio.coroutine + def instantiate_vnf(self, nsr, vnfr, xact): + """ + Instantiate NSR with the passed nsr id + """ + openmano_nsr = self._openmano_nsrs[nsr.id] + yield from openmano_nsr.add_vnfr(vnfr) + + # Mark the VNFR as running + # TODO: Create a task to monitor nsr/vnfr status + vnfr_msg = vnfr.vnfr.deep_copy() + vnfr_msg.operational_status = "init" + + self._log.debug("Attempting to publish openmano vnf: %s", vnfr_msg) + yield from self._publisher.publish_vnfr(xact, vnfr_msg) + + @asyncio.coroutine + def instantiate_vl(self, nsr, vlr, xact): + """ + Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def terminate_ns(self, nsr, xact): + """ + Terminate the network service + """ + nsr_id = nsr.id 
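+        # Teardown order: terminate the running openmano instance, delete the
+        # scenario and its constituent VNF descriptors, unpublish the VNFRs,
+        # then drop the NSR from the local map.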
+ openmano_nsr = self._openmano_nsrs[nsr_id] + yield from openmano_nsr.terminate() + yield from openmano_nsr.delete() + + for vnfr in openmano_nsr.vnfrs: + self._log.debug("Unpublishing VNFR: %s", vnfr.vnfr) + yield from self._publisher.unpublish_vnfr(xact, vnfr.vnfr) + + del self._openmano_nsrs[nsr_id] + + @asyncio.coroutine + def terminate_vnf(self, vnfr, xact): + """ + Terminate the network service + """ + pass + + @asyncio.coroutine + def terminate_vl(self, vlr, xact): + """ + Terminate the virtual link + """ + pass \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py new file mode 100644 index 0000000..3c8e4f9 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py @@ -0,0 +1,228 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import asyncio + +from gi.repository import ( + RwDts as rwdts, + ) +import rift.tasklets + + +class NsrOpDataDtsHandler(object): + """ The network service op data DTS handler """ + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr" + + def __init__(self, dts, log, loop): + self._dts = dts + self._log = log + self._loop = loop + self._regh = None + + @property + def regh(self): + """ Return the registration handle""" + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for Nsr op data publisher registration""" + self._log.debug("Registering Nsr op data path %s as publisher", + NsrOpDataDtsHandler.XPATH) + + hdl = rift.tasklets.DTS.RegistrationHandler() + with self._dts.group_create() as group: + self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create an NS record in DTS with the path and message + """ + self._log.debug("Creating NSR xact = %s, %s:%s", xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Created NSR xact = %s, %s:%s", xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg, flags=rwdts.Flag.REPLACE): + """ + Update an NS record in DTS with the path and message + """ + self._log.debug("Updating NSR xact = %s, %s:%s regh = %s", xact, path, msg, self.regh) + self.regh.update_element(path, msg, flags) + self._log.debug("Updated NSR xact = %s, %s:%s", xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Update an NS record in DTS with the path and message + """ + self._log.debug("Deleting NSR xact:%s, path:%s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted NSR xact:%s, path:%s", xact, path) + + + +class VnfrPublisherDtsHandler(object): + """ Registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' DTS""" + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr" + + def __init__(self, dts, log, loop): + self._dts = dts + self._log = log + self._loop = loop + + self._regh = None 
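+        # _regh is populated by register(); once registered, VNFR state is
+        # published by passing the VNFR xpath and message to create()/update(),
+        # e.g. (illustrative) yield from handler.update(xact, vnfr.xpath, vnfr_msg)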
+ + @property + def regh(self): + """ Return registration handle""" + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for Vvnfr create/update/delete/read requests from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + self._log.debug( + "Got vnfr on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, msg + ) + raise NotImplementedError( + "%s action on VirtualNetworkFunctionRecord not supported", + action) + + self._log.debug("Registering for VNFR using xpath: %s", + VnfrPublisherDtsHandler.XPATH,) + + hdl = rift.tasklets.DTS.RegistrationHandler() + with self._dts.group_create() as group: + self._regh = group.register(xpath=VnfrPublisherDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.PUBLISHER | + rwdts.Flag.NO_PREP_READ | + rwdts.Flag.CACHE),) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create a VNFR record in DTS with path and message + """ + self._log.debug("Creating VNFR xact = %s, %s:%s", + xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Created VNFR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg): + """ + Update a VNFR record in DTS with path and message + """ + self._log.debug("Updating VNFR xact = %s, %s:%s", + xact, path, msg) + self.regh.update_element(path, msg) + self._log.debug("Updated VNFR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Delete a VNFR record in DTS with path and message + """ + self._log.debug("Deleting VNFR xact = %s, %s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted VNFR xact = %s, %s", xact, path) + + +class VlrPublisherDtsHandler(object): + """ registers 'D,/vlr:vlr-catalog/vlr:vlr """ + XPATH = "D,/vlr:vlr-catalog/vlr:vlr" + + def __init__(self, dts, log, loop): + self._dts = dts + self._log = log + self._loop = loop + + self._regh = None + + @property + def regh(self): + """ Return registration handle""" + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for vlr create/update/delete/read requests from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + self._log.debug( + "Got vlr on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, msg + ) + raise NotImplementedError( + "%s action on VirtualLinkRecord not supported", + action) + + self._log.debug("Registering for VLR using xpath: %s", + VlrPublisherDtsHandler.XPATH,) + + hdl = rift.tasklets.DTS.RegistrationHandler() + with self._dts.group_create() as group: + self._regh = group.register(xpath=VlrPublisherDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.PUBLISHER | + rwdts.Flag.NO_PREP_READ | + rwdts.Flag.CACHE),) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create a VLR record in DTS with path and message + """ + self._log.debug("Creating VLR xact = %s, %s:%s", + xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Created VLR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg): + """ + Update a VLR record in DTS with path and message + """ + self._log.debug("Updating VLR xact = %s, %s:%s", + xact, path, msg) + self.regh.update_element(path, msg) + self._log.debug("Updated VLR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Delete a VLR record 
in DTS with path and message + """ + self._log.debug("Deleting VLR xact = %s, %s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted VLR xact = %s, %s", xact, path) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conagent.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conagent.py new file mode 100644 index 0000000..543cef0 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conagent.py @@ -0,0 +1,244 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import asyncio +import rift.tasklets + +from . import rwnsmconfigplugin +from . import jujuconf_nsm +import rift.mano.config_agent + +class ConfigAgentExistsError(Exception): + pass + +class ConfigAccountHandler(object): + def __init__(self, dts, log, loop, on_add_config_agent): + self._log = log + self._dts = dts + self._loop = loop + self._on_add_config_agent = on_add_config_agent + + self._log.debug("creating config account handler") + self.cloud_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber( + self._dts, self._log, + rift.mano.config_agent.ConfigAgentCallbacks( + on_add_apply=self.on_config_account_added, + on_delete_apply=self.on_config_account_deleted, + on_update_prepare=self.on_config_account_update, + ) + ) + + def on_config_account_deleted(self, account_name): + self._log.debug("config account deleted") + self._log.debug(account_name) + self._log.error("Config agent update not supported yet") + + def on_config_account_added(self, account): + self._log.debug("config account added") + self._log.debug(account.as_dict()) + self._on_add_config_agent(account) + + @asyncio.coroutine + def on_config_account_update(self, account): + self._log.debug("config account being updated") + self._log.debug(account.as_dict()) + self._log.error("Config agent update not supported yet") + + @asyncio.coroutine + def register(self): + self.cloud_cfg_handler.register() + +class RwNsConfigPlugin(rwnsmconfigplugin.NsmConfigPluginBase): + """ + Default Implementation of the NsmConfPluginBase + """ + @asyncio.coroutine + def notify_create_nsr(self, nsr, nsd): + """ + Notification of create Network service record + """ + pass + + @asyncio.coroutine + def apply_config(self, config, nsr, vnfrs): + """ + Notification of configuration of Network service record + """ + pass + + @asyncio.coroutine + def notify_create_vls(self, nsr, vld): + """ + Notification of create Network service record + """ + pass + + @asyncio.coroutine + def notify_create_vnfr(self, nsr, vnfr): + """ + Notification of create Network service record + """ + pass + + @asyncio.coroutine + def notify_instantiate_ns(self, nsr): + """ + Notification of NSR instantiationwith the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_instantiate_vnf(self, nsr, vnfr, xact): + """ + Notification of Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def 
notify_instantiate_vl(self, nsr, vlr, xact): + """ + Notification of Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_nsr_active(self, nsr, vnfrs): + """ Notify instantiate of the virtual link""" + pass + + @asyncio.coroutine + def notify_terminate_ns(self, nsr): + """ + Notification of Terminate the network service + """ + pass + + @asyncio.coroutine + def notify_terminate_vnf(self, nsr, vnfr, xact): + """ + Notification of Terminate the network service + """ + pass + + @asyncio.coroutine + def notify_terminate_vl(self, nsr, vlr, xact): + """ + Notification of Terminate the virtual link + """ + pass + + @asyncio.coroutine + def apply_initial_config(self, vnfr_id, vnf): + """Apply initial configuration""" + pass + + @asyncio.coroutine + def get_config_status(self, vnfr_id): + """Get the status for the VNF""" + pass + + def get_action_status(self, execution_id): + """Get the action exection status""" + pass + + @asyncio.coroutine + def is_configured(self, vnfr_if): + return True + + @asyncio.coroutine + def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output): + """Apply config primitive on a VNF""" + pass + + def is_vnfr_managed(self, vnfr_id): + return False + +class NsmConfigPlugins(object): + """ NSM Config Agent Plugins """ + def __init__(self): + self._plugin_classes = { + "juju": jujuconf_nsm.JujuNsmConfigPlugin, + } + + @property + def plugins(self): + """ Plugin info """ + return self._plugin_classes + + def __getitem__(self, name): + """ Get item """ + print("%s", self._plugin_classes) + return self._plugin_classes[name] + + def register(self, plugin_name, plugin_class, *args): + """ Register a plugin to this Nsm""" + self._plugin_classes[plugin_name] = plugin_class + + def deregister(self, plugin_name, plugin_class, *args): + """ Deregister a plugin to this Nsm""" + if plugin_name in self._plugin_classes: + del self._plugin_classes[plugin_name] + + def class_by_plugin_name(self, name): + """ Get class by plugin name """ + return self._plugin_classes[name] + + +class NsmConfigAgent(object): + def __init__(self, dts, log, loop, records_publisher, on_config_nsm_plugin): + self._dts = dts + self._log = log + self._loop = loop + + self._records_publisher = records_publisher + self._on_config_nsm_plugin = on_config_nsm_plugin + self._config_plugins = NsmConfigPlugins() + self._config_handler = ConfigAccountHandler( + self._dts, self._log, self._loop, self._on_config_agent) + self._plugin_instances = {} + + def _set_plugin_instance(self, instance): + self._on_config_nsm_plugin(instance) + + def _on_config_agent(self, config_agent): + self._log.debug("Got nsm plugin config agent account: %s", config_agent) + try: + nsm_cls = self._config_plugins.class_by_plugin_name( + config_agent.account_type) + except KeyError as e: + self._log.debug( + "Config agent nsm plugin type not found: {}. Using default plugin, e={}". + format(config_agent.account_type, e)) + nsm_cls = RwNsConfigPlugin + + # Check to see if the plugin was already instantiated + if nsm_cls in self._plugin_instances: + self._log.debug("Config agent nsm plugin already instantiated. 
Using existing.") + self._set_plugin_instance(self._plugin_instances[nsm_cls]) + + # Otherwise, instantiate a new plugin using the config agent account + self._log.debug("Instantiting new config agent using class: %s", nsm_cls) + nsm_instance = nsm_cls(self._dts, self._log, self._loop, self._records_publisher, config_agent) + self._plugin_instances[nsm_cls] = nsm_instance + + self._set_plugin_instance(self._plugin_instances[nsm_cls]) + + @asyncio.coroutine + def register(self): + self._log.debug("Registering for config agent nsm plugin manager") + yield from self._config_handler.register() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py new file mode 100644 index 0000000..f1823e8 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py @@ -0,0 +1,314 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import time +import ncclient +import ncclient.asyncio_manager +import re + +import gi +gi.require_version('RwYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwTypes', '1.0') +gi.require_version('RwConmanYang', '1.0') +from gi.repository import ( + RwYang, + RwNsmYang as nsmY, + NsrYang as nsrY, + RwDts as rwdts, + RwTypes, + RwConmanYang as conmanY +) + +import rift.tasklets + +class ROSOConnectionError(Exception): + pass + +class ROServiceOrchif(object): + + def __init__(self, log, loop, parent): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + try: + self._model = RwYang.Model.create_libncx() + self._model.load_schema_ypbc(nsmY.get_schema()) + self._model.load_schema_ypbc(conmanY.get_schema()) + except Exception as e: + self._log.error("Error generating models %s", str(e)) + + + @asyncio.coroutine + def connect(self): + @asyncio.coroutine + def update_ns_cfg_state(self): + xpath="/cm-state" + while True: + try: + response = yield from self._manager.get(filter=('xpath', xpath)) + response_xml = response.data_xml.decode() + cm_state = conmanY.CmOpdata() + cm_state.from_xml_v2(self._model, response_xml) + cm_state_d = cm_state.as_dict() + #print("##>> Got NSR config state from RIFT-CM:", cm_state_d) + # Go in loop and update state for each NS + if cm_state_d and 'cm_nsr' in cm_state_d: + for nsr in cm_state_d['cm_nsr']: + if 'cm_vnfr' in nsr: + # Fill in new state to all vnfrs + for vnfr in nsr['cm_vnfr']: + vnfrid = vnfr['id'] + if vnfrid in self._parent.nsm._vnfrs: + # Need a consistent derivable way of checking state (hard coded for now) + if (vnfr['state'] == 'ready'): + if not self._parent.nsm._vnfrs[vnfrid].is_configured(): + yield from self._parent.nsm._vnfrs[vnfrid].set_config_status(nsrY.ConfigStates.CONFIGURED) + elif vnfr['state'] != 'ready_no_cfg': + if self._parent.nsm._vnfrs[vnfrid]._config_status != 
nsrY.ConfigStates.CONFIGURING: + yield from self._parent.nsm._vnfrs[vnfrid].set_config_status(nsrY.ConfigStates.CONFIGURING) + + except Exception as e: + self._log.error("Failed to get NS cfg state (may have been terminated) e=%s", str(e)) + return + yield from asyncio.sleep(5, loop=self._loop) + + so_endp = self._parent.cm_endpoint + try: + self._log.info("Attemping Resource Orchestrator netconf connection.") + self._manager = yield from ncclient.asyncio_manager.asyncio_connect(loop=self._loop, + host=so_endp['cm_ip_address'], + port=so_endp['cm_port'], + username=so_endp['cm_username'], + password=so_endp['cm_password'], + allow_agent=False, + look_for_keys=False, + hostkey_verify=False) + self._log.info("Connected to Service Orchestrator netconf @%s", so_endp['cm_ip_address']) + # Start the executor loop to monitor configuration status for this NS + yield from self._loop.create_task(update_ns_cfg_state(self)) + return True + except Exception as e: + self._log.error("Netconf connection to Service Orchestrator ip %s failed: %s", + so_endp['cm_ip_address'], str(e)) + return False + + @staticmethod + def wrap_netconf_config_xml(xml): + xml = '{}'.format(xml) + return xml + + def send_nsr_update(self, nsrid): + + self._log.debug("Attempting to send NSR id: %s", nsrid) + msg = conmanY.SoConfig() + addnsr = msg.nsr.add() + addnsr.id = nsrid + xml = msg.to_xml_v2(self._model) + netconf_xml = self.wrap_netconf_config_xml(xml) + + try: + response = yield from self._manager.edit_config(target='running', config = netconf_xml) + self._log.info("Received edit config response: %s", str(response)) + except ncclient.transport.errors.SSHError as e: + so_endp = self._parent.cm_endpoint + self._log.error("Applying configuration %s to SO(%s) failed: %s", + netconf_xml, so_endp['cm_ip_address'], str(e)) + return + + def send_nsr_delete(self, nsrid): + self._log.debug("Attempting to send delete NSR id: %s", nsrid) + msg = conmanY.SoConfig() + addnsr = msg.nsr.add() + addnsr.id = nsrid + xml = msg.to_xml_v2(self._model) + delete_path = '/cm-config/nsr[id=\'{}\']'.format(nsrid) + + def _xpath_strip_keys(xpath): + ''' Copied from automation ''' + '''Strip key-value pairs from the supplied xpath + + Arguments: + xpath - xpath to be stripped of keys + + Returns: + an xpath without keys + ''' + RE_CAPTURE_KEY_VALUE = re.compile(r'\[[^=]*?\=[\"\']?([^\'\"\]]*?)[\'\"]?\]') + return re.sub(RE_CAPTURE_KEY_VALUE, '', xpath) + + # In leiu of protobuf delta support, try to place the attribute in the correct place + def add_attribute(xpath, xml): + xpath = xpath.lstrip('/') + xpath = _xpath_strip_keys(xpath) + xpath_elems = xpath.split('/') + pos = 0 + for elem in xpath_elems: + pos = xml.index(elem, pos) + pos = xml.index('>', pos) + if xml[pos-1] == '/': + pos -= 1 + xml = xml[:pos] + " xc:operation='delete'" + xml[pos:] + return xml + + xml = add_attribute(delete_path, xml) + # print('>>>>> delete xml=\n{}\n\n'.format(xml)) + netconf_xml = '{}'.format(xml) + + try: + response = yield from self._manager.edit_config(target='running', config = netconf_xml) + self._log.info("Received delete config response: %s", str(response)) + except ncclient.transport.errors.SSHError as e: + self._log.error("Deleting CM config for NSR id=%s failed: %s", + nsrid, str(e)) + return + +class ROServiceOrchConfig(object): + def __init__(self, log, loop, dts, parent): + self._log = log + self._loop = loop + self._dts = dts + self.nsm = parent + self._ro_config_xpath = "C,/ro-config/cm-endpoint" + self.soif = None + self._active_nsr = [] + 
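+        # NSR ids buffered here until the netconf session to the configuration
+        # manager is established; initiate_connection() (in register() below)
+        # replays the buffered ids once the connection succeeds.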
self.cm_endpoint = {} + self._log.debug("Initialized ROServiceOrchConfig, cm_endpoint = %s", self.cm_endpoint) + + def is_ready(self): + return True + + @asyncio.coroutine + def register(self): + """ Register for ro-config cm-endpoint requests from dts """ + + @asyncio.coroutine + def initiate_connection(): + loop_cnt = 60 + # Create SO interface object + self._log.debug("Inside initiate_connection routine") + self.soif = ROServiceOrchif(self._log, self._loop, self) + for i in range(loop_cnt): + connect_status = yield from self.soif.connect() + if connect_status: + self._log.debug("Successfully connected to netconf") + for nsrid in list(self._active_nsr): + self._log.debug("Sending nsr-id : %s to SO from pending list", nsrid) + yield from self.soif.send_nsr_update(nsrid) + self._active_nsr.remove(nsrid) + self._log.debug("Deleting nsr-id : %s from pending list", nsrid) + break + else: + self._log.error("Cannot connect to SO. Retrying!") + + self._log.debug("Sleeping for 1 second in initiate_connection()") + yield from asyncio.sleep(1, loop = self._loop) + else: + raise ROSOConnectionError("Failed to connect to Service Orchestrator after 60 attempts") + return + + def on_apply(dts, acg, xact, action, scratch): + """Apply the configuration""" + ro_config = nsmY.RoConfig() + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actually persist data, this never has any data, so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + self._log.debug("Got nsr apply cfg (xact:%s) (action:%s) (cm_endpoint:%s)", + xact, action, self.cm_endpoint) + + # Verify that cm_endpoint is complete; we may get only default values if this is a confd re-apply + so_complete = True + for field in ro_config.cm_endpoint.fields: + if field not in self.cm_endpoint: + so_complete = False + + # Create future for connect + if so_complete is True and self.soif is None: + asyncio.ensure_future(initiate_connection(), loop = self._loop) + + return + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for ro-config """ + + self._log.debug("ro-config received msg %s", msg) + + action = xact_info.handle.get_query_action() + # Save msg as dictionary + msg_dict = msg.as_dict() + + self._log.info("ro-config received msg %s action %s - dict = %s", msg, action, msg_dict) + + # Save configuration information + # Might be able to save entire msg_dict + for key, val in msg_dict.items(): + self.cm_endpoint[key] = val + + acg.handle.prepare_complete_ok(xact_info.handle) + + self._log.debug( + "Registering for ro-config using xpath: %s", + self._ro_config_xpath + ) + + acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + self._regh = acg.register(xpath=self._ro_config_xpath, + flags=rwdts.Flag.SUBSCRIBER, + on_prepare=on_prepare) + + + @asyncio.coroutine + def notify_nsr_up(self, nsrid): + self._log.info("Notifying NSR id = %s!", nsrid) + + if self.soif is None: + self._log.warning("No SO interface created yet! 
Buffering the nsr-id") + self._active_nsr.append(nsrid) + else: + # Send NSR id as configuration + try: + yield from self.soif.send_nsr_update(nsrid) + except Exception as e: + self._log.error("Failed to send NSR id to SO: %s", str(e)) + return + + + @asyncio.coroutine + def notify_nsr_down(self, nsrid): + self._log.info("Notifying NSR id = %s DOWN!", nsrid) + + if self.soif is None: + self._log.warning("No SO interface created yet! Find and delete the nsr-id from the pending queue") + else: + # Send NSR id as configuration + try: + yield from self.soif.send_nsr_delete(nsrid) + except Exception as e: + self._log.error("Failed to send NSR id to SO: %s", str(e)) + return \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmconfigplugin.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmconfigplugin.py new file mode 100644 index 0000000..1347d62 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmconfigplugin.py @@ -0,0 +1,183 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import asyncio +import abc + + +class NsmConfigPluginBase(object): + """ + Abstract base class for the NSM Configuration agent plugin. + There will be a single instance of this plugin for each plugin type. 
+ """ + + def __init__(self, dts, log, loop, publisher, config_agent): + self._dts = dts + self._log = log + self._loop = loop + self._publisher = publisher + self._config_agent = config_agent + + @property + def dts(self): + return self._dts + + @property + def log(self): + return self._log + + @property + def loop(self): + return self._loop + + @property + def nsm(self): + return self._nsm + + + @abc.abstractmethod + @asyncio.coroutine + def notify_create_nsr(self, nsr, nsd): + """ Notification on creation of an NSR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def apply_config(self, config, nsrs, vnfrs): + """ Notification on configuration of an NSR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_create_vls(self, nsr, vld): + """ Notification on creation of an VL """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_create_vnfr(self, nsr, vnfr): + """ Notification on creation of an VNFR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_instantiate_ns(self, nsr): + """ Notification for instantiate of the network service """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_instantiate_vnf(self, nsr, vnfr, xact): + """ Notify instantiation of the virtual network function """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_instantiate_vl(self, nsr, vl, xact): + """ Notify instantiate of the virtual link""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_nsr_active(self, nsr, vnfrs): + """ Notify instantiate of the virtual link""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_terminate_ns(self, nsr): + """Notify termination of the network service """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_terminate_vnf(self, nsr, vnfr, xact): + """Notify termination of the VNF """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_terminate_vl(self, nsr, vlr, xact): + """Notify termination of the Virtual Link Record""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def apply_initial_config(self, vnfr_id, vnf): + """Apply initial configuration""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def get_config_status(self, vnfr_id): + """Get the status for the VNF""" + pass + + @abc.abstractmethod + def get_action_status(self, execution_id): + """Get the action exection status""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def is_configured(self, vnfr_if): + """ Check if the agent is configured for the VNFR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output): + """Apply config primitive on a VNF""" + pass + + @abc.abstractmethod + def is_vnfr_managed(self, vnfr_id): + """ Check if VNR is managed by config agent """ + pass + + @asyncio.coroutine + def invoke(self, method, *args): + self._log.debug("Config agent plugin: method {} with args {}: {}". 
+ format(method, args, self)) + # TBD - Do a better way than string compare to find invoke the method + if method == 'notify_create_nsr': + yield from self.notify_create_nsr(args[0], args[1]) + elif method == 'notify_create_vls': + yield from self.notify_create_vls(args[0], args[1], args[2]) + elif method == 'notify_create_vnfr': + yield from self.notify_create_vnfr(args[0], args[1]) + elif method == 'notify_instantiate_ns': + yield from self.notify_instantiate_ns(args[0]) + elif method == 'notify_instantiate_vnf': + yield from self.notify_instantiate_vnf(args[0], args[1], args[2]) + elif method == 'notify_instantiate_vl': + yield from self.notify_instantiate_vl(args[0], args[1], args[2]) + elif method == 'notify_nsr_active': + yield from self.notify_nsr_active(args[0], args[1]) + elif method == 'notify_terminate_ns': + yield from self.notify_terminate_ns(args[0]) + elif method == 'notify_terminate_vnf': + yield from self.notify_terminate_vnf(args[0], args[1], args[2]) + elif method == 'notify_terminate_vl': + yield from self.notify_terminate_vl(args[0], args[1], args[2]) + elif method == 'apply_initial_config': + yield from self.apply_initial_config(args[0], args[1]) + elif method == 'apply_config': + yield from self.apply_config(args[0], args[1], args[2]) + else: + self._log.error("Unknown method %s invoked on config agent plugin" % method) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py new file mode 100755 index 0000000..14a2466 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py @@ -0,0 +1,114 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import asyncio +import abc + + +class NsmPluginBase(object): + """ + Abstract base class for the NSM plugin. + There will be single instance of this plugin for each plugin type. 
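The invoke() dispatcher in rwnsmconfigplugin.py above carries a TBD about replacing its string-comparison chain; one possible shape, sketched here under the assumption that the method argument always names one of the notify_*/apply_* coroutines defined on the plugin class, is a getattr-based dispatch:

@asyncio.coroutine
def invoke(self, method, *args):
    # Look the handler up by name instead of comparing strings one by one
    handler = getattr(self, method, None)
    if handler is None or not callable(handler):
        self._log.error("Unknown method %s invoked on config agent plugin", method)
        return
    yield from handler(*args)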
+ """ + + def __init__(self, dts, log, loop, nsm, plugin_name, dts_publisher): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._plugin_name = plugin_name + self._dts_publisher = dts_publisher + + @property + def dts(self): + return self._dts + + @property + def log(self): + return self._log + + @property + def loop(self): + return self._loop + + @property + def nsm(self): + return self._nsm + + def create_nsr(self, nsr): + """ Create an NSR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def deploy(self, nsr_msg): + pass + + @abc.abstractmethod + @asyncio.coroutine + def instantiate_ns(self, nsr, xact): + """ Instantiate the network service """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def instantiate_vnf(self, nsr, vnfr, xact): + """ Instantiate the virtual network function """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def instantiate_vl(self, nsr, vl, xact): + """ Instantiate the virtual link""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def get_nsr(self, nsr_path): + """ Get the NSR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def get_vnfr(self, vnfr_path): + """ Get the VNFR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def get_vlr(self, vlr_path): + """ Get the VLR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def terminate_ns(self, nsr, xact): + """Terminate the network service """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def terminate_vnf(self, vnfr, xact): + """Terminate the VNF """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def terminate_vl(self, vlr, xact): + """Terminate the Virtual Link Record""" + pass \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py new file mode 100755 index 0000000..10b275b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py @@ -0,0 +1,3185 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 +import asyncio +import logging +import uuid +import sys +import time + +from enum import Enum +from collections import deque +from collections import defaultdict + +import gi +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfrYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwTypes', '1.0') +gi.require_version('RwVlrYang', '1.0') +from gi.repository import ( + RwNsrYang, + NsrYang, + RwVlrYang, + VnfrYang, + RwVnfrYang, + RwNsmYang, + RwDts as rwdts, + RwTypes, + ProtobufC, +) + +import rift.mano.config_agent +import rift.tasklets + +from . import rwnsm_conman as conman +from . import cloud +from . import publisher +from . import xpath +from . import rwnsm_conagent as conagent +from . import config_value_pool +from . 
import rwvnffgmgr + + +class NetworkServiceRecordState(Enum): + """ Network Service Record State """ + INIT = 101 + VL_INIT_PHASE = 102 + VNF_INIT_PHASE = 103 + VNFFG_INIT_PHASE = 104 + RUNNING = 105 + TERMINATE = 106 + TERMINATE_RCVD = 107 + VL_TERMINATE_PHASE = 108 + VNF_TERMINATE_PHASE = 109 + VNFFG_TERMINATE_PHASE = 110 + TERMINATED = 111 + FAILED = 112 + + +class NetworkServiceRecordError(Exception): + """ Network Service Record Error """ + pass + + +class NetworkServiceDescriptorError(Exception): + """ Network Service Descriptor Error """ + pass + + +class VirtualNetworkFunctionRecordError(Exception): + """ Virtual Network Function Record Error """ + pass + + +class NetworkServiceDescriptorNotFound(Exception): + """ Cannot find Network Service Descriptor""" + pass + + +class NetworkServiceDescriptorRefCountExists(Exception): + """ Network Service Descriptor reference count exists """ + pass + + +class NetworkServiceDescriptorUnrefError(Exception): + """ Failed to unref a network service descriptor """ + pass + + +class NsrInstantiationFailed(Exception): + """ Failed to instantiate network service """ + pass + + +class VnfInstantiationFailed(Exception): + """ Failed to instantiate virtual network function""" + pass + +class VnffgInstantiationFailed(Exception): + """ Failed to instantiate virtual network function""" + pass + +class VnfDescriptorError(Exception): + """Failed to instantiate virtual network function""" + pass + + +class VlRecordState(Enum): + """ VL Record State """ + INIT = 101 + INSTANTIATION_PENDING = 102 + ACTIVE = 103 + TERMINATE_PENDING = 104 + TERMINATED = 105 + FAILED = 106 + + +class VnffgRecordState(Enum): + """ VNFFG Record State """ + INIT = 101 + INSTANTIATION_PENDING = 102 + ACTIVE = 103 + TERMINATE_PENDING = 104 + TERMINATED = 105 + FAILED = 106 + + +class VnffgRecord(object): + """ Vnffg Records class""" + def __init__(self, dts, log, loop, vnffgmgr, nsr, nsr_name, vnffgd_msg, sdn_account_name): + + self._dts = dts + self._log = log + self._loop = loop + self._vnffgmgr = vnffgmgr + self._nsr = nsr + self._nsr_name = nsr_name + self._vnffgd_msg = vnffgd_msg + if sdn_account_name is None: + self._sdn_account_name = '' + else: + self._sdn_account_name = sdn_account_name + + self._vnffgr_id = str(uuid.uuid4()) + self._vnffgr_rsp_id = list() + self._vnffgr_state = VnffgRecordState.INIT + + @property + def id(self): + """ VNFFGR id """ + return self._vnffgr_id + + @property + def state(self): + """ state of this VNF """ + return self._vnffgr_state + + def fetch_vnffgr(self): + """ + Get VNFFGR message to be published + """ + + if self._vnffgr_state == VnffgRecordState.INIT: + vnffgr_dict = {"id": self._vnffgr_id, + "nsd_id": self._nsr.nsd_id, + "vnffgd_id_ref": self._vnffgd_msg.id, + "vnffgd_name_ref": self._vnffgd_msg.name, + "sdn_account": self._sdn_account_name, + "operational_status": 'init', + } + vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict) + elif self._vnffgr_state == VnffgRecordState.TERMINATED: + vnffgr_dict = {"id": self._vnffgr_id, + "nsd_id": self._nsr.nsd_id, + "vnffgd_id_ref": self._vnffgd_msg.id, + "vnffgd_name_ref": self._vnffgd_msg.name, + "sdn_account": self._sdn_account_name, + "operational_status": 'terminated', + } + vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict) + else: + try: + vnffgr = self._vnffgmgr.fetch_vnffgr(self._vnffgr_id) + except Exception: + self._log.exception("Fetching VNFFGR for VNFFG with id %s failed", self._vnffgr_id) + self._vnffgr_state = 
VnffgRecordState.FAILED + vnffgr_dict = {"id": self._vnffgr_id, + "nsd_id": self._nsr.nsd_id, + "vnffgd_id_ref": self._vnffgd_msg.id, + "vnffgd_name_ref": self._vnffgd_msg.name, + "sdn_account": self._sdn_account_name, + "operational_status": 'failed', + } + vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict) + + return vnffgr + + @asyncio.coroutine + def vnffgr_create_msg(self): + """ Virtual Link Record message for Creating VLR in VNS """ + vnffgr_dict = {"id": self._vnffgr_id, + "nsd_id": self._nsr.nsd_id, + "vnffgd_id_ref": self._vnffgd_msg.id, + "vnffgd_name_ref": self._vnffgd_msg.name, + "sdn_account": self._sdn_account_name, + } + vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict) + for rsp in self._vnffgd_msg.rsp: + vnffgr_rsp = vnffgr.rsp.add() + vnffgr_rsp.id = str(uuid.uuid4()) + vnffgr_rsp.name = self._nsr.name + '.' + rsp.name + self._vnffgr_rsp_id.append(vnffgr_rsp.id) + vnffgr_rsp.vnffgd_rsp_id_ref = rsp.id + vnffgr_rsp.vnffgd_rsp_name_ref = rsp.name + for rsp_cp_ref in rsp.vnfd_connection_point_ref: + vnfd = [self._nsr._vnfds[vnfd_id] for vnfd_id in self._nsr._vnfds.keys() if vnfd_id == rsp_cp_ref.vnfd_id_ref] + if len(vnfd) > 0 and vnfd[0].has_field('service_function_type'): + self._log.debug("Service Function Type for VNFD ID %s is %s",rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type) + else: + self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",rsp_cp_ref.vnfd_id_ref) + continue + + vnfr_cp_ref = vnffgr_rsp.vnfr_connection_point_ref.add() + vnfr_cp_ref.member_vnf_index_ref = rsp_cp_ref.member_vnf_index_ref + vnfr_cp_ref.hop_number = rsp_cp_ref.order + vnfr_cp_ref.vnfd_id_ref =rsp_cp_ref.vnfd_id_ref + vnfr_cp_ref.service_function_type = vnfd[0].service_function_type + for nsr_vnfr in self._nsr.vnfrs.values(): + if (nsr_vnfr.vnfd.id == vnfr_cp_ref.vnfd_id_ref and + nsr_vnfr.member_vnf_index == vnfr_cp_ref.member_vnf_index_ref): + vnfr_cp_ref.vnfr_id_ref = nsr_vnfr.id + vnfr_cp_ref.vnfr_name_ref = nsr_vnfr.name + vnfr_cp_ref.vnfr_connection_point_ref = rsp_cp_ref.vnfd_connection_point_ref + + vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath) + self._log.debug(" Received VNFR is %s", vnfr) + while vnfr.operational_status != 'running': + self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status) + if vnfr.operational_status == 'failed': + self._log.error("Fetching VNFR for %s failed", vnfr.id) + raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id)) + yield from asyncio.sleep(2, loop=self._loop) + vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath) + self._log.debug("Received VNFR is %s", vnfr) + + vnfr_cp_ref.connection_point_params.mgmt_address = vnfr.mgmt_interface.ip_address + for cp in vnfr.connection_point: + if cp.name == vnfr_cp_ref.vnfr_connection_point_ref: + vnfr_cp_ref.connection_point_params.port_id = cp.connection_point_id + vnfr_cp_ref.connection_point_params.name = self._nsr.name + '.' 
+ cp.name + for vdu in vnfr.vdur: + for ext_intf in vdu.external_interface: + if ext_intf.name == vnfr_cp_ref.vnfr_connection_point_ref: + vnfr_cp_ref.connection_point_params.vm_id = vdu.vim_id + self._log.debug("VIM ID for CP %s in VNFR %s is %s",cp.name,nsr_vnfr.id, + vnfr_cp_ref.connection_point_params.vm_id) + break + + vnfr_cp_ref.connection_point_params.address = cp.ip_address + vnfr_cp_ref.connection_point_params.port = 50000 + for vdu in vnfr.vdur: + pass + self._log.info("VNFFGR msg to be sent is %s", vnffgr) + return vnffgr + + @asyncio.coroutine + def instantiate(self, xact): + """ Instantiate this VNFFG """ + + self._log.info("Instaniating VNFFGR with vnffgd %s xact %s", + self._vnffgd_msg, xact) + vnffgr_request = yield from self.vnffgr_create_msg() + + try: + vnffgr = self._vnffgmgr.create_vnffgr(vnffgr_request,self._vnffgd_msg.classifier) + except Exception: + self._log.exception("VNFFG instantiation failed") + self._vnffgr_state = VnffgRecordState.FAILED + raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFFGR %s failure" % (self.id, vnffgr_request.id)) + + self._vnffgr_state = VnffgRecordState.INSTANTIATION_PENDING + + if vnffgr.operational_status == 'failed': + self._log.error("NS Id:%s VNFFG creation failed for vnffgr id %s", self.id, vnffgr.id) + self._vnffgr_state = VnffgRecordState.FAILED + raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFFGR %s failure" % (self.id, vnffgr.id)) + + self._log.info("Instantiated VNFFGR :%s",vnffgr) + self._vnffgr_state = VnffgRecordState.ACTIVE + + self._log.info("Invoking update_nsr_state to update NSR state for NSR ID: %s", self._nsr.id) + yield from self._nsr.update_nsr_state() + + def vnffgr_in_vnffgrm(self): + """ Is there a VNFR record in VNFM """ + if (self._vnffgr_state == VnffgRecordState.ACTIVE or + self._vnffgr_state == VnffgRecordState.INSTANTIATION_PENDING or + self._vnffgr_state == VnffgRecordState.FAILED): + return True + + return False + + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate this VNFFGR """ + if not self.vnffgr_in_vnffgrm(): + self._log.error("Ignoring terminate request for id %s in state %s", + self.id, self._vnffgr_state) + return + + self._log.info("Terminating VNFFGR id:%s", self.id) + self._vnffgr_state = VnffgRecordState.TERMINATE_PENDING + + self._vnffgmgr.terminate_vnffgr(self._vnffgr_id) + + self._vnffgr_state = VnffgRecordState.TERMINATED + self._log.debug("Terminated VNFFGR id:%s", self.id) + + +class VirtualLinkRecord(object): + """ Virtual Link Records class""" + def __init__(self, dts, log, loop, nsr_name, vld_msg, cloud_account_name): + + self._dts = dts + self._log = log + self._loop = loop + self._nsr_name = nsr_name + self._vld_msg = vld_msg + self._cloud_account_name = cloud_account_name + + self._vlr_id = str(uuid.uuid4()) + self._state = VlRecordState.INIT + + @property + def xpath(self): + """ path for this object """ + return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self._vlr_id) + + @property + def id(self): + """ VLR id """ + return self._vlr_id + + @property + def nsr_name(self): + """ Get NSR name for this VL """ + return self.nsr_name + + @property + def vld_msg(self): + """ Virtual Link Desciptor """ + return self._vld_msg + + @property + def name(self): + """ + Get the name for this VLR. + VLR name is "nsr name:VLD name" + """ + if self.vld_msg.name == "multisite": + # This is a temporary hack to identify manually provisioned inter-site network + return self.vld_msg.name + else: + return self._nsr_name + "." 
+ self.vld_msg.name + + @property + def cloud_account_name(self): + """ Cloud account that this VLR should be created in """ + return self._cloud_account_name + + @staticmethod + def vlr_xpath(vlr): + """ Get the VLR path from VLR """ + return (VirtualLinkRecord.XPATH + "[vlr:id = '{}']").format(vlr.id) + + @property + def vlr_msg(self): + """ Virtual Link Record message for Creating VLR in VNS """ + vld_fields = ["short_name", + "vendor", + "description", + "version", + "type_yang", + "provider_network"] + + vld_copy_dict = {k: v for k, v in self.vld_msg.as_dict().items() + if k in vld_fields} + vlr_dict = {"id": self._vlr_id, + "name": self.name, + "cloud_account": self.cloud_account_name, + } + + vlr_dict.update(vld_copy_dict) + + vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict) + return vlr + + def create_nsr_vlr_msg(self, vnfrs): + """ The VLR message""" + nsr_vlr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vlr() + nsr_vlr.vlr_ref = self._vlr_id + + for conn in self.vld_msg.vnfd_connection_point_ref: + for vnfr in vnfrs: + if (vnfr.vnfd.id == conn.vnfd_id_ref and + vnfr.member_vnf_index == conn.member_vnf_index_ref): + cp_entry = nsr_vlr.vnfr_connection_point_ref.add() + cp_entry.vnfr_id = vnfr.id + cp_entry.connection_point = conn.vnfd_connection_point_ref + + return nsr_vlr + + @asyncio.coroutine + def instantiate(self, xact): + """ Instantiate this VL """ + + self._log.debug("Instaniating VLR key %s, vld %s xact %s", + self.xpath, self._vld_msg, xact) + vlr = None + self._state = VlRecordState.INSTANTIATION_PENDING + with self._dts.transaction(flags=0) as xact: + block = xact.block_create() + block.add_query_create(self.xpath, self.vlr_msg) + self._log.debug("Executing VL create path:%s msg:%s", + self.xpath, self.vlr_msg) + res_iter = yield from block.execute(now=True) + for ent in res_iter: + res = yield from ent + vlr = res.result + + if vlr is None: + self._state = VlRecordState.FAILED + raise NsrInstantiationFailed("Failed NS %s instantiation due to empty response" % self.id) + + if vlr.operational_status == 'failed': + self._log.debug("NS Id:%s VL creation failed for vlr id %s", self.id, vlr.id) + self._state = VlRecordState.FAILED + raise NsrInstantiationFailed("Failed NS %s instantiation due to VL %s failure" % (self.id, vlr.id)) + + self._log.info("Instantiated VL with xpath %s and vlr:%s", + self.xpath, vlr) + self._state = VlRecordState.ACTIVE + + def vlr_in_vns(self): + """ Is there a VLR record in VNS """ + if (self._state == VlRecordState.ACTIVE or + self._state == VlRecordState.INSTANTIATION_PENDING or + self._state == VlRecordState.FAILED): + return True + + return False + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate this VL """ + if not self.vlr_in_vns(): + self._log.debug("Ignoring terminate request for id %s in state %s", + self.id, self._state) + return + + self._log.debug("Terminating VL id:%s", self.id) + self._state = VlRecordState.TERMINATE_PENDING + block = xact.block_create() + block.add_query_delete(self.xpath) + yield from block.execute(flags=0, now=True) + self._state = VlRecordState.TERMINATED + self._log.debug("Terminated VL id:%s", self.id) + + +class VnfRecordState(Enum): + """ Vnf Record State """ + INIT = 101 + INSTANTIATION_PENDING = 102 + ACTIVE = 103 + TERMINATE_PENDING = 104 + TERMINATED = 105 + FAILED = 106 + + +class VirtualNetworkFunctionRecord(object): + """ Virtual Network Function Record class""" + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr" + + def __init__(self, dts, log, loop, vnfd, const_vnfd, 
nsr_name, cloud_account_name): + self._dts = dts + self._log = log + self._loop = loop + self._vnfd = vnfd + self._nsr_name = nsr_name + self._const_vnfd = const_vnfd + self._cloud_account_name = cloud_account_name + + try: + self._config_type = const_vnfd.vnf_configuration.config_type + except: + self._config_type = 'none' + self._config_status = NsrYang.ConfigStates.INIT + self._mon_params = {} + self._state = VnfRecordState.INIT + self._vnfr_id = str(uuid.uuid4()) + self._vnfr = self.vnfr_msg + self._log.debug("Set VNFR {} config type to {}". + format(self.name, self._config_type)) + + @property + def id(self): + """ VNFR id """ + return self._vnfr_id + + @property + def xpath(self): + """ VNFR xpath """ + return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id) + + @property + def mon_param_xpath(self): + """ VNFR monitoring param xpath """ + return self.xpath + "/vnfr:monitoring-param" + + @property + def vnfr(self): + """ VNFR xpath """ + return self._vnfr + + @property + def vnfd(self): + """ vnfd """ + return self._vnfd + + @property + def active(self): + """ Is this VNF actve """ + return True if self._state == VnfRecordState.ACTIVE else False + + @property + def state(self): + """ state of this VNF """ + return self._state + + @property + def member_vnf_index(self): + """ Member VNF index """ + return self._const_vnfd.member_vnf_index + + @property + def nsr_name(self): + """ NSR name""" + return self._nsr_name + + @property + def name(self): + """ Name of this VNFR """ + return self._nsr_name + "." + self.vnfd.name + "." + str(self.member_vnf_index) + + @staticmethod + def vnfr_xpath(vnfr): + """ Get the VNFR path from VNFR """ + return (VirtualNetworkFunctionRecord.XPATH + "[vnfr:id = '{}']").format(vnfr.id) + + @property + def config_type(self): + return self._config_type + + @property + def config_status(self): + self._log.debug("Map VNFR {} config status {} ({})". + format(self.name, self._config_status, self._config_type)) + if self._config_type == 'none': + return 'config_not_needed' + if self._config_status == NsrYang.ConfigStates.CONFIGURED: + return 'configured' + if self._config_status == NsrYang.ConfigStates.FAILED: + return 'failed' + return 'configuring' + + @property + def vnfr_msg(self): + """ VNFR message for this VNFR """ + vnfd_fields = ["short_name", + "vendor", + "description", + "version", + "type_yang"] + vnfd_copy_dict = {k: v for k, v in self._vnfd.as_dict().items() + if k in vnfd_fields} + vnfr_dict = {"id": self.id, + "vnfd_ref": self.vnfd.id, + "name": self.name, + "cloud_account": self._cloud_account_name, + "config_status": self.config_status, + } + vnfr_dict.update(vnfd_copy_dict) + vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict) + vnfr.member_vnf_index_ref = self.member_vnf_index + vnfr.vnf_configuration.from_dict(self._const_vnfd.vnf_configuration.as_dict()) + + if self._vnfd.mgmt_interface.has_field("port"): + vnfr.mgmt_interface.port = self._vnfd.mgmt_interface.port + + # UI expects the monitoring param field to exist + vnfr.monitoring_param = [] + + self._log.debug("Get vnfr_msg for VNFR {} : {}". + format(self.name, vnfr)) + return vnfr + + @property + def msg(self): + """ message for this VNFR """ + return self.id + + @asyncio.coroutine + def update_vnfm(self): + self._vnfr = self.vnfr_msg + # Publish only after VNFM has the VNFR created + if self._config_status != NsrYang.ConfigStates.INIT: + self._log.debug("Send an update to VNFM for VNFR {} with {}". 
+ format(self.name, self.vnfr)) + yield from self._dts.query_update(self.xpath, + 0, + self.vnfr) + + @asyncio.coroutine + def set_config_status(self, status): + self._log.debug("Update VNFR {} from {} ({}) to {}". + format(self.name, self._config_status, + self._config_type, status)) + if self._config_status == NsrYang.ConfigStates.CONFIGURED: + self._log.error("Updating already configured VNFR {}". + format(self.name)) + + if self._config_status != status: + self._config_status = status + self._log.debug("Updated VNFR {} status to {}". + format(self.name, status)) + try: + yield from self.update_vnfm() + except Exception as e: + self._log.error("Exception updating VNFM with new status {} of VNFR {}: {}". + format(status, self.name, e)) + self._log.exception(e) + + def is_configured(self): + if self._config_type == 'none': + return True + + if self._config_status == NsrYang.ConfigStates.CONFIGURED: + return True + return False + + @asyncio.coroutine + def instantiate(self, nsr, xact): + """ Instantiate this VL """ + + self._log.debug("Instaniating VNFR key %s, vnfd %s, xact %s", + self.xpath, self._vnfd, xact) + + self._log.debug("Create VNF with xpath %s and vnfr %s", + self.xpath, self.vnfr) + + self._state = VnfRecordState.INSTANTIATION_PENDING + + def find_vlr_for_cp(conn): + """ Find VLR for the given connection point """ + for vlr in nsr.vlrs: + for vnfd_cp in vlr.vld_msg.vnfd_connection_point_ref: + if (vnfd_cp.vnfd_id_ref == self._vnfd.id and + vnfd_cp.vnfd_connection_point_ref == conn.name and + vnfd_cp.member_vnf_index_ref == self.member_vnf_index): + self._log.debug("Found VLR for cp_name:%s and vnf-index:%d", + conn.name, self.member_vnf_index) + return vlr + return None + + # For every connection point in the VNFD fill in the identifier + for conn_p in self._vnfd.connection_point: + cpr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint() + cpr.name = conn_p.name + cpr.type_yang = conn_p.type_yang + vlr_ref = find_vlr_for_cp(conn_p) + if vlr_ref is None: + msg = "Failed to find VLR for cp = %s" % conn_p.name + self._log.debug("%s", msg) +# raise VirtualNetworkFunctionRecordError(msg) + continue + + cpr.vlr_ref = vlr_ref.id + self.vnfr.connection_point.append(cpr) + self._log.debug("Connection point [%s] added, vnf id=%s vnfd id=%s", + cpr, self.vnfr.id, self.vnfr.vnfd_ref) + + yield from self._dts.query_create(self.xpath, + # 0, # this is sub + 0, # this is sub + self.vnfr) + + self._log.info("Created VNF with xpath %s and vnfr %s", + self.xpath, self.vnfr) + + self._log.info("Instantiated VNFR with xpath %s and vnfd %s, vnfr %s", + self.xpath, self._vnfd, self.vnfr) + + @asyncio.coroutine + def update(self, vnfr): + """ Update this VNFR""" + curr_vnfr = self._vnfr + self._vnfr = vnfr + if vnfr.operational_status == "running": + if curr_vnfr.operational_status != "running": + yield from self.is_active() + elif vnfr.operational_status == "failed": + yield from self.instantiation_failed() + + @asyncio.coroutine + def is_active(self): + """ This VNFR is active """ + self._log.debug("VNFR %s is active", self._vnfr_id) + self._state = VnfRecordState.ACTIVE + + @asyncio.coroutine + def instantiation_failed(self): + """ This VNFR instantiation failed""" + self._log.error("VNFR %s instantiation failed", self._vnfr_id) + self._state = VnfRecordState.FAILED + + def vnfr_in_vnfm(self): + """ Is there a VNFR record in VNFM """ + if (self._state == VnfRecordState.ACTIVE or + self._state == VnfRecordState.INSTANTIATION_PENDING or + self._state == VnfRecordState.FAILED): + return 
True + + return False + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate this VNF """ + if not self.vnfr_in_vnfm(): + self._log.debug("Ignoring terminate request for id %s in state %s", + self.id, self._state) + return + + self._log.debug("Terminating VNF id:%s", self.id) + self._state = VnfRecordState.TERMINATE_PENDING + block = xact.block_create() + block.add_query_delete(self.xpath) + yield from block.execute(flags=0, now=True) + self._state = VnfRecordState.TERMINATED + self._log.debug("Terminated VNF id:%s", self.id) + + @asyncio.coroutine + def get_monitoring_param(self): + """ Fetch monitoring params """ + res_iter = yield from self._dts.query_read(self.mon_param_xpath, rwdts.Flag.MERGE) + monp_list = [] + for ent in res_iter: + res = yield from ent + monp = res.result + if monp.id in self._mon_params: + if monp.has_field("value_integer"): + self._mon_params[monp.id].value_integer = monp.value_integer + if monp.has_field("value_decimal"): + self._mon_params[monp.id].value_decimal = monp.value_decimal + if monp.has_field("value_string"): + self._mon_params[monp.id].value_string = monp.value_string + else: + self._mon_params[monp.id] = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_VnfMonitoringParam_MonitoringParam.from_dict(monp.as_dict()) + monp_list.append(self._mon_params[monp.id]) + return monp_list + + +class NetworkServiceStatus(object): + """ A class representing the Network service's status """ + MAX_EVENTS_RECORDED = 10 + """ Network service Status class""" + def __init__(self, dts, log, loop): + self._dts = dts + self._log = log + self._loop = loop + + self._state = NetworkServiceRecordState.INIT + self._events = deque([]) + + def record_event(self, evt, evt_desc): + """ Record an event """ + self._log.debug("Recording event - evt %s, evt_descr %s len = %s", + evt, evt_desc, len(self._events)) + if len(self._events) >= NetworkServiceStatus.MAX_EVENTS_RECORDED: + self._events.popleft() + self._events.append((int(time.time()), evt, evt_desc)) + + def set_state(self, state): + """ set the state of this status object """ + self._state = state + + def yang_str(self): + """ Return the state as a yang enum string """ + state_to_str_map = {"INIT": "init", + "VL_INIT_PHASE": "vl_init_phase", + "VNF_INIT_PHASE": "vnf_init_phase", + "VNFFG_INIT_PHASE": "vnffg_init_phase", + "RUNNING": "running", + "TERMINATE_RCVD": "terminate_rcvd", + "TERMINATE": "terminate", + "VL_TERMINATE_PHASE": "vl_terminate_phase", + "VNF_TERMINATE_PHASE": "vnf_terminate_phase", + "VNFFG_TERMINATE_PHASE": "vnffg_terminate_phase", + "TERMINATED": "terminated", + "FAILED": "failed"} + return state_to_str_map[self._state.name] + + @property + def state(self): + """ State of this status object """ + return self._state + + @property + def msg(self): + """ Network Service Record as a message""" + event_list = [] + idx = 1 + for entry in self._events: + event = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_OperationalEvents() + event.id = idx + idx += 1 + event.timestamp, event.event, event.description = entry + event_list.append(event) + return event_list + + +class NetworkServiceRecord(object): + """ Network service record """ + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr" + + def __init__(self, dts, log, loop, nsm, nsm_plugin, config_agent_plugins, nsr_cfg_msg,sdn_account_name): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._nsr_cfg_msg = nsr_cfg_msg + self._nsm_plugin = nsm_plugin + self._config_agent_plugins = config_agent_plugins + self._sdn_account_name = 
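Every value in the state_to_str_map used by yang_str() above is just the lower-cased enum member name, so the method could arguably be reduced to the sketch below (assuming the yang enum strings keep tracking the Python enum names):

def yang_str(self):
    """ Return the state as a yang enum string """
    return self._state.name.lower()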
sdn_account_name + + self._nsd = None + self._nsr_msg = None + self._nsr_regh = None + self._vlrs = [] + self._vnfrs = {} + self._vnfds = {} + self._vnffgrs = {} + self._param_pools = {} + self._create_time = int(time.time()) + self._op_status = NetworkServiceStatus(dts, log, loop) + self._mon_params = defaultdict(NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_VnfMonitoringParam) + self._config_status = NsrYang.ConfigStates.CONFIGURING + self._config_update = None + self._job_id = 0 + + # Initalise the state to init + # The NSR moves through the following transitions + # 1. INIT -> VLS_READY once all the VLs in the NSD are created + # 2. VLS_READY - VNFS_READY when all the VNFs in the NSD are created + # 3. VNFS_READY - READY when the NSR is published + + self.set_state(NetworkServiceRecordState.INIT) + + self.substitute_input_parameters = InputParameterSubstitution(self._log) + + @property + def nsm_plugin(self): + """ NSM Plugin """ + return self._nsm_plugin + + @property + def config_agent_plugins(self): + """ Config agent plugin list """ + return self._config_agent_plugins + + def set_state(self, state): + """ Set state for this NSR""" + self._log.debug("Setting state to %s", state) + self._op_status.set_state(state) + + @property + def id(self): + """ Get id for this NSR""" + return self._nsr_cfg_msg.id + + @property + def name(self): + """ Name of this network service record """ + return self._nsr_cfg_msg.name + + @property + def nsd_id(self): + """ Get nsd id for this NSR""" + return self._nsr_cfg_msg.nsd_ref + + @property + def cloud_account_name(self): + return self._nsr_cfg_msg.cloud_account + + @property + def state(self): + """State of this NetworkServiceRecord""" + return self._op_status.state + + def record_event(self, evt, evt_desc, state=None): + """ Record an event """ + self._op_status.record_event(evt, evt_desc) + if state is not None: + self.set_state(state) + + @property + def active(self): + """ Is this NSR active ?""" + return True if self._op_status.state == NetworkServiceRecordState.RUNNING else False + + @property + def vlrs(self): + """ VLRs associated with this NSR""" + return self._vlrs + + @property + def vnfrs(self): + """ VNFRs associated with this NSR""" + return self._vnfrs + + @property + def vnffgrs(self): + """ VNFFGRs associated with this NSR""" + return self._vnffgrs + + @property + def param_pools(self): + """ Parameter value pools associated with this NSR""" + return self._param_pools + + @property + def nsd(self): + """ NSD for this NSR """ + return self._nsd + + @property + def nsd_msg(self): + return self._nsd.msg + + @property + def job_id(self): + ''' Get a new job id for config primitive''' + self._job_id += 1 + return self._job_id + + @property + def config_status(self): + """ Config status for NSR """ + return self._config_status + + def __str__(self): + return "NSR(name={}, nsd_id={}, cloud_account={})".format( + self.name, self.nsd_id, self.cloud_account_name + ) + + @asyncio.coroutine + def invoke_config_agent_plugins(self, method, *args): + # Invoke the methods on all config agent plugins registered + for agent in self._config_agent_plugins: + try: + self._log.debug("Invoke %s on %s" % (method, agent)) + yield from agent.invoke(method, *args) + except Exception: + self._log.warning("Error invoking %s on %s : %s" % + (method, agent, sys.exc_info())) + pass + + @asyncio.coroutine + def instantiate_vls(self, xact): + """ + This function instantiates VLs for every VL in this Network Service + """ + self._log.debug("Instantiating %d VLs in NSD id 
%s", len(self._vlrs), + self.id) + for vlr in self._vlrs: + yield from self.nsm_plugin.instantiate_vl(self, vlr, xact) + yield from self.invoke_config_agent_plugins('notify_instantiate_vl', self.id, vlr, xact) + + @asyncio.coroutine + def create(self, xact): + """ Create this network service""" + yield from self.invoke_config_agent_plugins('notify_create_nsr', self.id, self._nsd) + # Create virtual links for all the external vnf + # connection points in this NS + yield from self.create_vls() + # Create VNFs in this network service + yield from self.create_vnfs(xact) + # Create VNFFG for network service + yield from self.create_vnffgs() + + self.create_param_pools() + + @asyncio.coroutine + def create_vnffgs(self): + """ This function creates VNFFGs for every VNFFG in the NSD + associated with this NSR""" + + for vnffgd in self.nsd.msg.vnffgd: + self._log.debug("Found vnffgd %s in nsr id %s", vnffgd, self.id) + vnffgr = VnffgRecord(self._dts, + self._log, + self._loop, + self._nsm._vnffgmgr, + self, + self.name, + vnffgd, + self._sdn_account_name + ) + self._vnffgrs[vnffgr.id] = vnffgr + + @asyncio.coroutine + def create_vls(self): + """ This function creates VLs for every VLD in the NSD + associated with this NSR""" + + for vld in self.nsd.msg.vld: + self._log.debug("Found vld %s in nsr id %s", vld, self.id) + vlr = VirtualLinkRecord(self._dts, + self._log, + self._loop, + self.name, + vld, + self.cloud_account_name + ) + self._vlrs.append(vlr) + yield from self.invoke_config_agent_plugins('notify_create_vls', self.id, vld, vlr) + + def is_vnfr_config_agent_managed(self, vnfr): + if vnfr.config_type == 'none': + return False + + for agent in self._config_agent_plugins: + try: + if agent.is_vnfr_managed(vnfr.id): + return True + except Exception as e: + self._log.debug("Check if VNFR {} is config agent managed: {}". 
+ format(vnfr.name, e)) + return False + + @asyncio.coroutine + def create_vnfs(self, xact): + """ + This function creates VNFs for every VNF in the NSD + associated with this NSR + """ + + self._log.debug("Creating %u VNFs associated with this NS id %s", + len(self.nsd.msg.constituent_vnfd), self.id) + + # Fetch the VNFD associated with this VNF + @asyncio.coroutine + def fetch_vnfd(vnfd_ref): + """ Fetch vnfd for the passed vnfd ref """ + return (yield from self._nsm.get_vnfd(vnfd_ref, xact)) + + for const_vnfd in self.nsd.msg.constituent_vnfd: + vnfd = None + vnfd_id = const_vnfd.vnfd_id_ref + if vnfd_id in self._vnfds: + vnfd = self._vnfds[vnfd_id] + else: + vnfd = yield from fetch_vnfd(vnfd_id) + self._vnfds[vnfd_id] = vnfd + if vnfd is None: + self._log.debug("NS instantiation failed for NSR id %s" + "Cannot find VNF descriptor with VNFD id %s", + self.id, vnfd_id) + err = ("Failed NS instantiation-VNF desc not found:" + "nsr id %s, vnfd id %s" % (self.id, vnfd_id)) + + raise NetworkServiceRecordError(err) + + vnfr = VirtualNetworkFunctionRecord(self._dts, + self._log, + self._loop, + vnfd, + const_vnfd, + self.name, + self.cloud_account_name, + ) + if vnfr.id in self._vnfrs: + err = "VNF with VNFR id %s already in vnf list" % (vnfr.id,) + raise NetworkServiceRecordError(err) + + self._vnfrs[vnfr.id] = vnfr + self._nsm.vnfrs[vnfr.id] = vnfr + + yield from self.invoke_config_agent_plugins('notify_create_vnfr', + self.id, + vnfr) + yield from vnfr.set_config_status(NsrYang.ConfigStates.INIT) + + self._log.debug("Added VNFR %s to NSM VNFR list with id %s", + vnfr.name, + vnfr.id) + + def create_param_pools(self): + for param_pool in self.nsd.msg.parameter_pool: + self._log.debug("Found parameter pool %s in nsr id %s", param_pool, self.id) + + start_value = param_pool.range.start_value + end_value = param_pool.range.end_value + if end_value < start_value: + raise NetworkServiceRecordError( + "Parameter pool %s has invalid range (start: {}, end: {})".format( + start_value, end_value + ) + ) + + self._param_pools[param_pool.name] = config_value_pool.ParameterValuePool( + self._log, + param_pool.name, + range(start_value, end_value) + ) + + + @asyncio.coroutine + def fetch_vnfr(self, vnfr_path): + """ Fetch VNFR record """ + vnfr = None + self._log.debug("Fetching VNFR with key %s while instantiating %s", + vnfr_path, self.id) + res_iter = yield from self._dts.query_read(vnfr_path, rwdts.Flag.MERGE) + + for ent in res_iter: + res = yield from ent + vnfr = res.result + + return vnfr + + @asyncio.coroutine + def instantiate_vnfs(self, xact): + """ + This function instantiates VNFs for every VNF in this Network Service + """ + self._log.debug("Instantiating %u VNFs in NS %s", + len(self.nsd.msg.constituent_vnfd), self.id) + for vnf in self._vnfrs.values(): + self._log.debug("Instantiating VNF: %s in NS %s", vnf, self.id) + yield from self.nsm_plugin.instantiate_vnf(self, vnf, xact) + vnfr = yield from self.fetch_vnfr(vnf.xpath) + if vnfr.operational_status == 'failed': + self._log.debug("Instatiation of VNF %s failed", vnf.id) + raise VnfInstantiationFailed("Failed to instantiate vnf %s", vnf.id) + yield from self.invoke_config_agent_plugins('notify_instantiate_vnf', self.id, vnf, xact) + + @asyncio.coroutine + def instantiate_vnffgs(self, xact): + """ + This function instantiates VNFFGs for every VNFFG in this Network Service + """ + self._log.debug("Instantiating %u VNFFGs in NS %s", + len(self.nsd.msg.vnffgd), self.id) + for vnffg in self._vnffgrs.values(): + self._log.debug("Instantiating 
VNFFG: %s in NS %s", vnffg, self.id) + yield from vnffg.instantiate(xact) + #vnffgr = vnffg.fetch_vnffgr() + #if vnffgr.operational_status == 'failed': + if vnffg.state == VnffgRecordState.FAILED: + self._log.debug("Instatiation of VNFFG %s failed", vnffg.id) + raise VnffgInstantiationFailed("Failed to instantiate vnffg %s", vnffg.id) + + @asyncio.coroutine + def publish(self): + """ This function publishes this NSR """ + self._nsr_msg = self.create_msg() + self._log.debug("Publishing the NSR with xpath %s and nsr %s", + self.nsr_xpath, + self._nsr_msg) + with self._dts.transaction() as xact: + yield from self._nsm.nsr_handler.update(xact, self.nsr_xpath, self._nsr_msg) + self._log.info("Published the NSR with xpath %s and nsr %s", + self.nsr_xpath, + self._nsr_msg) + + @asyncio.coroutine + def unpublish(self, xact): + """ Unpublish this NSR object """ + self._log.debug("Unpublishing Network service id %s", self.id) + yield from self._nsm.nsr_handler.delete(xact, self.nsr_xpath) + + @property + def nsr_xpath(self): + """ Returns the xpath associated with this NSR """ + return( + "D,/nsr:ns-instance-opdata" + + "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" + ).format(self.id) + + @staticmethod + def xpath_from_nsr(nsr): + """ Returns the xpath associated with this NSR op data""" + return (NetworkServiceRecord.XPATH + + "[nsr:ns-instance-config-ref = '{}']").format(nsr.id) + + @property + def nsd_xpath(self): + """ Return NSD config xpath.""" + return( + "C,/nsd:nsd-catalog" + + "/nsd:nsd[nsd:id = '{}']" + ).format(self.nsd_id) + + @asyncio.coroutine + def instantiate(self, xact): + """"Instantiates a NetworkServiceRecord. + + This function instantiates a Network service + which involves the following steps, + + * Fetch the NSD associated with NSR from DTS. + * Merge the NSD withe NSR config to begin instantiating the NS. + * Instantiate every VL in NSD by sending create VLR request to DTS. + * Instantiate every VNF in NSD by sending create VNF reuqest to DTS. 
+ * Publish the NSR details to DTS + + Arguments: + nsr: The NSR configuration request containing nsr-id and nsd_ref + xact: The transaction under which this instatiation need to be + completed + + Raises: + NetworkServiceRecordError if the NSR creation fails + + Returns: + No return value + """ + + self._log.debug("Instatiating NS - %s xact - %s", self, xact) + + # Move the state to INIITALIZING + self.set_state(NetworkServiceRecordState.INIT) + + event_descr = "Instatiation Request Received NSR Id:%s" % self.id + self.record_event("instantiating", event_descr) + + # Find the NSD + self._nsd = self._nsm.get_nsd_ref(self.nsd_id) + event_descr = "Fetched NSD with descriptor id %s" % self.nsd_id + self.record_event("nsd-fetched", event_descr) + + if self._nsd is None: + msg = "Failed to fetch NSD with nsd-id [%s] for nsr-id %s" + self._log.debug(msg, self.nsd_id, self.id) + raise NetworkServiceRecordError(self) + + self._log.debug("Got nsd result %s", self._nsd) + + # Sbustitute any input parameters + self.substitute_input_parameters(self._nsd._nsd, self._nsr_cfg_msg) + + # Create the record + yield from self.create(xact) + + # Publish the NSR to DTS + yield from self.publish() + yield from self.invoke_config_agent_plugins('notify_instantiate_ns', self.id) + + @asyncio.coroutine + def do_instantiate(): + """ + Instantiate network service + """ + self._log.debug("Instantiating VLs nsr id [%s] nsd id [%s]", + self.id, self.nsd_id) + + # instantiate the VLs + event_descr = ("Instantiating %s external VLs for NSR id %s" % + (len(self.nsd.msg.vld), self.id)) + self.record_event("begin-external-vls-instantiation", event_descr) + + self.set_state(NetworkServiceRecordState.VL_INIT_PHASE) + + try: + yield from self.instantiate_vls(xact) + except Exception: + self._log.exception("VL instantiation failed") + yield from self.instantiation_failed() + return + + # Publish the NSR to DTS + yield from self.publish() + + event_descr = ("Finished instantiating %s external VLs for NSR id %s" % + (len(self.nsd.msg.vld), self.id)) + self.record_event("end-external-vls-instantiation", event_descr) + + # Move the state to VLS_READY + self.set_state(NetworkServiceRecordState.VNF_INIT_PHASE) + + self._log.debug("Instantiating VNFs ...... nsr[%s], nsd[%s]", + self.id, self.nsd_id) + + # instantiate the VNFs + event_descr = ("Instantiating %s VNFS for NSR id %s" % + (len(self.nsd.msg.constituent_vnfd), self.id)) + + self.record_event("begin-vnf-instantiation", event_descr) + + try: + yield from self.instantiate_vnfs(xact) + except Exception: + self._log.exception("VNF instantiation failed") + yield from self.instantiation_failed() + return + + self._log.debug(" Finished instantiating %d VNFs for NSR id %s", + len(self.nsd.msg.constituent_vnfd), self.id) + + event_descr = ("Finished instantiating %s VNFs for NSR id %s" % + (len(self.nsd.msg.constituent_vnfd), self.id)) + self.record_event("end-vnf-instantiation", event_descr) + + if len(self.vnffgrs) > 0: + self._log.debug("Instantiating VNFFGRs ...... 
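The paired begin-*/end-* record_event() calls in do_instantiate() follow a fixed pattern; if desired they could be factored into a small helper such as the hypothetical sketch below (which ignores the early-return-on-failure paths, since those record their own events):

import contextlib

@contextlib.contextmanager
def instantiation_phase(nsr, name, descr):
    """Record matching begin-/end- operational events around one instantiation phase."""
    nsr.record_event("begin-" + name, "Instantiating " + descr)
    yield
    nsr.record_event("end-" + name, "Finished instantiating " + descr)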
nsr[%s], nsd[%s]", + self.id, self.nsd_id) + + # instantiate the VNFs + event_descr = ("Instantiating %s VNFFGS for NSR id %s" % + (len(self.nsd.msg.vnffgd), self.id)) + + self.record_event("begin-vnffg-instantiation", event_descr) + + try: + yield from self.instantiate_vnffgs(xact) + except Exception: + self._log.exception("VNFFG instantiation failed") + yield from self.instantiation_failed() + return + + self._log.debug(" Finished instantiating %d VNFFGs for NSR id %s", + len(self.nsd.msg.vnffgd), self.id) + event_descr = ("Finished instantiating %s VNFFGDs for NSR id %s" % + (len(self.nsd.msg.vnffgd), self.id)) + self.record_event("end-vnffg-instantiation", event_descr) + + + # Give the plugin a chance to deploy the network service now that all + # virtual links and vnfs are instantiated + try: + yield from self.nsm_plugin.deploy(self._nsr_msg) + except Exception: + self._log.exception("NSM deploy failed") + yield from self.instantiation_failed() + return + + self._log.debug("Publishing NSR...... nsr[%s], nsd[%s]", + self.id, self.nsd_id) + + # Publish the NSR to DTS + yield from self.publish() + + event_descr = ("NSR in running state for NSR id %s" % self.id) + self.record_event("ns-running", event_descr) + + self._log.debug("Published NSR...... nsr[%s], nsd[%s]", + self.id, self.nsd_id) + + self._loop.create_task(do_instantiate()) + + @asyncio.coroutine + def get_vnfr_config_status(self, vnfr): + if vnfr.is_configured(): + return NsrYang.ConfigStates.CONFIGURED + + if self.is_vnfr_config_agent_managed(vnfr): + # Check if config agent has finished configuring + status = NsrYang.ConfigStates.CONFIGURED + for agent in self._config_agent_plugins: + try: + rc = yield from agent.get_status(vnfr.id) + self._log.debug("VNFR {} config agent status is {}". + format(vnfr.name, rc)) + if rc == 'configuring': + status = NsrYang.ConfigStates.CONFIGURING + break + elif rc == 'failed': + status == NsrYang.ConfigStates.FAILED + break + + except Exception as e: + self._log.debug("Exception in is_vnfr_config_agent_managed for {}: {}". + format(vnfr.name, e)) + status = NsrYang.ConfigStates.CONFIGURING + yield from vnfr.set_config_status(status) + else: + # Rift Configuration Manager + status = vnfr._config_status + + if status in [NsrYang.ConfigStates.CONFIGURED, NsrYang.ConfigStates.FAILED]: + if self.is_vnfr_config_agent_managed(vnfr): + # Re-apply initial config + self._log.debug("VNF active. Apply initial config for vnfr {}".format(vnfr.name)) + yield from self.invoke_config_agent_plugins('apply_initial_config', + vnfr.id, vnfr) + + return status + + @asyncio.coroutine + def update_config_status(self): + ''' Check if all VNFRs are configured ''' + self._log.debug("Check all VNFRs are configured for ns %s" % self.name) + + if self._config_status in [NsrYang.ConfigStates.CONFIGURED, NsrYang.ConfigStates.FAILED]: + return + + # Handle reload scenarios + for vnfr in self._vnfrs.values(): + if self.is_vnfr_config_agent_managed(vnfr): + yield from vnfr.set_config_status(NsrYang.ConfigStates.CONFIGURING) + + while True: + config_status = NsrYang.ConfigStates.CONFIGURED + for vnfr in self._vnfrs.values(): + config_status = yield from self.get_vnfr_config_status(vnfr) + if config_status == NsrYang.ConfigStates.CONFIGURING: + break + self._config_status = config_status + if config_status in [NsrYang.ConfigStates.CONFIGURED, NsrYang.ConfigStates.FAILED]: + self._log.debug("Publish config status for NS {}: {}". 
+ format(self.name, config_status)) + yield from self.publish() + return + else: + yield from asyncio.sleep(10, loop=self._loop) + + + @asyncio.coroutine + def is_active(self): + """ This NS is active """ + self._log.debug("Network service %s is active ", self.id) + self.set_state(NetworkServiceRecordState.RUNNING) + + # Publish the NSR to DTS + yield from self.publish() + yield from self._nsm.so_obj.notify_nsr_up(self.id) + yield from self.invoke_config_agent_plugins('notify_nsr_active', self.id, self._vnfrs) + self._config_update = self._loop.create_task(self.update_config_status()) + self._log.debug("Created tasklet %s" % self._config_update) + + @asyncio.coroutine + def instantiation_failed(self): + """ The NS instantiation failed""" + self._log.debug("Network service %s instantiation failed", self.id) + self.set_state(NetworkServiceRecordState.FAILED) + + event_descr = "Instantiation of NS %s failed" % self.id + self.record_event("ns-failed", event_descr) + + # Publish the NSR to DTS + yield from self.publish() + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate a NetworkServiceRecord.""" + def terminate_vnfrs(xact): + """ Terminate VNFRS in this network service """ + self._log.debug("Terminating VNFs in network service %s", self.id) + for vnfr in self.vnfrs.values(): + yield from self.nsm_plugin.terminate_vnf(vnfr, xact) + yield from self.invoke_config_agent_plugins('notify_terminate_vnf', self.id, vnfr, xact) + + def terminate_vnffgrs(xact): + """ Terminate VNFFGRS in this network service """ + self._log.debug("Terminating VNFFGRs in network service %s", self.id) + for vnffgr in self.vnffgrs.values(): + yield from vnffgr.terminate(xact) + + + def terminate_vlrs(xact): + """ Terminate VLRs in this netork service """ + self._log.debug("Terminating VLs in network service %s", self.id) + for vlr in self.vlrs: + yield from self.nsm_plugin.terminate_vl(vlr, xact) + yield from self.invoke_config_agent_plugins('notify_terminate_vl', self.id, vlr, xact) + + self._log.debug("Terminating network service id %s", self.id) + + # Move the state to TERMINATE + self.set_state(NetworkServiceRecordState.TERMINATE) + event_descr = "Terminate being processed for NS Id:%s" % self.id + self.record_event("terminate", event_descr) + + # Move the state to VNF_TERMINATE_PHASE + self._log.debug("Terminating VNFFGs in NS ID: %s",self.id) + self.set_state(NetworkServiceRecordState.VNFFG_TERMINATE_PHASE) + event_descr = "Terminating VNFFGS in NS Id:%s" % self.id + self.record_event("terminating-vnffgss", event_descr) + yield from terminate_vnffgrs(xact) + + # Move the state to VNF_TERMINATE_PHASE + self.set_state(NetworkServiceRecordState.VNF_TERMINATE_PHASE) + event_descr = "Terminating VNFS in NS Id:%s" % self.id + self.record_event("terminating-vnfs", event_descr) + yield from terminate_vnfrs(xact) + + # Move the state to VL_TERMINATE_PHASE + self.set_state(NetworkServiceRecordState.VL_TERMINATE_PHASE) + event_descr = "Terminating VLs in NS Id:%s" % self.id + self.record_event("terminating-vls", event_descr) + yield from terminate_vlrs(xact) + + yield from self.nsm_plugin.terminate_ns(self, xact) + + # Move the state to TERMINATED + self.set_state(NetworkServiceRecordState.TERMINATED) + event_descr = "Terminated NS Id:%s" % self.id + self.record_event("terminated", event_descr) + self._loop.create_task(self._nsm.so_obj.notify_nsr_down(self.id)) + yield from self.invoke_config_agent_plugins('notify_terminate_ns', self.id) + self._log.debug("Checking tasklet %s" % (self._config_update)) + if 
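For reference, the _config_update task used above has a simple lifecycle: is_active() starts a long-running polling coroutine with loop.create_task(), and terminate() later cancels it. A minimal standalone sketch of that pattern, using assumed names and the same Python 3.4-style coroutines as this file:

import asyncio

@asyncio.coroutine
def poll_config_status(loop):
    while True:
        # ... check whether all VNFRs are configured, publish when done ...
        yield from asyncio.sleep(10, loop=loop)

loop = asyncio.get_event_loop()
task = loop.create_task(poll_config_status(loop))  # started when the NS goes active
# ... later, when the NS is terminated:
task.cancel()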
self._config_update: + self._config_update.print_stack() + self._config_update.cancel() + self._config_update = None + + def enable(self): + """"Enable a NetworkServiceRecord.""" + pass + + def disable(self): + """"Disable a NetworkServiceRecord.""" + pass + + def map_config_status(self): + self._log.debug("Config status for ns {} is {}". + format(self.name, self._config_status)) + if self._config_status == NsrYang.ConfigStates.CONFIGURING: + return 'configuring' + if self._config_status == NsrYang.ConfigStates.FAILED: + return 'failed' + return 'configured' + + def create_msg(self): + """ The network serice record as a message """ + nsr_dict = {"ns_instance_config_ref": self.id} + nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict(nsr_dict) + nsr.cloud_account = self.cloud_account_name + nsr.name_ref = self.name + nsr.nsd_name_ref = self.nsd.name + nsr.operational_events = self._op_status.msg + nsr.operational_status = self._op_status.yang_str() + nsr.config_status = self.map_config_status() + nsr.create_time = self._create_time + for vnfr_id in self.vnfrs: + nsr.constituent_vnfr_ref.append(self.vnfrs[vnfr_id].msg) + for vlr in self.vlrs: + nsr.vlr.append(vlr.create_nsr_vlr_msg(self.vnfrs.values())) + for vnffgr in self.vnffgrs.values(): + nsr.vnffgr.append(vnffgr.fetch_vnffgr()) + return nsr + + def all_vnfs_active(self): + """ Are all VNFS in this NS active? """ + for _, vnfr in self.vnfrs.items(): + if vnfr.active is not True: + return False + return True + + @asyncio.coroutine + def update_nsr_state(self): + """ Re-evaluate this NS's state """ + curr_state = self._op_status.state + new_state = NetworkServiceRecordState.RUNNING + self._log.info("Received update_nsr_state for nsr: %s, curr-state: %s",self.id,curr_state) + #Check all the VNFRs are present + for _, vnfr in self.vnfrs.items(): + if vnfr.state == VnfRecordState.ACTIVE: + pass + elif vnfr.state == VnfRecordState.FAILED: + event_descr = "Instantiation of VNF %s failed" % vnfr.id + self.record_event("vnf-failed", event_descr) + new_state = NetworkServiceRecordState.FAILED + break + else: + new_state = curr_state + + # If new state is RUNNIG; check VNFFGRs are also active + if new_state == NetworkServiceRecordState.RUNNING: + for _, vnffgr in self.vnffgrs.items(): + self._log.info("Checking vnffgr state for nsr %s is: %s",self.id,vnffgr.state) + if vnffgr.state == VnffgRecordState.ACTIVE: + pass + elif vnffgr.state == VnffgRecordState.FAILED: + event_descr = "Instantiation of VNFFGR %s failed" % vnffgr.id + self.record_event("vnffg-failed", event_descr) + new_state = NetworkServiceRecordState.FAILED + break + else: + self._log.info("VNFFGR %s in NSR %s is still not active; current state is: %s", + vnffgr.id, self.state, vnffgr.state) + new_state = curr_state + + if new_state != curr_state: + self._log.debug("Changing state of Network service %s from %s to %s", + self.id, curr_state, new_state) + if new_state == NetworkServiceRecordState.RUNNING: + yield from self.is_active() + elif new_state == NetworkServiceRecordState.FAILED: + yield from self.instantiation_failed() + + @asyncio.coroutine + def get_monitoring_param(self): + """ Get monitoring params for this network service """ + vnfrs = list(self.vnfrs.values()) + monp_list = [] + for vnfr in vnfrs: + self._mon_params[vnfr.id].vnfr_id_ref = vnfr.id + self._mon_params[vnfr.id].monitoring_param = yield from vnfr.get_monitoring_param() + monp_list.append(self._mon_params[vnfr.id]) + + return monp_list + + +class InputParameterSubstitution(object): + """ + This class is 
responsible for substituting input parameters into an NSD. + """ + + def __init__(self, log): + """Create an instance of InputParameterSubstitution + + Arguments: + log - a logger for this object to use + + """ + self.log = log + + def __call__(self, nsd, nsr_config): + """Substitutes input parameters from the NSR config into the NSD + + This call modifies the provided NSD with the input parameters that are + contained in the NSR config. + + Arguments: + nsd - a GI NSD object + nsr_config - a GI NSR config object + + """ + if nsd is None or nsr_config is None: + return + + # Create a lookup of the xpath elements that this descriptor allows + # to be modified + optional_input_parameters = set() + for input_parameter in nsd.input_parameter_xpath: + optional_input_parameters.add(input_parameter.xpath) + + # Apply the input parameters to the descriptor + if nsr_config.input_parameter: + for param in nsr_config.input_parameter: + if param.xpath not in optional_input_parameters: + msg = "tried to set an invalid input parameter ({})" + self.log.error(msg.format(param.xpath)) + + continue + + self.log.debug( + "input-parameter:{} = {}".format( + param.xpath, + param.value, + ) + ) + + try: + xpath.setxattr(nsd, param.xpath, param.value) + + except Exception as e: + self.log.exception(e) + + +class NetworkServiceDescriptor(object): + """ + Network service descriptor class + """ + + def __init__(self, dts, log, loop, nsd): + self._dts = dts + self._log = log + self._loop = loop + + self._nsd = nsd + self._ref_count = 0 + + @property + def id(self): + """ Returns nsd id """ + return self._nsd.id + + @property + def name(self): + """ Returns name of nsd """ + return self._nsd.name + + @property + def ref_count(self): + """ Returns reference count""" + return self._ref_count + + def in_use(self): + """ Returns whether nsd is in use or not """ + return True if self.ref_count > 0 else False + + def ref(self): + """ Take a reference on this object """ + self._ref_count += 1 + + def unref(self): + """ Release reference on this object """ + if self.ref_count < 1: + msg = ("Unref on a NSD object - nsd id %s, ref_count = %s" % + (self.id, self.ref_count)) + self._log.critical(msg) + raise NetworkServiceDescriptorError(msg) + self._ref_count -= 1 + + @property + def msg(self): + """ Return the message associated with this NetworkServiceDescriptor""" + return self._nsd + + @staticmethod + def path_for_id(nsd_id): + """ Return path for the passed nsd_id""" + return "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}'".format(nsd_id) + + def path(self): + """ Return the message associated with this NetworkServiceDescriptor""" + return NetworkServiceDescriptor.path_for_id(self.id) + + def update(self, nsd): + """ Update the NSD descriptor """ + if self.in_use(): + self._log.error("Cannot update descriptor %s in use", self.id) + raise NetworkServiceDescriptorError("Cannot update descriptor in use %s" % self.id) + self._nsd = nsd + + +class NsdDtsHandler(object): + """ The network service descriptor DTS handler """ + XPATH = "C,/nsd:nsd-catalog/nsd:nsd" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for Nsd create/update/delete/read requests from dts """ + + def on_apply(dts, acg, xact, action, scratch): + """Apply the configuration""" + self._log.debug("Got nsd apply cfg (xact:%s) 
(action:%s)", + xact, action) + # Create/Update an NSD record + for cfg in self._regh.get_xact_elements(xact): + # Only interested in those NSD cfgs whose ID was received in prepare callback + if cfg.id in acg.scratch['nsds']: + self._nsm.update_nsd(cfg) + + del acg._scratch['nsds'][:] + return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for NSD config """ + + self._log.info("Got nsd prepare - config received nsd id %s, msg %s", + msg.id, msg) + + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + if fref.is_field_deleted(): + # Delete an NSD record + self._log.debug("Deleting NSD with id %s", msg.id) + if self._nsm.nsd_in_use(msg.id): + self._log.debug("Cannot delete NSD in use - %s", msg.id) + err = "Cannot delete an NSD in use - %s" % msg.id + raise NetworkServiceDescriptorRefCountExists(err) + self._nsm.delete_nsd(msg.id) + else: + # Handle actual adds/updates in apply_callback, + # just check if NSD in use in prepare_callback + if self._nsm.nsd_in_use(msg.id): + self._log.debug("Cannot modify an NSD in use - %s", msg.id) + err = "Cannot modify an NSD in use - %s" % msg.id + raise NetworkServiceDescriptorRefCountExists(err) + + # Add this NSD to scratch to create/update in apply callback + acg._scratch['nsds'].append(msg.id) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug( + "Registering for NSD config using xpath: %s", + NsdDtsHandler.XPATH, + ) + + acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + # Need a list in scratch to store NSDs to create/update later + acg._scratch['nsds'] = list() + self._regh = acg.register( + xpath=NsdDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + + +class VnfdDtsHandler(object): + """ DTS handler for VNFD config changes """ + XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._regh = None + + @property + def regh(self): + """ DTS registration handle """ + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for VNFD configuration""" + + @asyncio.coroutine + def on_apply(dts, acg, xact, action, scratch): + """Apply the configuration""" + self._log.debug("Got NSM VNFD apply (xact: %s) (action: %s)(scr: %s)", + xact, action, scratch) + + # Create/Update a VNFD record + for cfg in self._regh.get_xact_elements(xact): + # Only interested in those VNFD cfgs whose ID was received in prepare callback + if cfg.id in acg.scratch['vnfds']: + self._nsm.update_vnfd(cfg) + + for cfg in self._regh.elements: + if cfg.id in acg.scratch['deleted_vnfds']: + yield from self._nsm.delete_vnfd(cfg.id) + + del acg._scratch['vnfds'][:] + del acg._scratch['deleted_vnfds'][:] + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ on prepare callback """ + self._log.debug("Got on prepare for VNFD (path: %s) (action: %s) (msg: %s)", + ks_path.to_xpath(RwNsmYang.get_schema()), xact_info.query_action, msg) + # RIFT-10161 + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + # Handle deletes in prepare_callback, but adds/updates in apply_callback + if fref.is_field_deleted(): + self._log.debug("Adding msg to deleted field") + acg._scratch['deleted_vnfds'].append(msg.id) + else: + # Add this VNFD to 
scratch to create/update in apply callback + acg._scratch['vnfds'].append(msg.id) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug( + "Registering for VNFD config using xpath: %s", + VnfdDtsHandler.XPATH, + ) + acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + # Need a list in scratch to store VNFDs to create/update later + acg._scratch['vnfds'] = list() + acg._scratch['deleted_vnfds'] = list() + self._regh = acg.register( + xpath=VnfdDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + + +class NsrDtsHandler(object): + """ The network service DTS handler """ + XPATH = "C,/nsr:ns-instance-config/nsr:nsr" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @asyncio.coroutine + def register(self): + """ Register for Nsr create/update/delete/read requests from dts """ + + def on_init(acg, xact, scratch): + """ On init callback """ + + def on_deinit(acg, xact, scratch): + """ On deinit callback """ + pass + + def on_apply(dts, acg, xact, action, scratch): + """Apply the configuration""" + self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)", + xact, action, scratch) + + def handle_create_nsr(): + """ Handle create nsr requests """ + # Do some validations + if not msg.has_field("nsd_ref"): + err = "NSD reference not provided" + self._log.error(err) + raise NetworkServiceRecordError(err) + + self._log.info("Creating NetworkServiceRecord %s from nsd_id %s", + msg.id, msg.nsd_ref) + + nsr = self.nsm.create_nsr(msg) + return nsr + + @asyncio.coroutine + def begin_instantiation(nsr, xact): + """ Begin instantiation """ + self._log.info("Beginning NS instantiation: %s", nsr.id) + yield from self._nsm.instantiate_ns(nsr.id, xact) + + if action == rwdts.AppconfAction.INSTALL and xact.id is None: + self._log.debug("No xact handle. 
Skipping apply config") + xact = None + + for msg in self.regh.get_xact_elements(xact): + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + if fref.is_field_deleted(): + self._log.error("Ignoring delete in apply - msg:%s", msg) + continue + + if msg.id not in self._nsm.nsrs: + nsr = handle_create_nsr() + self._loop.create_task(begin_instantiation(nsr, xact)) + + return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare calllback from DTS for NSR """ + + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + self._log.debug( + "Got Nsr prepare callback (xact:%s info: %s, %s:%s)", + xact, xact_info, xpath, msg) + + @asyncio.coroutine + def delete_instantiation(ns_id): + """ Delete instantiation """ + with self._dts.transaction() as xact: + yield from self._nsm.terminate_ns(ns_id, xact) + + def handle_delete_nsr(): + """ Handle delete NSR requests """ + self._log.info("Delete req for NSR Id: %s received", msg.id) + # Terminate the NSR instance + nsr = self._nsm.get_ns_by_nsr_id(msg.id) + + nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD) + event_descr = "Terminate rcvd for NS Id:%s" % msg.id + nsr.record_event("terminate-rcvd", event_descr) + + self._loop.create_task(delete_instantiation(msg.id)) + + + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + if fref.is_field_deleted(): + self._log.info("Delete NSR received in prepare to terminate NS:%s", msg.id) + try: + handle_delete_nsr() + except Exception: + self._log.exception("Failed to terminate NS:%s", msg.id) + + else: + # Ensure the Cloud account has been specified if this is an NSR create + if msg.id not in self._nsm.nsrs: + if not msg.has_field("cloud_account"): + raise NsrInstantiationFailed("Cloud account not specified in NSR") + + acg.handle.prepare_complete_ok(xact_info.handle) + + self._log.debug("Registering for NSR config using xpath: %s", + NsrDtsHandler.XPATH,) + + acg_hdl = rift.tasklets.AppConfGroup.Handler( + on_init=on_init, + on_deinit=on_deinit, + on_apply=on_apply, + ) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + self._regh = acg.register(xpath=NsrDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + + +class NsrOpDataDtsHandler(object): + """ The network service op data DTS handler """ + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._regh = None + + @property + def regh(self): + """ Return the registration handle""" + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @asyncio.coroutine + def register(self): + """ Register for Nsr op data publisher registration""" + self._log.debug("Registering Nsr op data path %s as publisher", + NsrOpDataDtsHandler.XPATH) + + hdl = rift.tasklets.DTS.RegistrationHandler() + handlers = rift.tasklets.Group.Handler() + with self._dts.group_create(handler=handlers) as group: + self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ| rwdts.Flag.FILE_DATASTORE) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create an NS record in DTS with the path and message + """ + self._log.debug("Creating NSR xact = %s, %s:%s", xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Created NSR xact = %s, 
%s:%s", xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg, flags=rwdts.Flag.REPLACE): + """ + Update an NS record in DTS with the path and message + """ + self._log.debug("Updating NSR xact = %s, %s:%s regh = %s", xact, path, msg, self.regh) + self.regh.update_element(path, msg, flags) + self._log.debug("Updated NSR xact = %s, %s:%s", xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Update an NS record in DTS with the path and message + """ + self._log.debug("Deleting NSR xact:%s, path:%s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted NSR xact:%s, path:%s", xact, path) + + +class VnfrDtsHandler(object): + """ The virtual network service DTS handler """ + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @asyncio.coroutine + def register(self): + """ Register for vnfr create/update/delete/ advises from dts """ + + def on_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Got vnfr commit (xact_info: %s)", xact_info) + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + self._log.debug( + "Got vnfr on_prepare cb (xact_info: %s, action: %s): %s:%s", + xact_info, action, ks_path, msg + ) + + if action == rwdts.QueryAction.CREATE or action == rwdts.QueryAction.UPDATE: + yield from self._nsm.update_vnfr(msg) + elif action == rwdts.QueryAction.DELETE: + schema = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + self._log.debug("Deleting VNFR with id %s", path_entry.key00.id) + self._nsm.delete_vnfr(path_entry.key00.id) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath) + + self._log.debug("Registering for VNFR using xpath: %s", + VnfrDtsHandler.XPATH,) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit, + on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=VnfrDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.SUBSCRIBER),) + + +class NsMonitorDtsHandler(object): + """ The Network service Monitor DTS handler """ + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr/nsr:vnf-monitoring-param" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @staticmethod + def vnf_mon_param_xpath(nsr_id, vnfr_id): + """ VNF monitoring xpath """ + return ("D,/nsr:ns-instance-opdata" + + "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" + + "/nsr:vnf-monitoring-param" + + "[nsr:vnfr-id-ref = '{}']").format(nsr_id, vnfr_id) + + @asyncio.coroutine + def register(self): + """ Register for NS monitoring read from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + if action == rwdts.QueryAction.READ: + schema = 
RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + try: + monp_list = yield from self._nsm.get_monitoring_param( + path_entry.key00.ns_instance_config_ref) + for nsr_id, vnf_monp_list in monp_list: + for monp in vnf_monp_list: + vnf_xpath = NsMonitorDtsHandler.vnf_mon_param_xpath( + nsr_id, + monp.vnfr_id_ref + ) + xact_info.respond_xpath(rwdts.XactRspCode.MORE, + vnf_xpath, + monp) + except Exception: + self._log.exception("##### Caught exception while collection mon params #####") + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=NsMonitorDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER, + ) + + +class NsdRefCountDtsHandler(object): + """ The NSD Ref Count DTS handler """ + XPATH = "D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @asyncio.coroutine + def register(self): + """ Register for NSD ref count read from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + + if action == rwdts.QueryAction.READ: + schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount.schema() + path_entry = schema.keyspec_to_entry(ks_path) + nsd_list = yield from self._nsm.get_nsd_refcount(path_entry.key00.nsd_id_ref) + for xpath, msg in nsd_list: + xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.MORE, + xpath=xpath, + msg=msg) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + raise NetworkServiceRecordError("Not supported operation %s" % action) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=NsdRefCountDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER,) + + +class NsManagerRPCHandler(object): + """ The Network service Monitor DTS handler """ + EXEC_NS_CONF_XPATH = "I,/nsr:exec-ns-config-primitive" + EXEC_NS_CONF_O_XPATH = "O,/nsr:exec-ns-config-primitive" + + GET_NS_CONF_XPATH = "I,/nsr:get-ns-config-primitive-values" + GET_NS_CONF_O_XPATH = "O,/nsr:get-ns-config-primitive-values" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + + self._ns_regh = None + self._vnf_regh = None + self._get_ns_conf_regh = None + + self.job_manager = rift.mano.config_agent.ConfigAgentJobManager(dts, log, loop, nsm) + + @property + def reghs(self): + """ Return registration handles """ + return (self._ns_regh, self._vnf_regh, self._get_ns_conf_regh) + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + def prepare_meta(self, rpc_ip): + + try: + nsr_id = rpc_ip.nsr_id_ref + nsr = self._nsm.nsrs[nsr_id] + vnfrs = {} + for vnf in rpc_ip.vnf_list: + vnfr_id = vnf.vnfr_id_ref + vnfrs[vnfr_id] = self._nsm.vnfrs[vnfr_id] + + return nsr, vnfrs + except KeyError as e: + raise ValueError("Record not found", str(e)) + + def _get_ns_cfg_primitive(self, nsr_id, ns_cfg_name): + try: + nsr = 
self._nsm.nsrs[nsr_id] + except KeyError: + raise ValueError("NSR id %s not found" % nsr_id) + + nsd_msg = self._nsm.get_nsd(nsr.nsd_id).msg + + def get_nsd_cfg_prim(name): + for ns_cfg_prim in nsd_msg.config_primitive: + if ns_cfg_prim.name == name: + return ns_cfg_prim + + raise ValueError("Could not find ns_cfg_prim %s in nsr id %s" % (name, nsr_id)) + + ns_cfg_prim_msg = get_nsd_cfg_prim(ns_cfg_name) + ret_cfg_prim_msg = ns_cfg_prim_msg.deep_copy() + + return ret_cfg_prim_msg + + def _get_vnf_primitive(self, nsr_id, vnf_index, primitive_name): + try: + nsr = self._nsm.nsrs[nsr_id] + except KeyError: + raise ValueError("NSR id %s not found" % nsr_id) + + nsd_msg = self._nsm.get_nsd(nsr.nsd_id).msg + for vnf in nsd_msg.constituent_vnfd: + if vnf.member_vnf_index != vnf_index: + continue + + for primitive in vnf.vnf_configuration.config_primitive: + if primitive.name == primitive_name: + return primitive + + raise ValueError("Could not find vnf index %s primitive %s in nsr id %s" % + (vnf_index, primitive_name, nsr_id)) + + @asyncio.coroutine + def register(self): + """ Register for NS monitoring read from dts """ + yield from self.job_manager.register() + + @asyncio.coroutine + def on_ns_config_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts exec-ns-config-primitive""" + assert action == rwdts.QueryAction.RPC + rpc_ip = msg + rpc_op = NsrYang.YangOutput_Nsr_ExecNsConfigPrimitive() + + ns_cfg_prim_name = rpc_ip.name + nsr_id = rpc_ip.nsr_id_ref + nsr = self._nsm.nsrs[nsr_id] + + nsd_cfg_prim_msg = self._get_ns_cfg_primitive(nsr_id, ns_cfg_prim_name) + + def find_nsd_vnf_prim_param_pool(vnf_index, vnf_prim_name, param_name): + for vnf_prim_group in nsd_cfg_prim_msg.vnf_primitive_group: + if vnf_prim_group.member_vnf_index_ref != vnf_index: + continue + + for vnf_prim in vnf_prim_group.primitive: + if vnf_prim.name != vnf_prim_name: + continue + + for pool_param in vnf_prim.pool_parameters: + if pool_param.name != param_name: + continue + + try: + nsr_param_pool = nsr.param_pools[pool_param.parameter_pool] + except KeyError: + raise ValueError("Parameter pool %s does not exist in nsr" % vnf_prim.parameter_pool) + + self._log.debug("Found parameter pool %s for vnf index(%s), vnf_prim_name(%s), param_name(%s)", + nsr_param_pool, vnf_index, vnf_prim_name, param_name) + return nsr_param_pool + + self._log.debug("Could not find parameter pool for vnf index(%s), vnf_prim_name(%s), param_name(%s)", + vnf_index, vnf_prim_name, param_name) + return None + + rpc_op.nsr_id_ref = nsr_id + rpc_op.name = ns_cfg_prim_name + + nsr, vnfrs = self.prepare_meta(rpc_ip) + rpc_op.job_id = nsr.job_id + + # Give preference to user defined script. 
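+ # If the NSD primitive carries a user-defined script, the whole RPC
+ # input is handed to every registered config-agent plugin via
+ # apply_config() and the returned tasks are tracked as one job.
+ # Otherwise each VNF primitive is dispatched individually through
+ # vnf_config_primitive(), any values drawn from parameter pools are
+ # marked as used, and the job is added without plugin tasks.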
+ if nsd_cfg_prim_msg.has_field("user_defined_script"): + rpc_ip.user_defined_script = nsd_cfg_prim_msg.user_defined_script + + + tasks = [] + for config_plugin in self.nsm.config_agent_plugins: + task = yield from config_plugin.apply_config( + rpc_ip, + nsr, + vnfrs) + tasks.append(task) + + self.job_manager.add_job(rpc_op, tasks) + else: + for vnf in rpc_ip.vnf_list: + vnf_op = rpc_op.vnf_out_list.add() + vnf_member_idx = vnf.member_vnf_index_ref + vnfr_id = vnf.vnfr_id_ref + vnf_op.vnfr_id_ref = vnfr_id + vnf_op.member_vnf_index_ref = vnf_member_idx + for primitive in vnf.vnf_primitive: + op_primitive = vnf_op.vnf_out_primitive.add() + op_primitive.name = primitive.name + op_primitive.execution_id = '' + op_primitive.execution_status = 'completed' + self._log.debug("%s:%s Got primitive %s:%s", + nsr_id, vnf.member_vnf_index_ref, primitive.name, primitive.parameter) + + nsd_vnf_primitive = self._get_vnf_primitive( + nsr_id, + vnf_member_idx, + primitive.name + ) + for param in nsd_vnf_primitive.parameter: + if not param.has_field("parameter_pool"): + continue + + try: + nsr_param_pool = nsr.param_pools[param.parameter_pool] + except KeyError: + raise ValueError("Parameter pool %s does not exist in nsr" % param.parameter_pool) + nsr_param_pool.add_used_value(param.value) + + for config_plugin in self.nsm.config_agent_plugins: + yield from config_plugin.vnf_config_primitive(nsr_id, + vnfr_id, + primitive, + op_primitive) + + self.job_manager.add_job(rpc_op) + + # Get NSD + # Find Config Primitive + # For each vnf-primitive with parameter pool + # Find parameter pool + # Add used value to the pool + self._log.debug("RPC output: {}".format(rpc_op)) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, + NsManagerRPCHandler.EXEC_NS_CONF_O_XPATH, + rpc_op) + + @asyncio.coroutine + def on_get_ns_config_values_prepare(xact_info, action, ks_path, msg): + assert action == rwdts.QueryAction.RPC + nsr_id = msg.nsr_id_ref + nsr = self._nsm.nsrs[nsr_id] + cfg_prim_name = msg.name + + rpc_op = NsrYang.YangOutput_Nsr_GetNsConfigPrimitiveValues() + + ns_cfg_prim_msg = self._get_ns_cfg_primitive(nsr_id, cfg_prim_name) + + # Get pool values for NS-level parameters + for ns_param in ns_cfg_prim_msg.parameter: + if not ns_param.has_field("parameter_pool"): + continue + + try: + nsr_param_pool = nsr.param_pools[ns_param.parameter_pool] + except KeyError: + raise ValueError("Parameter pool %s does not exist in nsr" % ns_param.parameter_pool) + + new_ns_param = rpc_op.ns_parameter.add() + new_ns_param.name = ns_param.name + new_ns_param.value = str(nsr_param_pool.get_next_unused_value()) + + + # Get pool values for NS-level parameters + for vnf_prim_group in ns_cfg_prim_msg.vnf_primitive_group: + rsp_prim_group = rpc_op.vnf_primitive_group.add() + rsp_prim_group.member_vnf_index_ref = vnf_prim_group.member_vnf_index_ref + if vnf_prim_group.has_field("vnfd_id_ref"): + rsp_prim_group.vnfd_id_ref = vnf_prim_group.vnfd_id_ref + + for index, vnf_prim in enumerate(vnf_prim_group.primitive): + rsp_prim = rsp_prim_group.primitive.add() + rsp_prim.name = vnf_prim.name + rsp_prim.index = index + vnf_primitive = self._get_vnf_primitive( + nsr_id, + vnf_prim_group.member_vnf_index_ref, + vnf_prim.name + ) + for param in vnf_primitive.parameter: + if not param.has_field("parameter_pool"): + continue + + try: + nsr_param_pool = nsr.param_pools[param.parameter_pool] + except KeyError: + raise ValueError("Parameter pool %s does not exist in nsr" % vnf_prim.parameter_pool) + + vnf_param = rsp_prim.parameter.add() + vnf_param.name = 
param.name + vnf_param.value = str(nsr_param_pool.get_next_unused_value()) + + self._log.debug("RPC output: {}".format(rpc_op)) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, + NsManagerRPCHandler.GET_NS_CONF_O_XPATH, rpc_op) + + hdl_ns = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_ns_config_prepare,) + hdl_ns_get = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_get_ns_config_values_prepare,) + + with self._dts.group_create() as group: + self._ns_regh = group.register(xpath=NsManagerRPCHandler.EXEC_NS_CONF_XPATH, + handler=hdl_ns, + flags=rwdts.Flag.PUBLISHER, + ) + self._get_ns_conf_regh = group.register(xpath=NsManagerRPCHandler.GET_NS_CONF_XPATH, + handler=hdl_ns_get, + flags=rwdts.Flag.PUBLISHER, + ) + + +class NsManager(object): + """ The Network Service Manager class""" + def __init__(self, dts, log, loop, + nsr_handler, vnfr_handler, vlr_handler, cloud_plugin_selector,vnffgmgr): + self._dts = dts + self._log = log + self._loop = loop + self._nsr_handler = nsr_handler + self._vnfr_pub_handler = vnfr_handler + self._vlr_pub_handler = vlr_handler + self._vnffgmgr = vnffgmgr + + self._cloud_plugin_selector = cloud_plugin_selector + + self._nsrs = {} + self._nsds = {} + self._vnfds = {} + self._vnfrs = {} + + self._so_obj = conman.ROServiceOrchConfig(log, loop, dts, self) + + self._nsd_dts_handler = NsdDtsHandler(dts, log, loop, self) + self._vnfd_dts_handler = VnfdDtsHandler(dts, log, loop, self) + + self._dts_handlers = [self._nsd_dts_handler, + VnfrDtsHandler(dts, log, loop, self), + NsMonitorDtsHandler(dts, log, loop, self), + NsdRefCountDtsHandler(dts, log, loop, self), + NsrDtsHandler(dts, log, loop, self), + self._vnfd_dts_handler, + NsManagerRPCHandler(dts, log, loop, self), + self._so_obj] + + self._config_agent_plugins = [] + + @property + def log(self): + """ Log handle """ + return self._log + + @property + def loop(self): + """ Loop """ + return self._loop + + @property + def dts(self): + """ DTS handle """ + return self._dts + + @property + def nsr_handler(self): + """" NSR handler """ + return self._nsr_handler + + @property + def so_obj(self): + """" So Obj handler """ + return self._so_obj + + @property + def nsrs(self): + """ NSRs in this NSM""" + return self._nsrs + + @property + def nsds(self): + """ NSDs in this NSM""" + return self._nsds + + @property + def vnfds(self): + """ VNFDs in this NSM""" + return self._vnfds + + @property + def vnfrs(self): + """ VNFRs in this NSM""" + return self._vnfrs + + @property + def nsr_pub_handler(self): + """ NSR publication handler """ + return self._nsr_handler + + @property + def vnfr_pub_handler(self): + """ VNFR publication handler """ + return self._vnfr_pub_handler + + @property + def vlr_pub_handler(self): + """ VLR publication handler """ + return self._vlr_pub_handler + + @property + def config_agent_plugins(self): + """ Config agent plugins""" + return self._config_agent_plugins + + def set_config_agent_plugin(self, plugin_instance): + """ Sets the plugin to use for the NSM config agents""" + self._log.debug("Set NSM config agent plugin instance: %s", plugin_instance) + if plugin_instance not in self._config_agent_plugins: + self._config_agent_plugins.append(plugin_instance) + + @asyncio.coroutine + def register(self): + """ Register all static DTS handlers """ + for dts_handle in self._dts_handlers: + yield from dts_handle.register() + + def get_ns_by_nsr_id(self, nsr_id): + """ get NSR by nsr id """ + if nsr_id not in self._nsrs: + raise NetworkServiceRecordError("NSR id %s not found" % nsr_id) + + 
return self._nsrs[nsr_id] + + def create_nsr(self, nsr_msg): + """ Create an NSR instance """ + if nsr_msg.id in self._nsrs: + msg = "NSR id %s already exists" % nsr_msg.id + self._log.error(msg) + raise NetworkServiceRecordError(msg) + + self._log.info("Create NetworkServiceRecord nsr id %s from nsd_id %s", + nsr_msg.id, + nsr_msg.nsd_ref) + + nsm_plugin = self._cloud_plugin_selector.get_cloud_account_plugin_instance( + nsr_msg.cloud_account + ) + sdn_account_name = self._cloud_plugin_selector.get_cloud_account_sdn_name(nsr_msg.cloud_account) + + nsr = NetworkServiceRecord(self._dts, + self._log, + self._loop, + self, + nsm_plugin, + self._config_agent_plugins, + nsr_msg, + sdn_account_name + ) + self._nsrs[nsr_msg.id] = nsr + nsm_plugin.create_nsr(nsr_msg, self.get_nsd(nsr_msg.nsd_ref).msg) + + return nsr + + def delete_nsr(self, nsr_id): + """ + Delete NSR with the passed nsr id + """ + del self._nsrs[nsr_id] + + @asyncio.coroutine + def instantiate_ns(self, nsr_id, xact): + """ Instantiate an NS instance """ + self._log.debug("Instatiating Network service id %s", nsr_id) + if nsr_id not in self._nsrs: + err = "NSR id %s not found " % nsr_id + self._log.error(err) + raise NetworkServiceRecordError(err) + + nsr = self._nsrs[nsr_id] + yield from nsr.nsm_plugin.instantiate_ns(nsr, xact) + + @asyncio.coroutine + def update_vnfr(self, vnfr): + """Create/Update an VNFR """ + vnfr_state = self._vnfrs[vnfr.id].state + self._log.debug("Updating VNFR with state %s: vnfr %s", vnfr_state, vnfr) + yield from self._vnfrs[vnfr.id].update(vnfr) + nsr = self.find_nsr_for_vnfr(vnfr.id) + yield from nsr.update_nsr_state() + return self._vnfrs[vnfr.id] + + def find_nsr_for_vnfr(self, vnfr_id): + """ Find the NSR which )has the passed vnfr id""" + for nsr in list(self.nsrs.values()): + for vnfr in list(nsr.vnfrs.values()): + if vnfr.id == vnfr_id: + return nsr + return None + + def delete_vnfr(self, vnfr_id): + """ Delete VNFR with the passed id""" + del self._vnfrs[vnfr_id] + + def get_nsd_ref(self, nsd_id): + """ Get network service descriptor for the passed nsd_id + with a reference""" + nsd = self.get_nsd(nsd_id) + nsd.ref() + return nsd + + @asyncio.coroutine + def get_nsr_config(self, nsd_id): + xpath = "C,/nsr:ns-instance-config" + results = yield from self._dts.query_read(xpath, rwdts.Flag.MERGE) + + for result in results: + entry = yield from result + ns_instance_config = entry.result + + for nsr in ns_instance_config.nsr: + if nsr.nsd_ref == nsd_id: + return nsr + + return None + + @asyncio.coroutine + def nsd_unref_by_nsr_id(self, nsr_id): + """ Unref the network service descriptor based on NSR id """ + self._log.debug("NSR Unref called for Nsr Id:%s", nsr_id) + if nsr_id in self._nsrs: + nsr = self._nsrs[nsr_id] + nsd = self.get_nsd(nsr.nsd_id) + self._log.debug("Releasing ref on NSD %s held by NSR %s - Curr %d", + nsd.id, nsr.id, nsd.ref_count) + nsd.unref() + else: + self._log.error("Cannot find NSD for NSR id %s", nsr_id) + raise NetworkServiceDescriptorUnrefError("No Nsd for nsr id" % nsr_id) + + @asyncio.coroutine + def nsd_unref(self, nsd_id): + """ Unref the network service descriptor associated with the id """ + nsd = self.get_nsd(nsd_id) + nsd.unref() + + def get_nsd(self, nsd_id): + """ Get network service descriptor for the passed nsd_id""" + if nsd_id not in self._nsds: + self._log.error("Cannot find NSD id:%s", nsd_id) + raise NetworkServiceDescriptorError("Cannot find NSD id:%s", nsd_id) + + return self._nsds[nsd_id] + + def create_nsd(self, nsd_msg): + """ Create a network service 
descriptor """ + self._log.debug("Create network service descriptor - %s", nsd_msg) + if nsd_msg.id in self._nsds: + self._log.error("Cannot create NSD %s -NSD ID already exists", nsd_msg) + raise NetworkServiceDescriptorError("NSD already exists-%s", nsd_msg.id) + + nsd = NetworkServiceDescriptor( + self._dts, + self._log, + self._loop, + nsd_msg, + ) + self._nsds[nsd_msg.id] = nsd + + return nsd + + def update_nsd(self, nsd): + """ update the Network service descriptor """ + self._log.debug("Update network service descriptor - %s", nsd) + if nsd.id not in self._nsds: + self._log.debug("No NSD found - creating NSD id = %s", nsd.id) + self.create_nsd(nsd) + else: + self._log.debug("Updating NSD id = %s, nsd = %s", nsd.id, nsd) + self._nsds[nsd.id].update(nsd) + + def delete_nsd(self, nsd_id): + """ Delete the Network service descriptor with the passed id """ + self._log.debug("Deleting the network service descriptor - %s", nsd_id) + if nsd_id not in self._nsds: + self._log.debug("Delete NSD failed - cannot find nsd-id %s", nsd_id) + raise NetworkServiceDescriptorNotFound("Cannot find %s", nsd_id) + + if nsd_id not in self._nsds: + self._log.debug("Cannot delete NSD id %s reference exists %s", + nsd_id, + self._nsds[nsd_id].ref_count) + raise NetworkServiceDescriptorRefCountExists( + "Cannot delete :%s, ref_count:%s", + nsd_id, + self._nsds[nsd_id].ref_count) + + del self._nsds[nsd_id] + + def get_vnfd_config(self, xact): + vnfd_dts_reg = self._vnfd_dts_handler.regh + for cfg in vnfd_dts_reg.get_xact_elements(xact): + self.create_vnfd(cfg) + + @asyncio.coroutine + def get_vnfd(self, vnfd_id, xact): + """ Get virtual network function descriptor for the passed vnfd_id""" + if vnfd_id not in self._vnfds: + self._log.error("Cannot find VNFD id:%s", vnfd_id) + self.get_vnfd_config(xact) + + if vnfd_id not in self._vnfds: + self._log.error("Cannot find VNFD id:%s", vnfd_id) + raise VnfDescriptorError("Cannot find VNFD id:%s", vnfd_id) + + return self._vnfds[vnfd_id] + + def create_vnfd(self, vnfd): + """ Create a virtual network function descriptor """ + self._log.debug("Create virtual network function descriptor - %s", vnfd) + if vnfd.id in self._vnfds: + self._log.error("Cannot create VNFD %s -VNFD ID already exists", vnfd) + raise VnfDescriptorError("VNFD already exists-%s", vnfd.id) + + self._vnfds[vnfd.id] = vnfd + return self._vnfds[vnfd.id] + + def update_vnfd(self, vnfd): + """ Update the virtual network function descriptor """ + self._log.debug("Update virtual network function descriptor- %s", vnfd) + + # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511 + for ivld in vnfd.internal_vld: + ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref)) + + if vnfd.id not in self._vnfds: + self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id) + self.create_vnfd(vnfd) + else: + self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd) + self._vnfds[vnfd.id] = vnfd + + @asyncio.coroutine + def delete_vnfd(self, vnfd_id): + """ Delete the virtual network function descriptor with the passed id """ + self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id) + if vnfd_id not in self._vnfds: + self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id) + raise VnfDescriptorError("Cannot find %s", vnfd_id) + + del self._vnfds[vnfd_id] + + def nsd_in_use(self, nsd_id): + """ Is the NSD with the passed id in use """ + self._log.debug("Is this NSD in use - msg:%s", nsd_id) + if nsd_id in self._nsds: + 
return self._nsds[nsd_id].in_use() + return False + + @asyncio.coroutine + def publish_nsr(self, xact, path, msg): + """ Publish a NSR """ + self._log.debug("Publish NSR with path %s, msg %s", + path, msg) + yield from self.nsr_handler.update(xact, path, msg) + + @asyncio.coroutine + def unpublish_nsr(self, xact, path): + """ Un Publish an NSR """ + self._log.debug("Publishing delete NSR with path %s", path) + yield from self.nsr_handler.delete(path, xact) + + def vnfr_is_ready(self, vnfr_id): + """ VNFR with the id is ready """ + self._log.debug("VNFR id %s ready", vnfr_id) + if vnfr_id not in self._vnfds: + err = "Did not find VNFR ID with id %s" % vnfr_id + self._log.critical("err") + raise VirtualNetworkFunctionRecordError(err) + self._vnfrs[vnfr_id].is_ready() + + @asyncio.coroutine + def get_monitoring_param(self, nsr_id): + """ Get the monitoring params based on the passed ks_path """ + monp_list = [] + if nsr_id is None or nsr_id == "": + nsrs = list(self._nsrs.values()) + for nsr in nsrs: + if nsr.active: + monp = yield from nsr.get_monitoring_param() + monp_list.append((nsr.id, monp)) + elif nsr_id in self._nsrs: + if self._nsrs[nsr_id].active: + monp = yield from self._nsrs[nsr_id].get_monitoring_param() + monp_list.append((nsr_id, monp)) + + return monp_list + + @asyncio.coroutine + def get_nsd_refcount(self, nsd_id): + """ Get the nsd_list from this NSM""" + + def nsd_refcount_xpath(nsd_id): + """ xpath for ref count entry """ + return (NsdRefCountDtsHandler.XPATH + + "[rw-nsr:nsd-id-ref = '{}']").format(nsd_id) + + nsd_list = [] + if nsd_id is None or nsd_id == "": + for nsd in self._nsds.values(): + nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount() + nsd_msg.nsd_id_ref = nsd.id + nsd_msg.instance_ref_count = nsd.ref_count + nsd_list.append((nsd_refcount_xpath(nsd.id), nsd_msg)) + elif nsd_id in self._nsds: + nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount() + nsd_msg.nsd_id_ref = self._nsds[nsd_id].id + nsd_msg.instance_ref_count = self._nsds[nsd_id].ref_count + nsd_list.append((nsd_refcount_xpath(nsd_id), nsd_msg)) + + return nsd_list + + @asyncio.coroutine + def terminate_ns(self, nsr_id, xact): + """ + Terminate network service for the given NSR Id + """ + + # Terminate the instances/networks assocaited with this nw service + self._log.debug("Terminating the network service %s", nsr_id) + yield from self._nsrs[nsr_id].terminate(xact) + + # Unref the NSD + yield from self.nsd_unref_by_nsr_id(nsr_id) + + # Unpublish the NSR record + self._log.debug("Unpublishing the network service %s", nsr_id) + yield from self._nsrs[nsr_id].unpublish(xact) + + # Finaly delete the NS instance from this NS Manager + self._log.debug("Deletng the network service %s", nsr_id) + self.delete_nsr(nsr_id) + + +class NsmRecordsPublisherProxy(object): + """ This class provides a publisher interface that allows plugin objects + to publish NSR/VNFR/VLR""" + + def __init__(self, dts, log, loop, nsr_pub_hdlr, vnfr_pub_hdlr, vlr_pub_hdlr): + self._dts = dts + self._log = log + self._loop = loop + self._nsr_pub_hdlr = nsr_pub_hdlr + self._vlr_pub_hdlr = vlr_pub_hdlr + self._vnfr_pub_hdlr = vnfr_pub_hdlr + + @asyncio.coroutine + def publish_nsr(self, xact, nsr): + """ Publish an NSR """ + path = NetworkServiceRecord.xpath_from_nsr(nsr) + return (yield from self._nsr_pub_hdlr.update(xact, path, nsr)) + + @asyncio.coroutine + def unpublish_nsr(self, xact, nsr): + """ Unpublish an NSR """ + path = NetworkServiceRecord.xpath_from_nsr(nsr) + return (yield from 
self._nsr_pub_hdlr.delete(xact, path)) + + @asyncio.coroutine + def publish_vnfr(self, xact, vnfr): + """ Publish an VNFR """ + path = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr) + return (yield from self._vnfr_pub_hdlr.update(xact, path, vnfr)) + + @asyncio.coroutine + def unpublish_vnfr(self, xact, vnfr): + """ Unpublish a VNFR """ + path = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr) + return (yield from self._vnfr_pub_hdlr.delete(xact, path)) + + @asyncio.coroutine + def publish_vlr(self, xact, vlr): + """ Publish a VLR """ + path = VirtualLinkRecord.vlr_xpath(vlr) + return (yield from self._vlr_pub_hdlr.update(xact, path, vlr)) + + @asyncio.coroutine + def unpublish_vlr(self, xact, vlr): + """ Unpublish a VLR """ + path = VirtualLinkRecord.vlr_xpath(vlr) + return (yield from self._vlr_pub_hdlr.delete(xact, path)) + + +class NsmTasklet(rift.tasklets.Tasklet): + """ + The network service manager tasklet + """ + def __init__(self, *args, **kwargs): + super(NsmTasklet, self).__init__(*args, **kwargs) + + self._dts = None + self._nsm = None + + self._cloud_plugin_selector = None + self._config_agent_mgr = None + self._vnffgmgr = None + + self._nsr_handler = None + self._vnfr_pub_handler = None + self._vlr_pub_handler = None + + self._records_publisher_proxy = None + + def start(self): + """ The task start callback """ + super(NsmTasklet, self).start() + self.log.info("Starting NsmTasklet") + + self.log.setLevel(logging.DEBUG) + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS(self.tasklet_info, + RwNsmYang.get_schema(), + self.loop, + self.on_dts_state_change) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def stop(self): + try: + self._dts.deinit() + except Exception: + print("Caught Exception in NSM stop:", sys.exc_info()[0]) + raise + + def on_instance_started(self): + """ Task instance started callback """ + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def init(self): + """ Task init callback """ + self.log.debug("Got instance started callback") + + self.log.debug("creating config account handler") + + self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(self._dts, self.log, self.loop) + yield from self._nsr_pub_handler.register() + + self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(self._dts, self.log, self.loop) + yield from self._vnfr_pub_handler.register() + + self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(self._dts, self.log, self.loop) + yield from self._vlr_pub_handler.register() + + self._records_publisher_proxy = NsmRecordsPublisherProxy( + self._dts, + self.log, + self.loop, + self._nsr_pub_handler, + self._vnfr_pub_handler, + self._vlr_pub_handler, + ) + + # Register the NSM to receive the nsm plugin + # when cloud account is configured + self._cloud_plugin_selector = cloud.CloudAccountNsmPluginSelector( + self._dts, + self.log, + self.log_hdl, + self.loop, + self._records_publisher_proxy, + ) + yield from self._cloud_plugin_selector.register() + + self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts,self.log,self.log_hdl,self.loop) + yield from self._vnffgmgr.register() + + self._nsm = NsManager( + self._dts, + self.log, + self.loop, + self._nsr_pub_handler, + self._vnfr_pub_handler, + self._vlr_pub_handler, + self._cloud_plugin_selector, + self._vnffgmgr, + ) + + # Register the NSM to receive the nsm config agent plugin + # when config agent is configured + self._config_agent_mgr = conagent.NsmConfigAgent( + self._dts, + self.log, + self.loop, + self._records_publisher_proxy, + 
self._nsm.set_config_agent_plugin, + ) + yield from self._config_agent_mgr.register() + # RIFT-11780 : Must call NSM register after initializing config plugin + # During restart, there is race condition which causes the NS creation + # to occur before even config_plugin is registered. + yield from self._nsm.register() + + + @asyncio.coroutine + def run(self): + """ Task run callback """ + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self.log.debug("Changing state to %s", next_state) + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py new file mode 100755 index 0000000..3551079 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py @@ -0,0 +1,361 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
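+#
+# rwvnffgmgr.py: VNFFG manager for the NSM tasklet. VnffgMgr loads the
+# configured SDN account's plugin via rw_peas and uses its "Topology"
+# interface to render a VNFFG chain (and any classifiers) for each RSP in
+# a VNFFGR, to refresh rendered-path state from the SDN controller in
+# update_vnffgrs(), and to tear chains and classifiers down in
+# terminate_vnffgr(). SDNAccountDtsHandler subscribes to the
+# C,/rw-sdn:sdn-account config and feeds accounts into VnffgMgr through
+# set_sdn_account()/del_sdn_account().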
+ + +# +# + +import asyncio + +from gi.repository import ( + RwDts as rwdts, + RwsdnYang, + RwTypes, + ProtobufC, +) + +from gi.repository.RwTypes import RwStatus +import rw_peas +import rift.tasklets + +class SdnGetPluginError(Exception): + """ Error while fetching SDN plugin """ + pass + + +class SdnGetInterfaceError(Exception): + """ Error while fetching SDN interface""" + pass + + +class SdnAccountExistsError(Exception): + """ Error while creating SDN Account""" + pass + +class VnffgrDoesNotExist(Exception): + """ Error while fetching SDN interface""" + pass + +class VnffgrAlreadyExist(Exception): + """ Vnffgr already exists Error""" + pass + +class VnffgrCreationFailed(Exception): + """ Error while creating VNFFGR""" + pass + + +class VnffgrUpdateFailed(Exception): + """ Error while updating VNFFGR""" + pass + +class VnffgMgr(object): + """ Implements the interface to backend plugins to fetch topology """ + def __init__(self, dts, log, log_hdl, loop): + self._account = {} + self._dts = dts + self._log = log + self._log_hdl = log_hdl + self._loop = loop + self._sdn = {} + self._sdn_handler = SDNAccountDtsHandler(self._dts,self._log,self) + self._log.error("Vnffmgr instantiated") + self._vnffgr_list = {} + + @asyncio.coroutine + def register(self): + yield from self._sdn_handler.register() + + def set_sdn_account(self,account): + if (account.name in self._account): + self._log.error("SDN Account is already set") + else: + sdn_account = RwsdnYang.SDNAccount() + sdn_account.from_dict(account.as_dict()) + sdn_account.name = account.name + self._account[account.name] = sdn_account + self._log.debug("Account set is %s , %s",type(self._account), self._account) + + def del_sdn_account(self, name): + self._log.debug("Account deleted is %s , %s", type(self._account), name) + del self._account[name] + + + def get_sdn_account(self, name): + """ + Creates an object for class RwsdnYang.SdnAccount() + """ + if (name in self._account): + return self._account[name] + else: + self._log.error("SDN account is not configured") + + + def get_sdn_plugin(self,name): + """ + Loads rw.sdn plugin via libpeas + """ + if (name in self._sdn): + return self._sdn[name] + account = self.get_sdn_account(name) + plugin_name = getattr(account, account.account_type).plugin_name + self._log.debug("SDN plugin being created") + plugin = rw_peas.PeasPlugin(plugin_name, 'RwSdn-1.0') + engine, info, extension = plugin() + + self._sdn[name] = plugin.get_interface("Topology") + try: + rc = self._sdn[name].init(self._log_hdl) + assert rc == RwStatus.SUCCESS + except: + self._log.error("ERROR:SDN plugin instantiation failed ") + else: + self._log.debug("SDN plugin successfully instantiated") + return self._sdn[name] + + def fetch_vnffgr(self,vnffgr_id): + if vnffgr_id not in self._vnffgr_list: + self._log.error("VNFFGR with id %s not present in VNFFGMgr", vnffgr_id) + msg = "VNFFGR with id {} not present in VNFFGMgr".format(vnffgr_id) + raise VnffgrDoesNotExist(msg) + self.update_vnffgrs(self._vnffgr_list[vnffgr_id].sdn_account) + vnffgr = self._vnffgr_list[vnffgr_id].deep_copy() + self._log.debug("VNFFGR for id %s is %s",vnffgr_id,vnffgr) + return vnffgr + + def create_vnffgr(self,vnffgr,classifier_list): + """ + """ + self._log.debug("Received VNFFG chain Create msg %s",vnffgr) + if vnffgr.id in self._vnffgr_list: + self._log.error("VNFFGR with id %s already present in VNFFGMgr", vnffgr.id) + vnffgr.operational_status = 'failed' + msg = "VNFFGR with id {} already present in VNFFGMgr".format(vnffgr.id) + raise 
VnffgrAlreadyExist(msg) + + self._vnffgr_list[vnffgr.id] = vnffgr + vnffgr.operational_status = 'init' + if len(self._account) == 0: + self._log.error("SDN Account not configured") + vnffgr.operational_status = 'failed' + return + if vnffgr.sdn_account: + sdn_acct_name = vnffgr.sdn_account + else: + self._log.error("SDN Account is not associated to create VNFFGR") + # TODO Fail the VNFFGR creation if SDN account is not associated + #vnffgr.operational_status = 'failed' + #msg = "SDN Account is not associated to create VNFFGR" + #raise VnffgrCreationFailed(msg) + sdn_account = [sdn_account.name for _,sdn_account in self._account.items()] + sdn_acct_name = sdn_account[0] + vnffgr.sdn_account = sdn_acct_name + sdn_plugin = self.get_sdn_plugin(sdn_acct_name) + + for rsp in vnffgr.rsp: + vnffg = RwsdnYang.VNFFGChain() + vnffg.name = rsp.name + + for index,cp_ref in enumerate(rsp.vnfr_connection_point_ref): + cpath = vnffg.vnf_chain_path.add() + cpath.order=cp_ref.hop_number + cpath.service_function_type = cp_ref.service_function_type + cpath.nsh_aware=True + cpath.transport_type = 'vxlan-gpe' + + vnfr=cpath.vnfr_ids.add() + vnfr.vnfr_id = cp_ref.vnfr_id_ref + vnfr.vnfr_name = cp_ref.vnfr_name_ref + vnfr.mgmt_address = cp_ref.connection_point_params.mgmt_address + vnfr.mgmt_port = 5000 + + vdu = vnfr.vdu_list.add() + vdu.name = cp_ref.connection_point_params.name + vdu.port_id = cp_ref.connection_point_params.port_id + vdu.vm_id = cp_ref.connection_point_params.vm_id + vdu.address = cp_ref.connection_point_params.address + vdu.port = cp_ref.connection_point_params.port + + self._log.debug("VNFFG chain msg is %s",vnffg) + rc,rs = sdn_plugin.create_vnffg_chain(self._account[sdn_acct_name],vnffg) + if rc != RwTypes.RwStatus.SUCCESS: + vnffgr.operational_status = 'failed' + msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id) + raise VnffgrCreationFailed(msg) + + self._log.info("VNFFG chain created successfully for rsp with id %s",rsp.id) + + for classifier in classifier_list: + cl_rsp = [_rsp for _rsp in vnffgr.rsp if classifier.rsp_id_ref == _rsp.vnffgd_rsp_id_ref] + if len(cl_rsp) > 0: + cl_rsp_name = cl_rsp[0].name + else: + self._log.error("No RSP wiht name %s found; Skipping classifier %s creation",classifier.rsp_id_ref,classifier.name) + continue + vnffgcl = RwsdnYang.VNFFGClassifier() + vnffgcl.name = classifier.name + vnffgcl.rsp_name = cl_rsp_name + #vnffgcl.port_id ='dfc3eb6b-3753-4183-93c8-df7c25723fd0' + #vnffgcl.vm_id = 'bd86ade8-03bf-4f03-aa3e-375a7cb5a629' + acl = vnffgcl.match_attributes.add() + acl.name = vnffgcl.name + acl.ip_proto = classifier.match_attributes.ip_proto + acl.source_ip_address = classifier.match_attributes.source_ip_address + '/32' + acl.source_port = classifier.match_attributes.source_port + acl.destination_ip_address = classifier.match_attributes.destination_ip_address + '/32' + acl.destination_port = classifier.match_attributes.destination_port + + self._log.debug(" Creating VNFFG Classifier Classifier %s for RSP: %s",vnffgcl.name,vnffgcl.rsp_name) + rc,rs = sdn_plugin.create_vnffg_classifier(self._account[sdn_acct_name],vnffgcl) + if rc != RwTypes.RwStatus.SUCCESS: + self._log.error("VNFFG Classifier cretaion failed for Classifier %s for RSP ID: %s",classifier.name,classifier.rsp_id_ref) + #vnffgr.operational_status = 'failed' + #msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id) + #raise VnffgrCreationFailed(msg) + + vnffgr.operational_status = 'running' + self.update_vnffgrs(vnffgr.sdn_account) + return vnffgr + + def 
update_vnffgrs(self,sdn_acct_name): + """ + Update VNFFGR by reading data from SDN Plugin + """ + sdn_plugin = self.get_sdn_plugin(sdn_acct_name) + rc,rs = sdn_plugin.get_vnffg_rendered_paths(self._account[sdn_acct_name]) + if rc != RwTypes.RwStatus.SUCCESS: + msg = "Reading of VNFFGR from SDN Plugin failed" + raise VnffgrUpdateFailed(msg) + + vnffgr_list = [_vnffgr for _vnffgr in self._vnffgr_list.values() if _vnffgr.sdn_account == sdn_acct_name and _vnffgr.operational_status == 'running'] + + for _vnffgr in vnffgr_list: + for _vnffgr_rsp in _vnffgr.rsp: + vnffg_rsp_list = [vnffg_rsp for vnffg_rsp in rs.vnffg_rendered_path if vnffg_rsp.name == _vnffgr_rsp.name] + if vnffg_rsp_list is not None and len(vnffg_rsp_list) > 0: + vnffg_rsp = vnffg_rsp_list[0] + if len(vnffg_rsp.rendered_path_hop) != len(_vnffgr_rsp.vnfr_connection_point_ref): + _vnffgr.operational_status = 'failed' + self._log.error("Received hop count %d doesnt match the VNFFGD hop count %d", len(vnffg_rsp.rendered_path_hop), + len(_vnffgr_rsp.vnfr_connection_point_ref)) + msg = "Fetching of VNFFGR with id {} failed".format(_vnffgr.id) + raise VnffgrUpdateFailed(msg) + _vnffgr_rsp.path_id = vnffg_rsp.path_id + for index, rendered_hop in enumerate(vnffg_rsp.rendered_path_hop): + for vnfr_cp_ref in _vnffgr_rsp.vnfr_connection_point_ref: + if rendered_hop.vnfr_name == vnfr_cp_ref.vnfr_name_ref: + vnfr_cp_ref.hop_number = rendered_hop.hop_number + vnfr_cp_ref.service_index = rendered_hop.service_index + vnfr_cp_ref.service_function_forwarder.name = rendered_hop.service_function_forwarder.name + vnfr_cp_ref.service_function_forwarder.ip_address = rendered_hop.service_function_forwarder.ip_address + vnfr_cp_ref.service_function_forwarder.port = rendered_hop.service_function_forwarder.port + else: + _vnffgr.operational_status = 'failed' + self._log.error("VNFFGR RSP with name %s in VNFFG %s not found",_vnffgr_rsp.name, _vnffgr.id) + msg = "Fetching of VNFFGR with name {} failed".format(_vnffgr_rsp.name) + raise VnffgrUpdateFailed(msg) + + + def terminate_vnffgr(self,vnffgr_id,sdn_account_name = None): + """ + Deletet the VNFFG chain + """ + if vnffgr_id not in self._vnffgr_list: + self._log.error("VNFFGR with id %s not present in VNFFGMgr during termination", vnffgr_id) + msg = "VNFFGR with id {} not present in VNFFGMgr during termination".format(vnffgr_id) + raise VnffgrDoesNotExist(msg) + self._log.info("Received VNFFG chain terminate for id %s",vnffgr_id) + if sdn_account_name is None: + sdn_account = [sdn_account.name for _,sdn_account in self._account.items()] + sdn_account_name = sdn_account[0] + sdn_plugin = self.get_sdn_plugin(sdn_account_name) + sdn_plugin.terminate_vnffg_chain(self._account[sdn_account_name],vnffgr_id) + sdn_plugin.terminate_vnffg_classifier(self._account[sdn_account_name],vnffgr_id) + del self._vnffgr_list[vnffgr_id] + +class SDNAccountDtsHandler(object): + XPATH = "C,/rw-sdn:sdn-account" + + def __init__(self, dts, log, parent): + self._dts = dts + self._log = log + self._parent = parent + + self._sdn_account = {} + + def _set_sdn_account(self, account): + self._log.info("Setting sdn account: {}".format(account)) + if account.name in self._sdn_account: + self._log.error("SDN Account with name %s already exists. 
Ignoring config", account.name); + self._sdn_account[account.name] = account + self._parent.set_sdn_account(account) + + def _del_sdn_account(self, account_name): + self._log.info("Deleting sdn account: {}".format(account_name)) + del self._sdn_account[account_name] + + self._parent.del_sdn_account(account_name) + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action) + if action == rwdts.AppconfAction.INSTALL and xact.id is None: + self._log.debug("No xact handle. Skipping apply config") + return RwTypes.RwStatus.SUCCESS + + return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for SDN Account config """ + + self._log.info("SDN Cloud account config received: %s", msg) + + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + if fref.is_field_deleted(): + # Delete the sdn account record + self._del_sdn_account(msg.name) + else: + if msg.name in self._sdn_account: + msg = "Cannot update a SDN account that already was set." + self._log.error(msg) + xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE, + SDNAccountDtsHandler.XPATH, + msg) + raise SdnAccountExistsError(msg) + + # Set the sdn account record + self._set_sdn_account(msg) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + + self._log.debug("Registering for Sdn Account config using xpath: %s", + SDNAccountDtsHandler.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self._dts.appconf_group_create(acg_handler) as acg: + acg.register( + xpath=SDNAccountDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare + ) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml new file mode 100644 index 0000000..7187897 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml @@ -0,0 +1,23 @@ + + + + + + + + + diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py new file mode 100755 index 0000000..b55511b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py @@ -0,0 +1,363 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import collections +import re + + +class Attribute(collections.namedtuple("Attribute", "module name")): + def __repr__(self): + return "{}:{}".format(self.module, self.name) + + +class ListElement(collections.namedtuple("List", "module name key value")): + def __repr__(self): + return "{}:{}[{}={}]".format(self.module, self.name, self.key, self.value) + + +def tokenize(xpath): + """Return a list of tokens representing an xpath + + The types of xpaths that this selector supports is extremely limited. + The xpath is required to be an absolute path delimited by a + forward-slash. Each of the parts (elements between delimiters) is + treated as one of two possible types: + + - an attribute + - a list element + + An attribute is a normal python attribute on an object. A list element + is an element within a list, which is identified by a key value (like a + yang list, although this is more properly a dict in python). + + Each attribute is expected to have the form, + + : + + A valid variable name (or namespace) follows the python regular expression, + + [a-zA-Z0-9-_]+ + + A list entry has the form, + + :[:=] + + The expression in the square brackets is the key of the required + element, and the value that that key must have. + + Arguments: + xpath - a string containing an xpath expression + + Raises: + A ValueError is raised if the xpath cannot be parsed. + + Returns: + a list of tokens + + """ + # define the symbols that are valid for a variable name in yang + name = "[a-zA-Z0-9-_]+" + + # define a set of regular expressions for parsing the xpath + pattern_attribute = re.compile("({t}):({t})$".format(t=name)) + pattern_key_value = re.compile("^{t}:({t})\s*=\s*(.*)$".format(t=name)) + pattern_quote = re.compile("^[\'\"](.*)[\'\"]$") + pattern_list = re.compile("^(.*)\[(.*)\]$") + + def dash_to_underscore(text): + return text.replace('-', '_') + + # Iterate through the parts of the xpath (NB: because the xpaths are + # required to be absolute paths, the first character is going to be the + # forward slash. As a result, when the string is split, the first + # element with be an empty string). + tokens = list() + for part in xpath.split("/")[1:]: + + # Test the part to see if it is a attribute + result = pattern_attribute.match(part) + if result is not None: + module, name = result.groups() + + # Convert the dashes to underscores + name = dash_to_underscore(name) + module = dash_to_underscore(module) + + tokens.append(Attribute(module, name)) + + continue + + # Test the part to see if it is a list + result = pattern_list.match(part) + if result is not None: + attribute, keyvalue = result.groups() + + module, name = pattern_attribute.match(attribute).groups() + key, value = pattern_key_value.match(keyvalue).groups() + + # Convert the dashes to underscore (but not in the key value) + key = dash_to_underscore(key) + name = dash_to_underscore(name) + module = dash_to_underscore(module) + + result = pattern_quote.match(value) + if result is not None: + value = result.group(1) + + tokens.append(ListElement(module, name, key, value)) + + continue + + raise ValueError("cannot parse '{}'".format(part)) + + return tokens + + +class XPathAttribute(object): + """ + This class is used to represent a reference to an attribute. If you use + getattr on an attribute, it may give you the value of the attribute rather + than a reference to it. What is really wanted is a representation of the + attribute so that its value can be both retrieved and set. That is what + this class provides. 
+ """ + + def __init__(self, obj, name): + """Create an instance of XPathAttribute + + Arguments: + obj - the object containing the attribute + name - the name of an attribute + + Raises: + A ValueError is raised if the provided object does not have the + associated attribute. + + """ + if not hasattr(obj, name): + msg = "The provided object does not contain the associated attribute" + raise ValueError(msg) + + self.obj = obj + self.name = name + + def __repr__(self): + return self.value + + @property + def value(self): + return getattr(self.obj, self.name) + + @value.setter + def value(self, value): + """Set the value of the attribute + + Arguments: + value - the new value that the attribute should take + + Raises: + An TypeError is raised if the provided value cannot be cast the + current type of the attribute. + + """ + attr_type = type(self.value) + attr_value = value + + # The only way we can currently get the type of the atrribute is if it + # has an existing value. So if the attribute has an existing value, + # cast the value to the type of the attribute value. + if attr_type is not type(None): + try: + attr_value = attr_type(attr_value) + + except ValueError: + msg = "expected type '{}', but got '{}' instead" + raise TypeError(msg.format(attr_type.__name__, type(value).__name__)) + + setattr(self.obj, self.name, attr_value) + + +class XPathElement(XPathAttribute): + """ + This class is used to represent a reference to an element within a list. + Unlike scalar attributes, it is not entirely necessary to have this class + to represent the attribute because the element cannot be a simple scalar. + However, this class is used because it creates a uniform interface that can + be used by the setxattr and getxattr functions. + """ + + def __init__(self, container, key, value): + """Create an instance of XPathElement + + Arguments: + container - the object that contains the element + key - the name of the field that is used to identify the + element + value - the value of the key that identifies the element + + """ + self._container = container + self._value = value + self._key = key + + @property + def value(self): + for element in self._container: + if getattr(element, self._key) == self._value: + return element + + raise ValueError("specified element does not exist") + + @value.setter + def value(self, value): + existing = None + for element in self._container: + if getattr(element, self._key) == self._value: + existing = element + break + + if existing is not None: + self._container.remove(existing) + + self._container.append(value) + + +class XPathSelector(object): + def __init__(self, xpath): + """Creates an instance of XPathSelector + + Arguments: + xpath - a string containing an xpath expression + + """ + self._tokens = tokenize(xpath) + + + def __call__(self, obj): + """Returns a reference to an attribute on the provided object + + Using the defined xpath, an attribute is selected from the provided + object and returned. + + Arguments: + obj - a GI object + + Raises: + A ValueError is raised if the specified element in a list cannot be + found. + + Returns: + an XPathAttribute that reference the specified attribute + + """ + current = obj + for token in self._tokens[:-1]: + # If the object is contained within a list, we will need to iterate + # through the tokens until we find a token that is a field of the + # object. 
+ if token.name not in current.fields: + if current is obj: + continue + + raise ValueError('cannot find attribute {}'.format(token.name)) + + # If the token is a ListElement, try to find the matching element + if isinstance(token, ListElement): + for element in getattr(current, token.name): + if getattr(element, token.key) == token.value: + current = element + break + + else: + raise ValueError('unable to find {}'.format(token.value)) + + else: + # Attribute the variable matching the name of the token + current = getattr(current, token.name) + + # Process the final token + token = self._tokens[-1] + + # If the token represents a list element, find the element in the list + # and return an XPathElement + if isinstance(token, ListElement): + container = getattr(current, token.name) + for element in container: + if getattr(element, token.key) == token.value: + return XPathElement(container, token.key, token.value) + + else: + raise ValueError('unable to find {}'.format(token.value)) + + # Otherwise, return the object as an XPathAttribute + return XPathAttribute(current, token.name) + + @property + def tokens(self): + """The tokens in the xpath expression""" + return self._tokens + + +# A global cache to avoid repeated parsing of known xpath expressions +__xpath_cache = dict() + + +def reset_cache(): + global __xpath_cache + __xpath_cache = dict() + + +def getxattr(obj, xpath): + """Return an attribute on the provided object + + The xpath is parsed and used to identify an attribute on the provided + object. The object is expected to be a GI object where each attribute that + is accessible via an xpath expression is contained in the 'fields' + attribute of the object (NB: this is not true of GI lists, which do not + have a 'fields' attribute). + + A selector is create for each xpath and used to find the specified + attribute. The accepted xpath expressions are those supported by the + XPathSelector class. The parsed xpath expression is cached so that + subsequent parsing is unnecessary. However, selectors are stored in a + global dictionary and this means that this function is not thread-safe. + + Arguments: + obj - a GI object + xpath - a string containing an xpath expression + + Returns: + an attribute on the provided object + + """ + if xpath not in __xpath_cache: + __xpath_cache[xpath] = XPathSelector(xpath) + + return __xpath_cache[xpath](obj).value + + +def setxattr(obj, xpath, value): + """Set the attribute referred to by the xpath + + Arguments: + obj - a GI object + xpath - a string containing an xpath expression + value - the new value of the attribute + + """ + if xpath not in __xpath_cache: + __xpath_cache[xpath] = XPathSelector(xpath) + + __xpath_cache[xpath](obj).value = value \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py new file mode 100755 index 0000000..e56c32b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwnsmtasklet + +class Tasklet(rift.tasklets.rwnsmtasklet.NsmTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt new file mode 100644 index 0000000..7cd388f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt @@ -0,0 +1,29 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwresmgrtasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/rwresmgr_config.py + rift/tasklets/${TASKLET_NAME}/rwresmgr_core.py + rift/tasklets/${TASKLET_NAME}/rwresmgr_events.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py new file mode 100644 index 0000000..f93e30e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
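The find_upward helper in the Makefile above searches the current directory and then each parent directory in turn until the named file (Makefile.top here) is found or the filesystem root is reached. A rough Python equivalent, included only to illustrate the search; the function and its behaviour at the root are assumptions, not part of the build system:

    import os

    def find_upward(filename, start="."):
        """Return the path of `filename` found in `start` or one of its ancestors,
        or None if the filesystem root is reached without a match."""
        path = os.path.abspath(start)
        while True:
            candidate = os.path.join(path, filename)
            if os.path.exists(candidate):
                return candidate
            parent = os.path.dirname(path)
            if parent == path:  # reached the root
                return None
            path = parent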
+ +from .rwresmgrtasklet import ResMgrTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py new file mode 100644 index 0000000..1494cff --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py @@ -0,0 +1,123 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import logging +import time +import uuid +from enum import Enum + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') + +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwResourceMgrYang, + RwLaunchpadYang, + RwcalYang, +) + +from gi.repository.RwTypes import RwStatus +import rift.tasklets +import rift.mano.cloud + + +class ResourceMgrConfig(object): + XPATH_POOL_OPER_DATA = "D,/rw-resource-mgr:resource-pool-records" + def __init__(self, dts, log, rwlog_hdl, loop, parent): + self._dts = dts + self._log = log + self._rwlog_hdl = rwlog_hdl + self._loop = loop + self._parent = parent + + self._cloud_sub = None + + @asyncio.coroutine + def register(self): + yield from self.register_resource_pool_operational_data() + self.register_cloud_account_config() + + def register_cloud_account_config(self): + def on_add_cloud_account_apply(account): + self._log.debug("Received on_add_cloud_account: %s", account) + self._parent.add_cloud_account_config(account) + + def on_delete_cloud_account_apply(account_name): + self._log.debug("Received on_delete_cloud_account_apply: %s", account_name) + self._parent.delete_cloud_account_config(account_name) + + @asyncio.coroutine + def on_delete_cloud_account_prepare(account_name): + self._log.debug("Received on_delete_cloud_account_prepare: %s", account_name) + self._parent.delete_cloud_account_config(account_name, dry_run=True) + + @asyncio.coroutine + def on_update_cloud_account_prepare(account): + raise NotImplementedError( + "Resource manager does not support updating cloud account" + ) + + cloud_callbacks = rift.mano.cloud.CloudAccountConfigCallbacks( + on_add_apply=on_add_cloud_account_apply, + on_delete_apply=on_delete_cloud_account_apply, + on_delete_prepare=on_delete_cloud_account_prepare, + on_update_prepare=on_update_cloud_account_prepare, + ) + + self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber( + self._dts, self._log, self._rwlog_hdl, cloud_callbacks + ) + self._cloud_sub.register() + + @asyncio.coroutine + def register_resource_pool_operational_data(self): + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + self._log.debug("ResourceMgr providing resource-pool information") + msg = RwResourceMgrYang.ResourcePoolRecords() + + cloud_accounts = self._parent.get_cloud_account_names() + for cloud_account_name in 
cloud_accounts: + pools = self._parent.get_pool_list(cloud_account_name) + self._log.debug("Publishing information about cloud account %s %d resource pools", + cloud_account_name, len(pools)) + + cloud_account_msg = msg.cloud_account.add() + cloud_account_msg.name = cloud_account_name + for pool in pools: + pool_info = self._parent.get_pool_info(cloud_account_name, pool.name) + cloud_account_msg.records.append(pool_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, + ResourceMgrConfig.XPATH_POOL_OPER_DATA, + msg=msg,) + + self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: %s", + ResourceMgrConfig.XPATH_POOL_OPER_DATA) + + handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + response = yield from self._dts.register(xpath=ResourceMgrConfig.XPATH_POOL_OPER_DATA, + handler=handler, + flags=rwdts.Flag.PUBLISHER) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py new file mode 100644 index 0000000..c212abd --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py @@ -0,0 +1,1185 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
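The resource manager core below picks a compute flavor by first matching the requested EPA attributes against the flavors the CAL plugin already knows about and only then, if the cloud account supports dynamic flavors, creating a new one. A condensed sketch of that decision flow (see ComputePool.select_resource_flavor further down); cal, request and matches are placeholders for the real ResourceMgrCALHandler, VDU request and EPA-matching logic:

    # Illustrative sketch only, not the actual implementation.
    def pick_flavor(cal, request, matches):
        for flavor in cal.get_compute_flavor_info_list():
            if matches(flavor, request):
                return flavor.id                        # reuse an existing matching flavor
        if cal.dynamic_flavor_supported():
            return cal.create_compute_flavor(request)   # account allows dynamic flavor creation
        # stands in for ResMgrNoResourcesAvailable in the real code
        raise RuntimeError("no flavor with matching EPA attributes")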
+ +# +# + +import uuid +import collections +import asyncio + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwResourceMgrYang, + RwLaunchpadYang, + RwcalYang, +) + +from gi.repository.RwTypes import RwStatus + +class ResMgrCALNotPresent(Exception): + pass + +class ResMgrCloudAccountNotFound(Exception): + pass + +class ResMgrCloudAccountExists(Exception): + pass + +class ResMgrCloudAccountInUse(Exception): + pass + +class ResMgrDuplicatePool(Exception): + pass + +class ResMgrPoolNotAvailable(Exception): + pass + +class ResMgrPoolOperationFailed(Exception): + pass + +class ResMgrDuplicateEventId(Exception): + pass + +class ResMgrUnknownEventId(Exception): + pass + +class ResMgrUnknownResourceId(Exception): + pass + +class ResMgrResourceIdBusy(Exception): + pass + +class ResMgrResourceIdNotAllocated(Exception): + pass + +class ResMgrNoResourcesAvailable(Exception): + pass + +class ResMgrResourcesInitFailed(Exception): + pass + +class ResMgrCALOperationFailure(Exception): + pass + + +class ResourceMgrCALHandler(object): + def __init__(self, log, log_hdl, account): + self._log = log + self._account = account.cal_account_msg + self._rwcal = account.cal + if account.account_type == 'aws': + self._subnets = ["172.31.97.0/24", "172.31.98.0/24", "172.31.99.0/24", "172.31.100.0/24", "172.31.101.0/24"] + else: + self._subnets = ["11.0.0.0/24", "12.0.0.0/24", "13.0.0.0/24", "14.0.0.0/24", "15.0.0.0/24"] + self._subnet_ptr = 0 + + def _select_link_subnet(self): + subnet = self._subnets[self._subnet_ptr] + self._subnet_ptr += 1 + if self._subnet_ptr == len(self._subnets): + self._subnet_ptr = 0 + return subnet + + def create_virtual_network(self, req_params): + rc, rsp = self._rwcal.get_virtual_link_list(self._account) + assert rc == RwStatus.SUCCESS + links = [vlink for vlink in rsp.virtual_link_info_list if vlink.name == req_params.name] + if links: + return links[0].virtual_link_id + + params = RwcalYang.VirtualLinkReqParams() + params.from_dict(req_params.as_dict()) + params.subnet = self._select_link_subnet() + rc, rs = self._rwcal.create_virtual_link(self._account, params) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-network-allocate operation failed for cloud account: %s", self._account.name) + raise ResMgrCALOperationFailure("Virtual-network allocate operationa failed for cloud account: %s" %(self._account.name)) + return rs + + + def delete_virtual_network(self, network_id): + rc = self._rwcal.delete_virtual_link(self._account, network_id) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-network-release operation failed for cloud account: %s. ResourceID: %s", + self._account.name, + network_id) + raise ResMgrCALOperationFailure("Virtual-network release operation failed for cloud account: %s. ResourceId: %s" %(self._account.name, network_id)) + + def get_virtual_network_info(self, network_id): + rc, rs = self._rwcal.get_virtual_link(self._account, network_id) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-network-info operation failed for cloud account: %s. ResourceID: %s", + self._account.name, + network_id) + raise ResMgrCALOperationFailure("Virtual-network-info operation failed for cloud account: %s. 
ResourceID: %s" %(self._account.name, network_id)) + return rs + + def create_virtual_compute(self, req_params): + rc, rsp = self._rwcal.get_vdu_list(self._account) + assert rc == RwStatus.SUCCESS + vdus = [vm for vm in rsp.vdu_info_list if vm.name == req_params.name] + if vdus: + return vdus[0].vdu_id + + params = RwcalYang.VDUInitParams() + params.from_dict(req_params.as_dict()) + + image_checksum = req_params.image_checksum if req_params.has_field("image_checksum") else None + params.image_id = self.get_image_id_from_image_info(req_params.image_name, image_checksum) + + self._log.info("Creating virtual-compute, Name: %s, ImageID: %s, FlavorID: %s", params.name, params.image_id, params.flavor_id) + rc, rs = self._rwcal.create_vdu(self._account, params) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-compute-create operation failed for cloud account: %s", self._account.name) + raise ResMgrCALOperationFailure("Virtual-compute-create operation failed for cloud account: %s" %(self._account.name)) + return rs + + def modify_virtual_compute(self, req_params): + rc = self._rwcal.modify_vdu(self._account, req_params) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-compute-modify operation failed for cloud account: %s", self._account.name) + raise ResMgrCALOperationFailure("Virtual-compute-modify operation failed for cloud account: %s" %(self._account.name)) + + def delete_virtual_compute(self, compute_id): + rc = self._rwcal.delete_vdu(self._account, compute_id) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-compute-release operation failed for cloud account: %s. ResourceID: %s", + self._account.name, + compute_id) + raise ResMgrCALOperationFailure("Virtual-compute-release operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, compute_id)) + + def get_virtual_compute_info(self, compute_id): + rc, rs = self._rwcal.get_vdu(self._account, compute_id) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-compute-info operation failed for cloud account: %s. ResourceID: %s", + self._account.name, + compute_id) + raise ResMgrCALOperationFailure("Virtual-compute-info operation failed for cloud account: %s. 
ResourceID: %s" %(self._account.name, compute_id)) + return rs + + def get_compute_flavor_info_list(self): + rc, rs = self._rwcal.get_flavor_list(self._account) + if rc != RwStatus.SUCCESS: + self._log.error("Get-flavor-info-list operation failed for cloud account: %s", + self._account.name) + raise ResMgrCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(self._account.name)) + return rs.flavorinfo_list + + def create_compute_flavor(self, request): + flavor = RwcalYang.FlavorInfoItem() + flavor.name = str(uuid.uuid4()) + epa_types = ['vm_flavor', 'guest_epa'] + epa_dict = {k: v for k, v in request.as_dict().items() if k in epa_types} + flavor.from_dict(epa_dict) + + self._log.info("Creating flavor: %s", flavor) + rc, rs = self._rwcal.create_flavor(self._account, flavor) + if rc != RwStatus.SUCCESS: + self._log.error("Create-flavor operation failed for cloud account: %s", + self._account.name) + raise ResMgrCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(self._account.name)) + return rs + + def get_image_info_list(self): + rc, rs = self._rwcal.get_image_list(self._account) + if rc != RwStatus.SUCCESS: + self._log.error("Get-image-info-list operation failed for cloud account: %s", + self._account.name) + raise ResMgrCALOperationFailure("Get-image-info-list operation failed for cloud account: %s" %(self._account.name)) + return rs.imageinfo_list + + def get_image_id_from_image_info(self, image_name, image_checksum=None): + self._log.debug("Looking up image id for image name %s and checksum %s on cloud account: %s", + image_name, image_checksum, self._account.name + ) + image_list = self.get_image_info_list() + matching_images = [i for i in image_list if i.name == image_name] + + # If the image checksum was filled in then further filter the images by the checksum + if image_checksum is not None: + matching_images = [i for i in matching_images if i.checksum == image_checksum] + else: + self._log.warning("Image checksum not provided. 
Lookup using image name only.") + + if len(matching_images) == 0: + raise ResMgrCALOperationFailure("Could not find image name {} (using checksum: {}) for cloud account: {}".format( + image_name, image_checksum, self._account.name + )) + + elif len(matching_images) > 1: + unique_checksums = {i.checksum for i in matching_images} + if len(unique_checksums) > 1: + msg = ("Too many images with different checksums matched " + "image name of %s for cloud account: %s" % (image_name, self._account.name)) + raise ResMgrCALOperationFailure(msg) + + return matching_images[0].id + + def get_image_info(self, image_id): + rc, rs = self._rwcal.get_image(self._account, image_id) + if rc != RwStatus.SUCCESS: + self._log.error("Get-image-info-list operation failed for cloud account: %s", + self._account.name) + raise ResMgrCALOperationFailure("Get-image-info operation failed for cloud account: %s" %(self._account.name)) + return rs.imageinfo_list + + def dynamic_flavor_supported(self): + return getattr(self._account, self._account.account_type).dynamic_flavor_support + + +class Resource(object): + def __init__(self, resource_id, resource_type): + self._id = resource_id + self._type = resource_type + + @property + def resource_id(self): + return self._id + + @property + def resource_type(self): + return self._type + + def cleanup(self): + pass + + +class ComputeResource(Resource): + def __init__(self, resource_id, resource_type): + super(ComputeResource, self).__init__(resource_id, resource_type) + + +class NetworkResource(Resource): + def __init__(self, resource_id, resource_type): + super(NetworkResource, self).__init__(resource_id, resource_type) + + +class ResourcePoolInfo(object): + def __init__(self, name, pool_type, resource_type, max_size): + self.name = name + self.pool_type = pool_type + self.resource_type = resource_type + self.max_size = max_size + + @classmethod + def from_dict(cls, pool_dict): + return cls( + pool_dict["name"], + pool_dict["pool_type"], + pool_dict["resource_type"], + pool_dict["max_size"], + ) + + +class ResourcePool(object): + def __init__(self, log, loop, pool_info, resource_class, cal): + self._log = log + self._loop = loop + self._name = pool_info.name + self._pool_type = pool_info.pool_type + self._resource_type = pool_info.resource_type + self._cal = cal + self._resource_class = resource_class + + self._max_size = pool_info.max_size + + self._status = 'unlocked' + ### A Dictionary of all the resources in this pool, keyed by CAL resource-id + self._all_resources = {} + ### A List of free resources in this pool + self._free_resources = [] + ### A Dictionary of all the allocated resources in this pool, keyed by CAL resource-id + self._allocated_resources = {} + + @property + def name(self): + return self._name + + @property + def cal(self): + """ This instance's ResourceMgrCALHandler """ + return self._cal + + @property + def pool_type(self): + return self._pool_type + + @property + def resource_type(self): + return self._resource_type + + @property + def max_size(self): + return self._max_size + + @property + def status(self): + return self._status + + def in_use(self): + if len(self._allocated_resources) != 0: + return True + else: + return False + + def update_cal_handler(self, cal): + if self.in_use(): + raise ResMgrPoolOperationFailed( + "Cannot update CAL plugin for in use pool" + ) + + self._cal = cal + + def lock_pool(self): + self._log.info("Locking the pool :%s", self.name) + self._status = 'locked' + + def unlock_pool(self): + self._log.info("Unlocking the pool 
:%s", self.name) + self._status = 'unlocked' + + def add_resource(self, resource_info): + self._log.info("Adding static resource to Pool: %s, Resource-id: %s Resource-Type: %s", + self.name, + resource_info.resource_id, + self.resource_type) + + ### Add static resources to pool + resource = self._resource_class(resource_info.resource_id, 'static') + assert resource.resource_id == resource_info.resource_id + self._all_resources[resource.resource_id] = resource + self._free_resources.append(resource) + + def delete_resource(self, resource_id): + if resource_id not in self._all_resources: + self._log.error("Resource Id: %s not present in pool: %s. Delete operation failed", resource_id, self.name) + raise ResMgrUnknownResourceId("Resource Id: %s requested for release is not found" %(resource_id)) + + if resource_id in self._allocated_resources: + self._log.error("Resource Id: %s in use. Delete operation failed", resource_id) + raise ResMgrResourceIdBusy("Resource Id: %s requested for release is in use" %(resource_id)) + + self._log.info("Deleting resource: %s from pool: %s, Resource-Type", + resource_id, + self.name, + self.resource_type) + + resource = self._all_resources.pop(resource_id) + self._free_resources.remove(resource) + resource.cleanup() + del resource + + @asyncio.coroutine + def read_resource_info(self, resource_id): + if resource_id not in self._all_resources: + self._log.error("Resource Id: %s not present in pool: %s. Read operation failed", resource_id, self.name) + raise ResMgrUnknownResourceId("Resource Id: %s requested for read is not found" %(resource_id)) + + if resource_id not in self._allocated_resources: + self._log.error("Resource Id: %s not in use. Read operation failed", resource_id) + raise ResMgrResourceIdNotAllocated("Resource Id: %s not in use. Read operation failed" %(resource_id)) + + resource = self._allocated_resources[resource_id] + resource_info = self.get_resource_info(resource) + return resource_info + + def get_pool_info(self): + info = RwResourceMgrYang.ResourceRecordInfo() + self._log.info("Providing info for pool: %s", self.name) + info.name = self.name + if self.pool_type: + info.pool_type = self.pool_type + if self.resource_type: + info.resource_type = self.resource_type + if self.status: + info.pool_status = self.status + + info.total_resources = len(self._all_resources) + info.free_resources = len(self._free_resources) + info.allocated_resources = len(self._allocated_resources) + return info + + def cleanup(self): + for _, v in self._all_resources.items(): + v.cleanup() + + def _allocate_static_resource(self, request, resource_type): + unit_type = {'compute': 'VDU', 'network':'VirtualLink'} + match_found = False + resource = None + self._log.info("Doing resource match from pool :%s", self._free_resources) + for resource in self._free_resources: + resource_info = self.get_resource_info(resource) + self._log.info("Attempting to match %s-requirements for %s: %s with resource-id :%s", + resource_type, unit_type[resource_type],request.name, resource.resource_id) + if self.match_epa_params(resource_info, request): + if self.match_image_params(resource_info, request): + match_found = True + self._log.info("%s-requirements matched for %s: %s with resource-id :%s", + resource_type, unit_type[resource_type],request.name, resource.resource_id) + self.initialize_resource_in_cal(resource, request) + break + + if not match_found: + self._log.error("No match found for %s-requirements for %s: %s in pool: %s. 
%s instantiation failed", + resource_type, + unit_type[resource_type], + request.name, + self.name, + unit_type[resource_type]) + return None + else: + ### Move resource from free-list into allocated-list + self._log.info("Allocating the static resource with resource-id: %s for %s: %s", + resource.resource_id, + unit_type[resource_type],request.name) + self._free_resources.remove(resource) + self._allocated_resources[resource.resource_id] = resource + + return resource + + @asyncio.coroutine + def allocate_resource(self, request): + resource = yield from self.allocate_resource_in_cal(request) + resource_info = self.get_resource_info(resource) + return resource.resource_id, resource_info + + @asyncio.coroutine + def release_resource(self, resource_id): + self._log.debug("Releasing resource_id %s in pool %s", resource_id, self.name) + if resource_id not in self._allocated_resources: + self._log.error("Failed to release a resource with resource-id: %s in pool: %s. Resource not known", + resource_id, + self.name) + raise ResMgrUnknownResourceId("Failed to release resource with resource-id: %s. Unknown resource-id" %(resource_id)) + + ### Get resource object + resource = self._allocated_resources.pop(resource_id) + self.uninitialize_resource_in_cal(resource) + yield from self.release_cal_resource(resource) + + +class NetworkPool(ResourcePool): + def __init__(self, log, loop, pool_info, cal): + super(NetworkPool, self).__init__(log, loop, pool_info, NetworkResource, cal) + + @asyncio.coroutine + def allocate_resource_in_cal(self, request): + resource = None + if self.pool_type == 'static': + self._log.info("Attempting network resource allocation from static pool: %s", self.name) + ### Attempt resource allocation from static pool + resource = self._allocate_static_resource(request, 'network') + elif self.pool_type == 'dynamic': + ### Attempt resource allocation from dynamic pool + self._log.info("Attempting network resource allocation from dynamic pool: %s", self.name) + if len(self._free_resources) != 0: + self._log.info("Dynamic pool: %s has %d static resources, Attempting resource allocation from static resources", + self.name, len(self._free_resources)) + resource = self._allocate_static_resource(request, 'network') + if resource is None: + self._log.info("Could not resource from static resources. Going for dynamic resource allocation") + ## Not static resource available. 
Attempt dynamic resource from pool + resource = yield from self.allocate_dynamic_resource(request) + if resource is None: + raise ResMgrNoResourcesAvailable("No matching resource available for allocation from pool: %s" %(self.name)) + return resource + + @asyncio.coroutine + def allocate_dynamic_resource(self, request): + resource_id = self._cal.create_virtual_network(request) + resource = self._resource_class(resource_id, 'dynamic') + self._all_resources[resource_id] = resource + self._allocated_resources[resource_id] = resource + self._log.info("Successfully allocated virtual-network resource from CAL with resource-id: %s", resource_id) + return resource + + @asyncio.coroutine + def release_cal_resource(self, resource): + if resource.resource_type == 'dynamic': + self._log.debug("Deleting virtual network with network_id: %s", resource.resource_id) + self._cal.delete_virtual_network(resource.resource_id) + self._all_resources.pop(resource.resource_id) + self._log.info("Successfully released virtual-network resource in CAL with resource-id: %s", resource.resource_id) + else: + self._log.info("Successfully released virtual-network resource with resource-id: %s into available-list", resource.resource_id) + self._free_resources.append(resource) + + def get_resource_info(self, resource): + info = self._cal.get_virtual_network_info(resource.resource_id) + self._log.info("Successfully retrieved virtual-network information from CAL with resource-id: %s. Info: %s", + resource.resource_id, str(info)) + response = RwResourceMgrYang.VirtualLinkEventData_ResourceInfo() + response.from_dict(info.as_dict()) + response.pool_name = self.name + response.resource_state = 'active' + return response + + def get_info_by_id(self, resource_id): + info = self._cal.get_virtual_network_info(resource_id) + self._log.info("Successfully retrieved virtual-network information from CAL with resource-id: %s. Info: %s", + resource_id, str(info)) + return info + + def match_image_params(self, resource_info, request_params): + return True + + def match_epa_params(self, resource_info, request_params): + if not hasattr(request_params, 'provider_network'): + ### Its a match if nothing is requested + return True + else: + required = getattr(request_params, 'provider_network') + + if not hasattr(resource_info, 'provider_network'): + ### Its no match + return False + else: + available = getattr(resource_info, 'provider_network') + + self._log.debug("Matching Network EPA params. Required: %s, Available: %s", required, available) + + if required.has_field('name') and required.name!= available.name: + self._log.debug("Provider Network mismatch. Required: %s, Available: %s", + required.name, + available.name) + return False + + self._log.debug("Matching EPA params physical network name") + + if required.has_field('physical_network') and required.physical_network != available.physical_network: + self._log.debug("Physical Network mismatch. Required: %s, Available: %s", + required.physical_network, + available.physical_network) + return False + + self._log.debug("Matching EPA params overlay type") + if required.has_field('overlay_type') and required.overlay_type != available.overlay_type: + self._log.debug("Overlay type mismatch. Required: %s, Available: %s", + required.overlay_type, + available.overlay_type) + return False + + self._log.debug("Matching EPA params SegmentationID") + if required.has_field('segmentation_id') and required.segmentation_id != available.segmentation_id: + self._log.debug("Segmentation-Id mismatch. 
Required: %s, Available: %s", + required.segmentation_id, + available.segmentation_id) + return False + return True + + def initialize_resource_in_cal(self, resource, request): + pass + + def uninitialize_resource_in_cal(self, resource): + pass + + +class ComputePool(ResourcePool): + def __init__(self, log, loop, pool_info, cal): + super(ComputePool, self).__init__(log, loop, pool_info, ComputeResource, cal) + + @asyncio.coroutine + def allocate_resource_in_cal(self, request): + resource = None + if self.pool_type == 'static': + self._log.info("Attempting compute resource allocation from static pool: %s", self.name) + ### Attempt resource allocation from static pool + resource = self._allocate_static_resource(request, 'compute') + elif self.pool_type == 'dynamic': + ### Attempt resource allocation from dynamic pool + self._log.info("Attempting compute resource allocation from dynamic pool: %s", self.name) + if len(self._free_resources) != 0: + self._log.info("Dynamic pool: %s has %d static resources, Attempting resource allocation from static resources", + len(self._free_resources), + self.name) + resource = self._allocate_static_resource(request, 'compute') + if resource is None: + self._log.info("Attempting for dynamic resource allocation") + resource = yield from self.allocate_dynamic_resource(request) + if resource is None: + raise ResMgrNoResourcesAvailable("No matching resource available for allocation from pool: %s" %(self.name)) + + requested_params = RwcalYang.VDUInitParams() + requested_params.from_dict(request.as_dict()) + resource.requested_params = requested_params + return resource + + @asyncio.coroutine + def allocate_dynamic_resource(self, request): + request.flavor_id = self.select_resource_flavor(request) + resource_id = self._cal.create_virtual_compute(request) + resource = self._resource_class(resource_id, 'dynamic') + self._all_resources[resource_id] = resource + self._allocated_resources[resource_id] = resource + self._log.info("Successfully allocated virtual-compute resource from CAL with resource-id: %s", resource_id) + return resource + + @asyncio.coroutine + def release_cal_resource(self, resource): + if hasattr(resource, 'requested_params'): + delattr(resource, 'requested_params') + if resource.resource_type == 'dynamic': + self._cal.delete_virtual_compute(resource.resource_id) + self._all_resources.pop(resource.resource_id) + self._log.info("Successfully released virtual-compute resource in CAL with resource-id: %s", resource.resource_id) + else: + self._log.info("Successfully released virtual-compute resource with resource-id: %s into available-list", resource.resource_id) + self._free_resources.append(resource) + + def get_resource_info(self, resource): + info = self._cal.get_virtual_compute_info(resource.resource_id) + self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s", + resource.resource_id, str(info)) + response = RwResourceMgrYang.VDUEventData_ResourceInfo() + response.from_dict(info.as_dict()) + response.pool_name = self.name + response.resource_state = self._get_resource_state(info, resource.requested_params) + return response + + def get_info_by_id(self, resource_id): + info = self._cal.get_virtual_compute_info(resource_id) + self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. 
Info: %s", + resource_id, str(info)) + return info + + def _get_resource_state(self, resource_info, requested_params): + if resource_info.state == 'failed': + self._log.error(" Reached failed state.", + resource_info.name) + return 'failed' + + if resource_info.state != 'active': + self._log.info(" Not reached active state.", + resource_info.name) + return 'pending' + + if not resource_info.has_field('management_ip') or resource_info.management_ip == '': + self._log.info(" Management IP not assigned.", + resource_info.name) + return 'pending' + + if (requested_params.has_field('allocate_public_address')) and (requested_params.allocate_public_address == True): + if not resource_info.has_field('public_ip'): + self._log.warning(" Management IP not assigned- waiting for public ip, %s", + resource_info.name, requested_params) + return 'pending' + + if(len(requested_params.connection_points) != + len(resource_info.connection_points)): + return 'pending' + + not_active = [c for c in resource_info.connection_points + if c.state != 'active'] + + if not_active: + self._log.warning(" Management IP not assigned- waiting for connection_points , %s", + resource_info.name, resource_info) + return 'pending' + + ## Find the connection_points which are in active state but does not have IP address + no_address = [c for c in resource_info.connection_points + if (c.state == 'active') and (not c.has_field('ip_address'))] + + if no_address: + self._log.warning(" Management IP not assigned- waiting for connection_points , %s", + resource_info.name, resource_info) + return 'pending' + + return 'active' + + def select_resource_flavor(self, request): + flavors = self._cal.get_compute_flavor_info_list() + self._log.debug("Received %d flavor information from RW.CAL", len(flavors)) + flavor_id = None + match_found = False + for flv in flavors: + self._log.info("Attempting to match compute requirement for VDU: %s with flavor %s", + request.name, flv) + if self.match_epa_params(flv, request): + self._log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s", + request.name, flv.name, flv.id) + match_found = True + flavor_id = flv.id + break + + if not match_found: + ### Check if CAL account allows dynamic flavor creation + if self._cal.dynamic_flavor_supported(): + self._log.info("Attempting to create a new flavor for required compute-requirement for VDU: %s", request.name) + flavor_id = self._cal.create_compute_flavor(request) + else: + ### No match with existing flavors and CAL does not support dynamic flavor creation + self._log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", request.name) + raise ResMgrNoResourcesAvailable("No resource available with matching EPA attributes") + else: + ### Found flavor + self._log.info("Found flavor with id: %s for compute requirement for VDU: %s", + flavor_id, request.name) + return flavor_id + + def _match_vm_flavor(self, required, available): + if available.vcpu_count != required.vcpu_count: + self._log.debug("VCPU requirement mismatch. Required: %d, Available: %d", + required.vcpu_count, + available.vcpu_count) + return False + if available.memory_mb != required.memory_mb: + self._log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB", + required.memory_mb, + available.memory_mb) + return False + if available.storage_gb != required.storage_gb: + self._log.debug("Storage requirement mismatch. 
Required: %d GB, Available: %d GB", + required.storage_gb, + available.storage_gb) + return False + self._log.debug("VM Flavor match found") + return True + + def _match_guest_epa(self, required, available): + if required.has_field('mempage_size'): + self._log.debug("Matching mempage_size") + if available.has_field('mempage_size') == False: + self._log.debug("Matching mempage_size failed. Not available in flavor") + return False + else: + if required.mempage_size != available.mempage_size: + self._log.debug("Matching mempage_size failed. Required: %s, Available: %s", required.mempage_size, available.mempage_size) + return False + + if required.has_field('cpu_pinning_policy'): + self._log.debug("Matching cpu_pinning_policy") + if required.cpu_pinning_policy != 'ANY': + if available.has_field('cpu_pinning_policy') == False: + self._log.debug("Matching cpu_pinning_policy failed. Not available in flavor") + return False + else: + if required.cpu_pinning_policy != available.cpu_pinning_policy: + self._log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s", required.cpu_pinning_policy, available.cpu_pinning_policy) + return False + + if required.has_field('cpu_thread_pinning_policy'): + self._log.debug("Matching cpu_thread_pinning_policy") + if available.has_field('cpu_thread_pinning_policy') == False: + self._log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor") + return False + else: + if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy: + self._log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s", required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy) + return False + + if required.has_field('trusted_execution'): + self._log.debug("Matching trusted_execution") + if required.trusted_execution == True: + if available.has_field('trusted_execution') == False: + self._log.debug("Matching trusted_execution failed. Not available in flavor") + return False + else: + if required.trusted_execution != available.trusted_execution: + self._log.debug("Matching trusted_execution failed. Required: %s, Available: %s", required.trusted_execution, available.trusted_execution) + return False + + if required.has_field('numa_node_policy'): + self._log.debug("Matching numa_node_policy") + if available.has_field('numa_node_policy') == False: + self._log.debug("Matching numa_node_policy failed. Not available in flavor") + return False + else: + if required.numa_node_policy.has_field('node_cnt'): + self._log.debug("Matching numa_node_policy node_cnt") + if available.numa_node_policy.has_field('node_cnt') == False: + self._log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor") + return False + else: + if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt: + self._log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt) + return False + + if required.numa_node_policy.has_field('mem_policy'): + self._log.debug("Matching numa_node_policy mem_policy") + if available.numa_node_policy.has_field('mem_policy') == False: + self._log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor") + return False + else: + if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy: + self._log.debug("Matching numa_node_policy mem_policy failed. 
Required: %s, Available: %s", required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy) + return False + + if required.numa_node_policy.has_field('node'): + self._log.debug("Matching numa_node_policy nodes configuration") + if available.numa_node_policy.has_field('node') == False: + self._log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor") + return False + for required_node in required.numa_node_policy.node: + self._log.debug("Matching numa_node_policy nodes configuration for node %s", required_node) + numa_match = False + for available_node in available.numa_node_policy.node: + if required_node.id != available_node.id: + self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node) + continue + if required_node.vcpu != available_node.vcpu: + self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node) + continue + if required_node.memory_mb != available_node.memory_mb: + self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node) + continue + numa_match = True + if numa_match == False: + return False + return True + + def _match_vswitch_epa(self, required, available): + self._log.debug("VSwitch EPA match found") + return True + + def _match_hypervisor_epa(self, required, available): + self._log.debug("Hypervisor EPA match found") + return True + + def _match_host_epa(self, required, available): + return True + + def match_image_params(self, resource_info, request_params): + return True + + def match_epa_params(self, resource_info, request_params): + result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'), + getattr(resource_info, 'vm_flavor')) + if result == False: + self._log.debug("VM Flavor mismatched") + return False + + result = self._match_guest_epa(getattr(request_params, 'guest_epa'), + getattr(resource_info, 'guest_epa')) + if result == False: + self._log.debug("Guest EPA mismatched") + return False + + result = self._match_vswitch_epa(getattr(request_params, 'vswitch_epa'), + getattr(resource_info, 'vswitch_epa')) + if result == False: + self._log.debug("Vswitch EPA mismatched") + return False + + result = self._match_hypervisor_epa(getattr(request_params, 'hypervisor_epa'), + getattr(resource_info, 'hypervisor_epa')) + if result == False: + self._log.debug("Hypervisor EPA mismatched") + return False + + result = self._match_host_epa(getattr(request_params, 'host_epa'), + getattr(resource_info, 'host_epa')) + if result == False: + self._log.debug("Host EPA mismatched") + return False + + return True + + + def initialize_resource_in_cal(self, resource, request): + self._log.info("Initializing the compute-resource with id: %s in RW.CAL", resource.resource_id) + modify_params = RwcalYang.VDUModifyParams() + modify_params.vdu_id = resource.resource_id + modify_params.image_id = request.image_id + + for c_point in request.connection_points: + self._log.debug("Adding connection point for VDU: %s to virtual-compute with id: %s Connection point Name: %s", + request.name,resource.resource_id,c_point.name) + point = modify_params.connection_points_add.add() + point.name = c_point.name + point.virtual_link_id = c_point.virtual_link_id + self._cal.modify_virtual_compute(modify_params) + + def uninitialize_resource_in_cal(self, resource): + self._log.info("Un-initializing the compute-resource with id: %s in 
RW.CAL", resource.resource_id) + modify_params = RwcalYang.VDUModifyParams() + modify_params.vdu_id = resource.resource_id + resource_info = self.get_resource_info(resource) + for c_point in resource_info.connection_points: + self._log.debug("Removing connection point: %s from VDU: %s ", + c_point.name,resource_info.name) + point = modify_params.connection_points_remove.add() + point.connection_point_id = c_point.connection_point_id + self._cal.modify_virtual_compute(modify_params) + + +class ResourceMgrCore(object): + def __init__(self, dts, log, log_hdl, loop, parent): + self._log = log + self._log_hdl = log_hdl + self._dts = dts + self._loop = loop + self._parent = parent + self._cloud_cals = {} + # Dictionary of pool objects keyed by name + self._cloud_pool_table = {} + # Dictionary of tuples (resource_id, cloud_account_name, pool_name) keyed by event_id + self._resource_table = {} + self._pool_class = {'compute': ComputePool, + 'network': NetworkPool} + + def _get_cloud_pool_table(self, cloud_account_name): + if cloud_account_name not in self._cloud_pool_table: + msg = "Cloud account %s not found" % cloud_account_name + self._log.error(msg) + raise ResMgrCloudAccountNotFound(msg) + + return self._cloud_pool_table[cloud_account_name] + + def _get_cloud_cal_plugin(self, cloud_account_name): + if cloud_account_name not in self._cloud_cals: + msg = "Cloud account %s not found" % cloud_account_name + self._log.error(msg) + raise ResMgrCloudAccountNotFound(msg) + + return self._cloud_cals[cloud_account_name] + + def _add_default_cloud_pools(self, cloud_account_name): + self._log.debug("Adding default compute and network pools for cloud account %s", + cloud_account_name) + default_pools = [ + { + 'name': '____default_compute_pool', + 'resource_type': 'compute', + 'pool_type': 'dynamic', + 'max_size': 128, + }, + { + 'name': '____default_network_pool', + 'resource_type': 'network', + 'pool_type': 'dynamic', + 'max_size': 128, + }, + ] + + for pool_dict in default_pools: + pool_info = ResourcePoolInfo.from_dict(pool_dict) + self._log.info("Applying configuration for cloud account %s pool: %s", + cloud_account_name, pool_info.name) + + self.add_resource_pool(cloud_account_name, pool_info) + self.unlock_resource_pool(cloud_account_name, pool_info.name) + + def get_cloud_account_names(self): + """ Returns a list of configured cloud account names """ + return self._cloud_cals.keys() + + def add_cloud_account(self, account): + self._log.debug("Received CAL account. 
Account Name: %s, Account Type: %s", + account.name, account.account_type) + + ### Add cal handler to all the pools + if account.name in self._cloud_cals: + raise ResMgrCloudAccountExists("Cloud account already exists in res mgr: %s", + account.name) + + self._cloud_pool_table[account.name] = {} + + cal = ResourceMgrCALHandler(self._log, self._log_hdl, account) + self._cloud_cals[account.name] = cal + + self._add_default_cloud_pools(account.name) + + def update_cloud_account(self, account): + raise NotImplementedError("Update cloud account not implemented") + + def delete_cloud_account(self, account_name, dry_run=False): + cloud_pool_table = self._get_cloud_pool_table(account_name) + for pool in cloud_pool_table.values(): + if pool.in_use(): + raise ResMgrCloudAccountInUse("Cannot delete cloud which is currently in use") + + # If dry_run is specified, do not actually delete the cloud account + if dry_run: + return + + for pool in list(cloud_pool_table): + self.delete_resource_pool(account_name, pool) + + del self._cloud_pool_table[account_name] + del self._cloud_cals[account_name] + + def add_resource_pool(self, cloud_account_name, pool_info): + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + if pool_info.name in cloud_pool_table: + raise ResMgrDuplicatePool("Pool with name: %s already exists", pool_info.name) + + cloud_cal = self._get_cloud_cal_plugin(cloud_account_name) + pool = self._pool_class[pool_info.resource_type](self._log, self._loop, pool_info, cloud_cal) + + cloud_pool_table[pool_info.name] = pool + + def delete_resource_pool(self, cloud_account_name, pool_name): + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + if pool_name not in cloud_pool_table: + self._log.error("Pool: %s not found for deletion", pool_name) + return + pool = cloud_pool_table[pool_name] + + if pool.in_use(): + # Can't delete a pool in use + self._log.error("Pool: %s in use. 
Can not delete in-use pool", pool.name) + return + + pool.cleanup() + del cloud_pool_table[pool_name] + self._log.info("Resource Pool: %s successfully deleted", pool_name) + + def modify_resource_pool(self, cloud_account_name, pool): + pass + + def lock_resource_pool(self, cloud_account_name, pool_name): + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + if pool_name not in cloud_pool_table: + self._log.info("Pool: %s is not available for lock operation") + return + + pool = cloud_pool_table[pool_name] + pool.lock_pool() + + def unlock_resource_pool(self, cloud_account_name, pool_name): + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + if pool_name not in cloud_pool_table: + self._log.info("Pool: %s is not available for unlock operation") + return + + pool = cloud_pool_table[pool_name] + pool.unlock_pool() + + def get_resource_pool_info(self, cloud_account_name, pool_name): + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + if pool_name in cloud_pool_table: + pool = cloud_pool_table[pool_name] + return pool.get_pool_info() + else: + return None + + def get_resource_pool_list(self, cloud_account_name): + return [v for _, v in self._get_cloud_pool_table(cloud_account_name).items()] + + def _select_resource_pools(self, cloud_account_name, resource_type): + pools = [pool for pool in self.get_resource_pool_list(cloud_account_name) if pool.resource_type == resource_type and pool.status == 'unlocked'] + if not pools: + raise ResMgrPoolNotAvailable("No %s pool found for resource allocation", resource_type) + + return pools[0] + + @asyncio.coroutine + def allocate_virtual_resource(self, event_id, cloud_account_name, request, resource_type): + ### Check if event_id is unique or already in use + if event_id in self._resource_table: + r_id, cloud_account_name, pool_name = self._resource_table[event_id] + self._log.warning("Requested event-id :%s for resource-allocation already active with pool: %s", + event_id, pool_name) + # If resource-type matches then return the same resource + cloud_pool_table = self._get_cloud_pool_table(request.cloud_account) + pool = cloud_pool_table[pool_name] + if pool.resource_type == resource_type: + info = pool.get_resource_info(r_id) + return info + else: + self._log.error("Event-id conflict. Duplicate event-id: %s", event_id) + raise ResMgrDuplicateEventId("Requested event-id :%s already active with pool: %s" %(event_id, pool_name)) + + ### All-OK, lets go ahead with resource allocation + pool = self._select_resource_pools(cloud_account_name, resource_type) + self._log.info("Selected pool %s for resource allocation", pool.name) + + r_id, r_info = yield from pool.allocate_resource(request) + + self._resource_table[event_id] = (r_id, cloud_account_name, pool.name) + return r_info + + @asyncio.coroutine + def reallocate_virtual_resource(self, event_id, cloud_account_name, request, resource_type, resource): + ### Check if event_id is unique or already in use + if event_id in self._resource_table: + r_id, cloud_account_name, pool_name = self._resource_table[event_id] + self._log.warning("Requested event-id :%s for resource-allocation already active with pool: %s", + event_id, pool_name) + # If resource-type matches then return the same resource + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + pool = cloud_pool_table[pool_name] + if pool.resource_type == resource_type: + info = pool.get_resource_info(r_id) + return info + else: + self._log.error("Event-id conflict. 
Duplicate event-id: %s", event_id) + raise ResMgrDuplicateEventId("Requested event-id :%s already active with pool: %s" %(event_id, pool_name)) + + r_info = None + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + pool = cloud_pool_table[resource.pool_name] + if pool.resource_type == resource_type: + if resource_type == 'network': + r_id = resource.virtual_link_id + r_info = pool.get_info_by_id(resource.virtual_link_id) + elif resource_type == 'compute': + r_id = resource.vdu_id + r_info = pool.get_info_by_id(resource.vdu_id) + + if r_info is None: + r_id, r_info = yield from pool.allocate_resource(request) + self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name) + return r_info + + self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name) + new_resource = pool._resource_class(r_id, 'dynamic') + if resource_type == 'compute': + requested_params = RwcalYang.VDUInitParams() + requested_params.from_dict(request.as_dict()) + new_resource.requested_params = requested_params + pool._all_resources[r_id] = new_resource + pool._allocated_resources[r_id] = new_resource + return r_info + + @asyncio.coroutine + def release_virtual_resource(self, event_id, resource_type): + ### Check if event_id exists + if event_id not in self._resource_table: + self._log.error("Received resource-release-request with unknown Event-id :%s", event_id) + raise ResMgrUnknownEventId("Received resource-release-request with unknown Event-id :%s" %(event_id)) + + ## All-OK, lets proceed with resource release + r_id, cloud_account_name, pool_name = self._resource_table.pop(event_id) + self._log.debug("Attempting to release virtual resource id %s from pool %s", + r_id, pool_name) + + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + pool = cloud_pool_table[pool_name] + yield from pool.release_resource(r_id) + + @asyncio.coroutine + def read_virtual_resource(self, event_id, resource_type): + ### Check if event_id exists + if event_id not in self._resource_table: + self._log.error("Received resource-read-request with unknown Event-id :%s", event_id) + raise ResMgrUnknownEventId("Received resource-read-request with unknown Event-id :%s" %(event_id)) + + ## All-OK, lets proceed + r_id, cloud_account_name, pool_name = self._resource_table[event_id] + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + pool = cloud_pool_table[pool_name] + info = yield from pool.read_resource_info(r_id) + return info \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py new file mode 100644 index 0000000..cdab39b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py @@ -0,0 +1,270 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
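
The allocate/reallocate/release coroutines above all hinge on one piece of bookkeeping: every allocation is remembered under its caller-supplied event-id, so a repeated request can be answered from the table and a release knows which cloud account and pool to go back to. A minimal, illustrative sketch of that pattern (standalone Python, not the real tasklet classes):

    # event_id -> (resource_id, cloud_account_name, pool_name), as kept by
    # ResourceMgrCore._resource_table in the code above
    class EventTable:
        def __init__(self):
            self._by_event = {}

        def allocate(self, event_id, cloud_account, pool_name, resource_id):
            if event_id in self._by_event:
                # mirrors ResMgrDuplicateEventId in the tasklet
                raise ValueError("event-id %s already active" % event_id)
            self._by_event[event_id] = (resource_id, cloud_account, pool_name)

        def release(self, event_id):
            if event_id not in self._by_event:
                # mirrors ResMgrUnknownEventId in the tasklet
                raise KeyError("unknown event-id %s" % event_id)
            return self._by_event.pop(event_id)
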
+ + +# +# + +import asyncio +import sys + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwResourceMgrYang, + RwLaunchpadYang, + RwcalYang, +) + +from gi.repository.RwTypes import RwStatus +import rift.tasklets + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class ResourceMgrEvent(object): + VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data" + VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data" + + def __init__(self, dts, log, loop, parent): + self._log = log + self._dts = dts + self._loop = loop + self._parent = parent + self._vdu_reg = None + self._link_reg = None + + self._vdu_reg_event = asyncio.Event(loop=self._loop) + self._link_reg_event = asyncio.Event(loop=self._loop) + + @asyncio.coroutine + def wait_ready(self, timeout=5): + self._log.debug("Waiting for all request registrations to become ready.") + yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()], + timeout=timeout, loop=self._loop) + + def create_record_dts(self, regh, xact, path, msg): + """ + Create a record in DTS with path and message + """ + self._log.debug("Creating Resource Record xact = %s, %s:%s", + xact, path, msg) + regh.create_element(path, msg) + + def delete_record_dts(self, regh, xact, path): + """ + Delete a VNFR record in DTS with path and message + """ + self._log.debug("Deleting Resource Record xact = %s, %s", + xact, path) + regh.delete_element(path) + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def onlink_event(dts, g_reg, xact, xact_event, scratch_data): + @asyncio.coroutine + def instantiate_realloc_vn(link): + """Re-populate the virtual link information after restart + + Arguments: + vlink + + """ + # wait for 3 seconds + yield from asyncio.sleep(3, loop=self._loop) + + response_info = yield from self._parent.reallocate_virtual_network(link.event_id, + link.cloud_account, + link.request_info, link.resource_info, + ) + if (xact_event == rwdts.MemberEvent.INSTALL): + link_cfg = self._link_reg.elements + for link in link_cfg: + self._loop.create_task(instantiate_realloc_vn(link)) + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def onvdu_event(dts, g_reg, xact, xact_event, scratch_data): + @asyncio.coroutine + def instantiate_realloc_vdu(vdu): + """Re-populate the VDU information after restart + + Arguments: + vdu + + """ + # wait for 3 seconds + yield from asyncio.sleep(3, loop=self._loop) + + response_info = yield from self._parent.reallocate_virtual_compute(vdu.event_id, + vdu.cloud_account, + vdu.request_info, vdu.resource_info, + ) + if (xact_event == rwdts.MemberEvent.INSTALL): + vdu_cfg = self._vdu_reg.elements + for vdu in vdu_cfg: + self._loop.create_task(instantiate_realloc_vdu(vdu)) + return rwdts.MemberRspCode.ACTION_OK + + def on_link_request_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Received link request commit (xact_info: %s)", xact_info) + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_link_request_prepare(xact_info, action, ks_path, request_msg): + self._log.debug("Received virtual-link on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg) + + response_info = None + response_xpath = 
ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info" + + schema = RwResourceMgrYang.VirtualLinkEventData().schema() + pathentry = schema.keyspec_to_entry(ks_path) + + if action == rwdts.QueryAction.CREATE: + response_info = yield from self._parent.allocate_virtual_network(pathentry.key00.event_id, + request_msg.cloud_account, + request_msg.request_info) + request_msg.resource_info = response_info + self.create_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()), request_msg) + elif action == rwdts.QueryAction.DELETE: + yield from self._parent.release_virtual_network(pathentry.key00.event_id) + self.delete_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema())) + elif action == rwdts.QueryAction.READ: + response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id) + else: + raise ValueError("Only read/create/delete actions available. Received action: %s" %(action)) + + self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.", + response_xpath, response_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info) + + + def on_vdu_request_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Received vdu request commit (xact_info: %s)", xact_info) + return rwdts.MemberRspCode.ACTION_OK + + def monitor_vdu_state(response_xpath, pathentry): + self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath) + loop_cnt = 120 + while loop_cnt > 0: + self._log.debug("VDU state monitoring: Sleeping for 1 second ") + yield from asyncio.sleep(1, loop = self._loop) + try: + response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id) + except Exception as e: + self._log.info("VDU state monitoring: Received exception %s in VDU state monitoring for %s. Aborting monitoring", + str(e),response_xpath) + return + if response_info.resource_state == 'active' or response_info.resource_state == 'failed': + self._log.info("VDU state monitoring: VDU reached terminal state. Publishing VDU info: %s at path: %s", + response_info, response_xpath) + yield from self._dts.query_update(response_xpath, + rwdts.Flag.ADVISE, + response_info) + return + else: + loop_cnt -= 1 + ### End of while loop. This is only possible if VDU did not reach active state + self._log.info("VDU state monitoring: VDU at xpath :%s did not reached active state in 120 seconds. 
Aborting monitoring", + response_xpath) + response_info = RwResourceMgrYang.VDUEventData_ResourceInfo() + response_info.resource_state = 'failed' + yield from self._dts.query_update(response_xpath, + rwdts.Flag.ADVISE, + response_info) + return + + @asyncio.coroutine + def on_vdu_request_prepare(xact_info, action, ks_path, request_msg): + self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg) + + response_info = None + response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info" + + schema = RwResourceMgrYang.VDUEventData().schema() + pathentry = schema.keyspec_to_entry(ks_path) + + if action == rwdts.QueryAction.CREATE: + response_info = yield from self._parent.allocate_virtual_compute(pathentry.key00.event_id, + request_msg.cloud_account, + request_msg.request_info, + ) + if response_info.resource_state == 'pending': + asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry), + loop = self._loop) + request_msg.resource_info = response_info + self.create_record_dts(self._vdu_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()), request_msg) + elif action == rwdts.QueryAction.DELETE: + yield from self._parent.release_virtual_compute(pathentry.key00.event_id) + self.delete_record_dts(self._vdu_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema())) + elif action == rwdts.QueryAction.READ: + response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id) + else: + raise ValueError("Only create/delete actions available. Received action: %s" %(action)) + + self._log.debug("Responding with VDUInfo at xpath %s: %s", + response_xpath, response_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info) + + + + @asyncio.coroutine + def on_request_ready(registration, status): + self._log.debug("Got request ready event (registration: %s) (status: %s)", + registration, status) + + if registration == self._link_reg: + self._link_reg_event.set() + elif registration == self._vdu_reg: + self._vdu_reg_event.set() + else: + self._log.error("Unknown registration ready event: %s", registration) + + link_handlers = rift.tasklets.Group.Handler(on_event=onlink_event,) + with self._dts.group_create(handler=link_handlers) as link_group: + self._log.debug("Registering for Link Resource Request using xpath: %s", + ResourceMgrEvent.VLINK_REQUEST_XPATH) + + self._link_reg = link_group.register(xpath=ResourceMgrEvent.VLINK_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready, + on_commit=on_link_request_commit, + on_prepare=on_link_request_prepare), + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.FILE_DATASTORE,) + + vdu_handlers = rift.tasklets.Group.Handler(on_event=onvdu_event, ) + with self._dts.group_create(handler=vdu_handlers) as vdu_group: + + self._log.debug("Registering for VDU Resource Request using xpath: %s", + ResourceMgrEvent.VDU_REQUEST_XPATH) + + self._vdu_reg = vdu_group.register(xpath=ResourceMgrEvent.VDU_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready, + on_commit=on_vdu_request_commit, + on_prepare=on_vdu_request_prepare), + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.FILE_DATASTORE,) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py new file mode 100755 index 0000000..bb64ba6 --- 
/dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py @@ -0,0 +1,234 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import logging +import sys + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwResourceMgrYang, + RwLaunchpadYang, + RwcalYang, +) + +import rift.tasklets + +from . import rwresmgr_core as Core +from . import rwresmgr_config as Config +from . import rwresmgr_events as Event + + +class ResourceManager(object): + def __init__(self, log, log_hdl, loop, dts): + self._log = log + self._log_hdl = log_hdl + self._loop = loop + self._dts = dts + self.config_handler = Config.ResourceMgrConfig(self._dts, self._log, self._log_hdl, self._loop, self) + self.event_handler = Event.ResourceMgrEvent(self._dts, self._log, self._loop, self) + self.core = Core.ResourceMgrCore(self._dts, self._log, self._log_hdl, self._loop, self) + + @asyncio.coroutine + def register(self): + yield from self.config_handler.register() + yield from self.event_handler.register() + + def add_cloud_account_config(self, account): + self._log.debug("Received Cloud-Account add config event for account: %s", account.name) + self.core.add_cloud_account(account) + + def update_cloud_account_config(self, account): + self._log.debug("Received Cloud-Account update config event for account: %s", account.name) + self.core.update_cloud_account(account) + + def delete_cloud_account_config(self, account_name, dry_run=False): + self._log.debug("Received Cloud-Account delete event for account (dry_run: %s): %s", + dry_run, account_name) + self.core.delete_cloud_account(account_name, dry_run) + + def get_cloud_account_names(self): + cloud_account_names = self.core.get_cloud_account_names() + return cloud_account_names + + def pool_add(self, cloud_account_name, pool): + self._log.debug("Received Pool add event for cloud account %s pool: %s", + cloud_account_name, pool.name) + self.core.add_resource_pool(cloud_account_name, pool) + + def pool_modify(self, cloud_account_name, pool): + self._log.debug("Received Pool modify event for cloud account %s pool: %s", + cloud_account_name, pool.name) + self.core.modify_resource_pool(cloud_account_name, pool) + + def pool_delete(self, cloud_account_name, pool_name): + self._log.debug("Received Pool delete event for cloud account %s pool: %s", + cloud_account_name, pool_name) + self.core.delete_resource_pool(cloud_account_name, pool_name) + + def get_pool_list(self, cloud_account_name): + return self.core.get_resource_pool_list(cloud_account_name) + + def get_pool_info(self, cloud_account_name, pool_name): + self._log.debug("Received get-pool-info event for cloud account %s pool: %s", + cloud_account_name, pool_name) + return 
self.core.get_resource_pool_info(cloud_account_name, pool_name) + + def lock_pool(self, cloud_account_name, pool_name): + self._log.debug("Received pool unlock event for pool: %s", + cloud_account_name, pool_name) + self.core.lock_resource_pool(cloud_account_name, pool_name) + + def unlock_pool(self, cloud_account_name, pool_name): + self._log.debug("Received pool unlock event for pool: %s", + cloud_account_name, pool_name) + self.core.unlock_resource_pool(cloud_account_name, pool_name) + + @asyncio.coroutine + def allocate_virtual_network(self, event_id, cloud_account_name, request): + self._log.info("Received network resource allocation request with event-id: %s", event_id) + resource = yield from self.core.allocate_virtual_resource(event_id, cloud_account_name, request, 'network') + return resource + + @asyncio.coroutine + def reallocate_virtual_network(self, event_id, cloud_account_name, request, resource): + self._log.info("Received network resource allocation request with event-id: %s", event_id) + resource = yield from self.core.reallocate_virtual_resource(event_id, cloud_account_name, request, 'network', resource) + return resource + + @asyncio.coroutine + def release_virtual_network(self, event_id): + self._log.info("Received network resource release request with event-id: %s", event_id) + yield from self.core.release_virtual_resource(event_id, 'network') + + @asyncio.coroutine + def read_virtual_network_info(self, event_id): + self._log.info("Received network resource read request with event-id: %s", event_id) + info = yield from self.core.read_virtual_resource(event_id, 'network') + return info + + @asyncio.coroutine + def allocate_virtual_compute(self, event_id, cloud_account_name, request): + self._log.info("Received compute resource allocation request " + "(cloud account: %s) with event-id: %s", + cloud_account_name, event_id) + resource = yield from self.core.allocate_virtual_resource( + event_id, cloud_account_name, request, 'compute', + ) + return resource + + @asyncio.coroutine + def reallocate_virtual_compute(self, event_id, cloud_account_name, request, resource): + self._log.info("Received compute resource allocation request " + "(cloud account: %s) with event-id: %s", + cloud_account_name, event_id) + resource = yield from self.core.reallocate_virtual_resource( + event_id, cloud_account_name, request, 'compute', resource, + ) + return resource + + @asyncio.coroutine + def release_virtual_compute(self, event_id): + self._log.info("Received compute resource release request with event-id: %s", event_id) + yield from self.core.release_virtual_resource(event_id, 'compute') + + @asyncio.coroutine + def read_virtual_compute_info(self, event_id): + self._log.info("Received compute resource read request with event-id: %s", event_id) + info = yield from self.core.read_virtual_resource(event_id, 'compute') + return info + + +class ResMgrTasklet(rift.tasklets.Tasklet): + def __init__(self, *args, **kwargs): + super(ResMgrTasklet, self).__init__(*args, **kwargs) + self._dts = None + self._resource_manager = None + + def start(self): + super(ResMgrTasklet, self).start() + self.log.info("Starting ResMgrTasklet") + self.log.setLevel(logging.DEBUG) + + self.log.debug("Registering with dts") + + self._dts = rift.tasklets.DTS(self.tasklet_info, + RwResourceMgrYang.get_schema(), + self.loop, + self.on_dts_state_change) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def stop(self): + try: + self._dts.deinit() + except Exception: + print("Caught Exception in RESMGR 
stop:", sys.exc_info()[0]) + raise + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def init(self): + self._log.info("Initializing the Resource Manager tasklet") + self._resource_manager = ResourceManager(self.log, + self.log_hdl, + self.loop, + self._dts) + yield from self._resource_manager.register() + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py new file mode 100755 index 0000000..506b433 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py @@ -0,0 +1,29 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwresmgrtasklet +class Tasklet(rift.tasklets.rwresmgrtasklet.ResMgrTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py new file mode 100755 index 0000000..9c494fa --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py @@ -0,0 +1,784 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
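
ResMgrTasklet.on_dts_state_change() above is table-driven: one dictionary maps the current DTS state to the coroutine that performs the application work, a second maps it to the next DTS state to report back. A condensed, standalone sketch of that dispatch (plain strings stand in for the rwdts.State enum, and plain calls stand in for the yield-from coroutine invocations; illustrative only):

    HANDLERS = {"INIT": "init", "RUN": "run"}                 # state -> method name
    NEXT_STATE = {"INIT": "REGN_COMPLETE", "CONFIG": "RUN"}   # state -> state to set

    def on_state_change(tasklet, state):
        method = HANDLERS.get(state)
        if method is not None:
            getattr(tasklet, method)()      # run the application work for this state
        next_state = NEXT_STATE.get(state)
        if next_state is not None:
            tasklet.set_state(next_state)   # advance DTS to the next state
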
+ + +# +# + + +import asyncio +import logging +import os +import sys +import types +import unittest +import uuid +import random + +import xmlrunner + +import gi +gi.require_version('CF', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwMain', '1.0') +gi.require_version('RwManifestYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwTypes', '1.0') + +import gi.repository.CF as cf +import gi.repository.RwDts as rwdts +import gi.repository.RwMain as rwmain +import gi.repository.RwManifestYang as rwmanifest +import gi.repository.RwResourceMgrYang as rmgryang +from gi.repository import RwcalYang +from gi.repository import RwCloudYang +from gi.repository.RwTypes import RwStatus + +import rw_peas +import rift.tasklets +import rift.test.dts + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +openstack_info = { + 'username' : 'pluto', + 'password' : 'mypasswd', + 'auth_url' : 'http://10.66.4.14:5000/v3/', + 'project_name' : 'demo', + 'mgmt_network' : 'private', + 'image_id' : '5cece2b1-1a49-42c5-8029-833c56574652', + 'vms' : ['res-test-1', 'res-test-2'], + 'networks' : ['testnet1', 'testnet2']} + + +def create_mock_resource_temaplate(): + ### Resource to be reuqested for 'mock' + resource_requests = {'compute': {}, 'network': {}} + + ###### mycompute-0 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-0')) + msg.vm_flavor.vcpu_count = 4 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 40 + resource_requests['compute']['mycompute-0'] = msg + + ###### mycompute-1 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-1')) + msg.vm_flavor.vcpu_count = 2 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 20 + resource_requests['compute']['mycompute-1'] = msg + + ####### mynet-0 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + resource_requests['network']['mynet-0'] = msg + + ####### mynet-1 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + resource_requests['network']['mynet-1'] = msg + + return resource_requests + + +def create_cloudsim_resource_template(): + ### Resource to be reuqested for 'cloudsim' + resource_requests = {'compute': {}, 'network': {}} + + ###### mycompute-0 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = "1" + msg.vm_flavor.vcpu_count = 4 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 40 + resource_requests['compute']['mycompute-0'] = msg + + ###### mycompute-1 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = "1" + msg.vm_flavor.vcpu_count = 2 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 20 + resource_requests['compute']['mycompute-1'] = msg + + ####### mynet-0 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + resource_requests['network']['mynet-0'] = msg + + ####### mynet-1 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + resource_requests['network']['mynet-1'] = msg + + return resource_requests + +def create_mock_resource_temaplate(): + ### Resource to be reuqested for 'mock' + resource_requests = {'compute': {}, 'network': {}} + + ###### mycompute-0 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-0')) + msg.vm_flavor.vcpu_count = 4 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 40 + resource_requests['compute']['mycompute-0'] = msg + + ###### mycompute-1 + msg = rmgryang.VDUEventData_RequestInfo() 
+ msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-1')) + msg.vm_flavor.vcpu_count = 2 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 20 + resource_requests['compute']['mycompute-1'] = msg + + ####### mynet-0 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + resource_requests['network']['mynet-0'] = msg + + ####### mynet-1 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + resource_requests['network']['mynet-1'] = msg + + return resource_requests + + +def create_openstack_static_template(): + ### Resource to be reuqested for 'openstack_static' + resource_requests = {'compute': {}, 'network': {}} + + ###### mycompute-0 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = openstack_info['image_id'] + msg.vm_flavor.vcpu_count = 4 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 80 + resource_requests['compute']['mycompute-0'] = msg + + ###### mycompute-1 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = openstack_info['image_id'] + msg.vm_flavor.vcpu_count = 2 + msg.vm_flavor.memory_mb = 4096 + msg.vm_flavor.storage_gb = 40 + resource_requests['compute']['mycompute-1'] = msg + + ####### mynet-0 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + msg.provider_network.physical_network = 'PHYSNET1' + msg.provider_network.overlay_type = 'VLAN' + msg.provider_network.segmentation_id = 17 + resource_requests['network']['mynet-0'] = msg + + ####### mynet-1 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + msg.provider_network.physical_network = 'PHYSNET1' + msg.provider_network.overlay_type = 'VLAN' + msg.provider_network.segmentation_id = 18 + resource_requests['network']['mynet-1'] = msg + + return resource_requests + + +def create_openstack_dynamic_template(): + ### Resource to be reuqested for 'openstack_dynamic' + resource_requests = {'compute': {}, 'network': {}} + + ###### mycompute-0 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = openstack_info['image_id'] + msg.vm_flavor.vcpu_count = 2 + msg.vm_flavor.memory_mb = 4096 + msg.vm_flavor.storage_gb = 40 + msg.guest_epa.mempage_size = 'LARGE' + msg.guest_epa.cpu_pinning_policy = 'DEDICATED' + msg.allocate_public_address = True + + resource_requests['compute']['mycompute-0'] = msg + + ###### mycompute-1 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = openstack_info['image_id'] + msg.vm_flavor.vcpu_count = 4 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 40 + msg.guest_epa.mempage_size = 'LARGE' + msg.guest_epa.cpu_pinning_policy = 'DEDICATED' + msg.allocate_public_address = True + + resource_requests['compute']['mycompute-1'] = msg + + ####### mynet-0 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + #msg.provider_network.overlay_type = 'VXLAN' + #msg.provider_network.segmentation_id = 71 + + resource_requests['network']['mynet-0'] = msg + + ####### mynet-1 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + #msg.provider_network.overlay_type = 'VXLAN' + #msg.provider_network.segmentation_id = 73 + resource_requests['network']['mynet-1'] = msg + + return resource_requests + + + + +resource_requests = { + 'mock' : create_mock_resource_temaplate(), + 'openstack_static': create_openstack_static_template(), + 'openstack_dynamic': create_openstack_dynamic_template(), + 'cloudsim': create_cloudsim_resource_template(), +} + + +def get_cal_account(account_type): + """ + Creates an object for class RwcalYang.CloudAccount() + """ + account = RwcalYang.CloudAccount() + if account_type == 'mock': + account.name = 'mock_account' + 
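+        # mock CAL account: only a placeholder username is set here, no real cloud credentials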
account.account_type = "mock" + account.mock.username = "mock_user" + elif ((account_type == 'openstack_static') or (account_type == 'openstack_dynamic')): + account.name = 'openstack_cal' + account.account_type = 'openstack' + account.openstack.key = openstack_info['username'] + account.openstack.secret = openstack_info['password'] + account.openstack.auth_url = openstack_info['auth_url'] + account.openstack.tenant = openstack_info['project_name'] + account.openstack.mgmt_network = openstack_info['mgmt_network'] + + elif account_type == 'cloudsim': + account.name = 'cloudsim' + account.account_type = "cloudsim_proxy" + + return account + +def create_cal_plugin(account, log_hdl): + plugin_name = getattr(account, account.account_type).plugin_name + plugin = rw_peas.PeasPlugin(plugin_name, 'RwCal-1.0') + engine, info, extension = plugin() + rwcal = plugin.get_interface("Cloud") + try: + rc = rwcal.init(log_hdl) + assert rc == RwStatus.SUCCESS + except Exception as e: + raise + return rwcal + + +class RMMgrTestCase(rift.test.dts.AbstractDTSTest): + rwcal = None + rwcal_acct_info = None + + @classmethod + def configure_suite(cls, rwmain): + rm_dir = os.environ.get('RM_DIR') + cnt_mgr_dir = os.environ.get('CNTR_MGR_DIR') + cal_proxy_dir = os.environ.get('CAL_PROXY_DIR') + + cls.rwmain.add_tasklet(cal_proxy_dir, 'rwcalproxytasklet') + cls.rwmain.add_tasklet(rm_dir, 'rwresmgrtasklet') + cls.rwmain.add_tasklet(cnt_mgr_dir, 'rwcntmgrtasklet') + + @classmethod + def configure_schema(cls): + return rmgryang.get_schema() + + @asyncio.coroutine + def wait_tasklets(self): + yield from asyncio.sleep(1, loop=self.loop) + + @classmethod + def configure_timeout(cls): + return 360 + + def get_cloud_account_msg(self, acct_type): + cloud_account = RwCloudYang.CloudAccount() + acct = get_cal_account(acct_type) + cloud_account.from_dict(acct.as_dict()) + cloud_account.name = acct.name + return cloud_account + + def get_compute_pool_msg(self, name, pool_type, cloud_type): + pool_config = rmgryang.ResourcePools() + pool = pool_config.pools.add() + pool.name = name + pool.resource_type = "compute" + if pool_type == "static": + pool.pool_type = 'static' + acct = get_cal_account(cloud_type) + rwcal = create_cal_plugin(acct, self.tinfo.get_rwlog_ctx()) + rc, rsp = rwcal.get_vdu_list(acct) + assert rc == RwStatus.SUCCESS + + if cloud_type == 'openstack_static': + for vdu in rsp.vdu_info_list: + if vdu.name in openstack_info['vms']: + self.log.info("Adding the static compute resource: %s to compute pool", vdu.name) + r = pool.resources.add() + r.resource_id = vdu.vdu_id + else: + # 'mock', 'cloudsim' 'openstack_dynamic' etc + for vdu in rsp.vdu_info_list: + self.log.info("Adding the static compute resource: %s to compute pool", vdu.name) + r = pool.resources.add() + r.resource_id = vdu.vdu_id + else: + pool.pool_type = 'dynamic' + pool.max_size = 10 + return pool_config + + def get_network_pool_msg(self, name, pool_type, cloud_type): + pool_config = rmgryang.ResourcePools() + pool = pool_config.pools.add() + pool.name = name + pool.resource_type = "network" + if pool_type == "static": + pool.pool_type = 'static' + acct = get_cal_account(cloud_type) + rwcal = create_cal_plugin(acct, self.tinfo.get_rwlog_ctx()) + rc, rsp = rwcal.get_virtual_link_list(acct) + assert rc == RwStatus.SUCCESS + if cloud_type == 'openstack_static': + for vlink in rsp.virtual_link_info_list: + if vlink.name in openstack_info['networks']: + self.log.info("Adding the static network resource: %s to network pool", vlink.name) + r = 
pool.resources.add() + r.resource_id = vlink.virtual_link_id + else: + # 'mock', 'cloudsim', 'openstack_dynamic' etc + for vlink in rsp.virtual_link_info_list: + self.log.info("Adding the static network resource: %s to network pool", vlink.name) + r = pool.resources.add() + r.resource_id = vlink.virtual_link_id + else: + pool.pool_type = 'dynamic' + pool.max_size = 4 + return pool_config + + + def get_network_reserve_msg(self, name, cloud_type, xpath): + event_id = str(uuid.uuid4()) + msg = rmgryang.VirtualLinkEventData() + msg.event_id = event_id + msg.request_info.name = name + attributes = ['physical_network', 'name', 'overlay_type', 'segmentation_id'] + + for attr in attributes: + if resource_requests[cloud_type]['network'][name].has_field('provider_network'): + if resource_requests[cloud_type]['network'][name].provider_network.has_field(attr): + setattr(msg.request_info.provider_network, attr, + getattr(resource_requests[cloud_type]['network'][name].provider_network ,attr)) + + return msg, xpath.format(event_id) + + def get_compute_reserve_msg(self, name, cloud_type, xpath, vlinks): + event_id = str(uuid.uuid4()) + msg = rmgryang.VDUEventData() + msg.event_id = event_id + msg.request_info.name = name + msg.request_info.image_id = resource_requests[cloud_type]['compute'][name].image_id + attributes = ['image_id', 'vcpu_count', 'memory_mb', 'storage_gb'] + + if resource_requests[cloud_type]['compute'][name].has_field('vm_flavor'): + for attr in attributes: + if resource_requests[cloud_type]['compute'][name].vm_flavor.has_field(attr): + setattr(msg.request_info.vm_flavor, + attr, + getattr(resource_requests[cloud_type]['compute'][name].vm_flavor , attr)) + + attributes = ['mempage_size', 'cpu_pinning_policy'] + + if resource_requests[cloud_type]['compute'][name].has_field('guest_epa'): + for attr in attributes: + if resource_requests[cloud_type]['compute'][name].guest_epa.has_field(attr): + setattr(msg.request_info.guest_epa, + attr, + getattr(resource_requests[cloud_type]['compute'][name].guest_epa , attr)) + + if resource_requests[cloud_type]['compute'][name].has_field('allocate_public_address'): + msg.request_info.allocate_public_address = resource_requests[cloud_type]['compute'][name].allocate_public_address + + cnt = 0 + for link in vlinks: + c1 = msg.request_info.connection_points.add() + c1.name = name+"-port-"+str(cnt) + cnt += 1 + c1.virtual_link_id = link + + self.log.info("Sending message :%s", msg) + return msg, xpath.format(event_id) + + @asyncio.coroutine + def configure_cloud_account(self, dts, acct_type): + account_xpath = "C,/rw-cloud:cloud/account" + msg = self.get_cloud_account_msg(acct_type) + self.log.info("Configuring cloud-account: %s",msg) + yield from dts.query_create(account_xpath, + rwdts.Flag.ADVISE, + msg) + + @asyncio.coroutine + def configure_compute_resource_pools(self, dts, resource_type, cloud_type): + pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools" + msg = self.get_compute_pool_msg("virtual-compute", resource_type, cloud_type) + self.log.info("Configuring compute-resource-pool: %s",msg) + yield from dts.query_create(pool_xpath, + rwdts.Flag.ADVISE, + msg) + + + @asyncio.coroutine + def configure_network_resource_pools(self, dts, resource_type, cloud_type): + pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools" + msg = self.get_network_pool_msg("virtual-network", resource_type, cloud_type) + self.log.info("Configuring network-resource-pool: %s",msg) + yield from 
dts.query_create(pool_xpath, + rwdts.Flag.ADVISE, + msg) + + @asyncio.coroutine + def verify_resource_pools_config(self, dts): + pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records" + self.log.debug("Verifying test_create_resource_pools results") + res_iter = yield from dts.query_read(pool_records_xpath,) + for result in res_iter: + response = yield from result + records = response.result.records + #self.assertEqual(len(records), 2) + #names = [i.name for i in records] + #self.assertTrue('virtual-compute' in names) + #self.assertTrue('virtual-network' in names) + for record in records: + self.log.debug("Received Pool Record, Name: %s, Resource Type: %s, Pool Status: %s, Pool Size: %d, Allocated Resources: %d, Free Resources: %d", + record.name, + record.resource_type, + record.pool_status, + record.total_resources, + record.allocated_resources, + record.free_resources) + + @asyncio.coroutine + def read_resource(self, dts, xpath): + self.log.debug("Reading data for XPATH:%s", xpath) + result = yield from dts.query_read(xpath, rwdts.Flag.MERGE) + msg = None + for r in result: + msg = yield from r + self.log.debug("Received data: %s", msg.result) + return msg.result + + @asyncio.coroutine + def reserve_network_resources(self, name, dts, cloud_type): + network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']" + msg,xpath = self.get_network_reserve_msg(name, cloud_type, network_xpath) + self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg)) + yield from dts.query_create(xpath, 0, msg) + return xpath + + + @asyncio.coroutine + def reserve_compute_resources(self, name, dts, cloud_type, vlinks = []): + compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']" + msg,xpath = self.get_compute_reserve_msg(name, cloud_type, compute_xpath, vlinks) + self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg)) + yield from dts.query_create(xpath, 0, msg) + return xpath + + @asyncio.coroutine + def release_network_resources(self, dts, xpath): + self.log.debug("Initiating network resource release for : %s ", xpath) + yield from dts.query_delete(xpath, 0) + + @asyncio.coroutine + def release_compute_resources(self, dts, xpath): + self.log.debug("Initiating compute resource release for : %s ", xpath) + yield from dts.query_delete(xpath, 0) + + @unittest.skip("Skipping test_static_pool_resource_allocation") + def test_static_pool_resource_allocation(self): + self.log.debug("STARTING - test_static_pool_resource_allocation") + tinfo = self.new_tinfo('static_mock') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'mock' + yield from self.wait_tasklets() + yield from self.configure_cloud_account(dts, cloud_type) + + yield from self.configure_network_resource_pools(dts, "static", cloud_type) + yield from self.configure_compute_resource_pools(dts, "static", cloud_type) + yield from self.verify_resource_pools_config(dts) + + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from 
self.verify_resource_pools_config(dts) + + for r in computes: + yield from self.release_compute_resources(dts, r[0]) + + yield from self.release_network_resources(dts,networks[0][0]) + yield from self.verify_resource_pools_config(dts) + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_static_pool_resource_allocation") + + @unittest.skip("Skipping test_dynamic_pool_resource_allocation") + def test_dynamic_pool_resource_allocation(self): + self.log.debug("STARTING - test_dynamic_pool_resource_allocation") + tinfo = self.new_tinfo('dynamic_mock') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'mock' + yield from self.wait_tasklets() + yield from self.configure_cloud_account(dts, cloud_type) + yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type) + yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type) + yield from self.verify_resource_pools_config(dts) + + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from self.verify_resource_pools_config(dts) + + for r in computes: + self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id) + yield from self.release_compute_resources(dts, r[0]) + + yield from self.release_network_resources(dts,networks[0][0]) + yield from self.verify_resource_pools_config(dts) + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_dynamic_pool_resource_allocation") + + @unittest.skip("Skipping test_dynamic_pool_resource_allocation") + def test_dynamic_cloudsim_pool_resource_allocation(self): + self.log.debug("STARTING - test_dynamic_pool_resource_allocation") + tinfo = self.new_tinfo('dynamic_mock') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'cloudsim' + + yield from asyncio.sleep(120, loop=self.loop) + yield from self.configure_cloud_account(dts, cloud_type) + yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type) + yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type) + yield from self.verify_resource_pools_config(dts) + + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from self.verify_resource_pools_config(dts) + + for r in computes: + self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id) + 
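+                # release each reserved compute resource; the shared virtual link is released after this loop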
yield from self.release_compute_resources(dts, r[0]) + + yield from self.release_network_resources(dts,networks[0][0]) + yield from self.verify_resource_pools_config(dts) + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_dynamic_pool_resource_allocation") + + @unittest.skip("Skipping test_static_pool_openstack_resource_allocation") + def test_static_pool_openstack_resource_allocation(self): + self.log.debug("STARTING - test_static_pool_openstack_resource_allocation") + tinfo = self.new_tinfo('static_openstack') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'openstack_static' + yield from self.wait_tasklets() + yield from self.configure_cloud_account(dts, cloud_type) + yield from self.configure_network_resource_pools(dts, "static", cloud_type) + yield from self.configure_compute_resource_pools(dts, "static", cloud_type) + yield from self.verify_resource_pools_config(dts) + + self.log.debug("Creating virtual-network-resources in openstack") + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + self.log.debug("virtual-network-resources successfully created in openstack") + + self.log.debug("Creating virtual-network-compute in openstack") + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-" + str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from self.verify_resource_pools_config(dts) + for r in computes: + self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id) + yield from self.release_compute_resources(dts, r[0]) + + yield from self.release_network_resources(dts,networks[0][0]) + yield from self.verify_resource_pools_config(dts) + self.log.debug("Openstack static resource allocation completed") + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_static_pool_openstack_resource_allocation") + + #@unittest.skip("Skipping test_dynamic_pool_openstack_resource_allocation") + def test_dynamic_pool_openstack_resource_allocation(self): + self.log.debug("STARTING - test_dynamic_pool_openstack_resource_allocation") + tinfo = self.new_tinfo('dynamic_openstack') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'openstack_dynamic' + yield from self.wait_tasklets() + yield from self.configure_cloud_account(dts, cloud_type) + yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type) + yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type) + yield from self.verify_resource_pools_config(dts) + + self.log.debug("Creating virtual-network-resources in openstack") + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + self.log.debug("virtual-network-resources successfully 
created in openstack") + + self.log.debug("Creating virtual-network-compute in openstack") + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-" + str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from self.verify_resource_pools_config(dts) + for r in computes: + self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id) + #yield from self.release_compute_resources(dts, r[0]) + + self.log.debug("Releasing network resource with id: %s", r[1].resource_info.vdu_id) + #yield from self.release_network_resources(dts,networks[0][0]) + #yield from self.verify_resource_pools_config(dts) + self.log.debug("Openstack dynamic resource allocation completed") + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_dynamic_pool_openstack_resource_allocation") + + +def main(): + top_dir = __file__[:__file__.find('/modules/core/')] + build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build') + mc_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build') + launchpad_build_dir = os.path.join(mc_build_dir, 'rwlaunchpad') + cntr_mgr_build_dir = os.path.join(mc_build_dir, 'rwcntmgr') + + if 'MESSAGE_BROKER_DIR' not in os.environ: + os.environ['MESSAGE_BROKER_DIR'] = os.path.join(build_dir, 'rwmsg/plugins/rwmsgbroker-c') + + if 'ROUTER_DIR' not in os.environ: + os.environ['ROUTER_DIR'] = os.path.join(build_dir, 'rwdts/plugins/rwdtsrouter-c') + + if 'RM_DIR' not in os.environ: + os.environ['RM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwresmgrtasklet') + + if 'CAL_PROXY_DIR' not in os.environ: + os.environ['CAL_PROXY_DIR'] = os.path.join(build_dir, 'plugins/rwcalproxytasklet') + + if 'CNTR_MGR_DIR' not in os.environ: + os.environ['CNTR_MGR_DIR'] = os.path.join(cntr_mgr_build_dir, 'plugins/rwcntmgrtasklet') + + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + unittest.main(testRunner=runner) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt new file mode 100644 index 0000000..97aa0ca --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwvnfmtasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
+rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/mon_params.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py new file mode 100644 index 0000000..4bde5b3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwvnfmtasklet import VnfmTasklet +from . import mon_params diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/mon_params.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/mon_params.py new file mode 100644 index 0000000..a6134d2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/mon_params.py @@ -0,0 +1,678 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + +import asyncio +import collections +import types + +import requests +import requests.auth +import tornado.escape + +from requests.packages.urllib3.exceptions import InsecureRequestWarning + +import gi +gi.require_version('RwDts', '1.0') +import rift.tasklets +from gi.repository import ( + RwDts as rwdts, + VnfrYang + ) + +class MonitoringParamError(Exception): + """Monitoring Parameter error""" + pass + + +class JsonPathValueQuerier(object): + def __init__(self, log, json_path): + self._log = log + self._json_path = json_path + self._json_path_expr = None + + try: + import jsonpath_rw + self._json_path_expr = jsonpath_rw.parse(self._json_path) + except Exception as e: + self._log.error("Could not create json_path parser: %s", str(e)) + + def query(self, json_msg): + try: + json_dict = tornado.escape.json_decode(json_msg) + except ValueError as e: + msg = "Failed to convert response into json" + self._log.warning(msg) + raise MonitoringParamError(e) + + if self._json_path_expr is None: + raise MonitoringParamError( + "Parser not created. Unable to extract value from %s" % json_msg + ) + + try: + matches = self._json_path_expr.find(json_dict) + values = [m.value for m in matches] + except Exception as e: + raise MonitoringParamError( + "Failed to run find using json_path (%s) against json_msg: %s" % + (self._json_path, str(e)) + ) + + if len(values) == 0: + raise MonitoringParamError( + "No values found from json_path (%s)" % self._json_path + ) + + if len(values) > 1: + self._log.debug("Got multiple values from json_path (%s). Only returning the first.", + self._json_path) + + return values[0] + + +class ObjectPathValueQuerier(object): + def __init__(self, log, object_path): + self._log = log + self._object_path = object_path + self._object_path_expr = None + + def query(self, object_msg): + try: + object_dict = tornado.escape.json_decode(object_msg) + except ValueError as e: + msg = "Failed to convert response into object" + self._log.warning(msg) + raise MonitoringParamError(e) + + import objectpath + try: + tree = objectpath.Tree(object_dict) + except Exception as e: + msg = "Could not create objectpath tree: %s", str(e) + self._log.error(msg) + raise MonitoringParamError(msg) + + try: + value = tree.execute(self._object_path) + except Exception as e: + raise MonitoringParamError( + "Failed to run execute object_path (%s) against object_msg: %s" % + (self._object_path, str(e)) + ) + + if isinstance(value, types.GeneratorType): + try: + value = next(value) + except Exception as e: + raise MonitoringParamError( + "Failed to get value from objectpath %s execute generator: %s" % + (self._object_path, str(e)) + ) + + if isinstance(value, (list, tuple)): + if len(value) == 0: + raise MonitoringParamError( + "No values found from object_path (%s)" % self._object_path + ) + + elif len(value) > 1: + self._log.debug( + "Got multiple values from object_path (%s). 
" + "Only returning the first.", self._object_path + ) + + # Only take the first element + value = value[0] + + return value + + +class JsonKeyValueQuerier(object): + def __init__(self, log, key): + self._log = log + self._key = key + + def query(self, json_msg): + try: + json_dict = tornado.escape.json_decode(json_msg) + except ValueError as e: + msg = "Failed to convert response into json" + self._log.warning(msg) + raise MonitoringParamError(e) + + if self._key not in json_dict: + msg = "Did not find '{}' key in response: {}".format( + self._key, json_dict + ) + self._log.warning(msg) + raise MonitoringParamError(msg) + + value = json_dict[self._key] + + return value + + +class ValueConverter(object): + def __init__(self, value_type): + self._value_type = value_type + + def _convert_int(self, value): + if isinstance(value, int): + return value + + try: + return int(value) + except (ValueError, TypeError) as e: + raise MonitoringParamError( + "Could not convert value into integer: %s", str(e) + ) + + def _convert_text(self, value): + if isinstance(value, str): + return value + + try: + return str(value) + except (ValueError, TypeError) as e: + raise MonitoringParamError( + "Could not convert value into string: %s", str(e) + ) + + def _convert_decimal(self, value): + if isinstance(value, float): + return value + + try: + return float(value) + except (ValueError, TypeError) as e: + raise MonitoringParamError( + "Could not convert value into string: %s", str(e) + ) + + def convert(self, value): + if self._value_type == "INT": + return self._convert_int(value) + elif self._value_type == "DECIMAL": + return self._convert_decimal(value) + elif self._value_type == "STRING": + return self._convert_text(value) + else: + raise MonitoringParamError("Unknown value type: %s", self._value_type) + + +class HTTPBasicAuth(object): + def __init__(self, username, password): + self.username = username + self.password = password + + +class HTTPEndpoint(object): + def __init__(self, log, loop, ip_address, ep_msg): + self._log = log + self._loop = loop + self._ip_address = ip_address + self._ep_msg = ep_msg + + # This is to suppress HTTPS related warning as we do not support + # certificate verification yet + requests.packages.urllib3.disable_warnings(InsecureRequestWarning) + self._session = requests.Session() + self._auth = None + self._headers = None + + @property + def poll_interval(self): + return self._ep_msg.polling_interval_secs + + @property + def ip_address(self): + return self._ip_address + + @property + def port(self): + return self._ep_msg.port + + @property + def protocol(self): + if self._ep_msg.has_field("https"): + if self._ep_msg.https is True: + return "https" + + return "http" + + @property + def path(self): + return self._ep_msg.path + + @property + def method(self): + if self._ep_msg.has_field("method"): + return self._ep_msg.method + return "GET" + + @property + def username(self): + if self._ep_msg.has_field("username"): + return self._ep_msg.username + + return None + + @property + def headers(self): + if self._headers is None: + headers = {} + for header in self._ep_msg.headers: + if header.has_field("key") and header.has_field("value"): + headers[header.key] = header.value + + self._headers = headers + + return self._headers + + @property + def password(self): + if self._ep_msg.has_field("password"): + return self._ep_msg.password + + return None + + @property + def auth(self): + if self._auth is None: + if self.username is not None and self.password is not None: + self._auth = 
requests.auth.HTTPBasicAuth( + self.username, + self.password, + ) + + return self._auth + + @property + def url(self): + url = "{protocol}://{ip_address}:{port}/{path}".format( + protocol=self.protocol, + ip_address=self.ip_address, + port=self.port, + path=self.path.lstrip("/"), + ) + + return url + + def _poll(self): + try: + resp = self._session.request( + self.method, self.url, timeout=10, auth=self.auth, + headers=self.headers, verify=False + ) + resp.raise_for_status() + except requests.exceptions.RequestException as e: + msg = "Got HTTP error when request monitoring method {} from url {}: {}".format( + self.method, + self.url, + str(e), + ) + self._log.warning(msg) + raise MonitoringParamError(msg) + + return resp.text + + @asyncio.coroutine + def poll(self): + try: + resp = yield from self._loop.run_in_executor( + None, + self._poll, + ) + + except Exception as e: + msg = "Caught exception when polling http endpoint: %s" % str(e) + self._log.warning(msg) + raise MonitoringParamError(msg) + + self._log.debug("Got response from http endpoint (%s): %s", + self.url, resp) + + return resp + + +class MonitoringParam(object): + def __init__(self, log, vnfr_mon_param_msg): + self._log = log + self._vnfr_mon_param_msg = vnfr_mon_param_msg + + self._current_value = None + + self._json_querier = self._create_json_querier() + self._value_converter = ValueConverter(self.value_type) + + def _create_json_querier(self): + if self.msg.json_query_method == "NAMEKEY": + return JsonKeyValueQuerier(self._log, self.msg.name) + elif self.msg.json_query_method == "JSONPATH": + if not self.msg.json_query_params.has_field("json_path"): + msg = "JSONPATH query_method requires json_query_params.json_path to be filled in %s" + self._log.error(msg, self.msg) + raise ValueError(msg) + return JsonPathValueQuerier(self._log, self.msg.json_query_params.json_path) + elif self.msg.json_query_method == "OBJECTPATH": + if not self.msg.json_query_params.has_field("object_path"): + msg = "OBJECTPATH query_method requires json_query_params.object_path to be filled in %s" + self._log.error(msg, self.msg) + raise ValueError(msg) + return ObjectPathValueQuerier(self._log, self.msg.json_query_params.object_path) + else: + msg = "Unknown JSON query method: %s" % self.json_query_method + self._log.error(msg) + raise ValueError(msg) + + @property + def current_value(self): + return self._current_value + + @property + def msg(self): + msg = self._vnfr_mon_param_msg + value_type = msg.value_type + + if self._current_value is None: + return msg + + if value_type == "INT": + msg.value_integer = self._current_value + + elif value_type == "DECIMAL": + msg.value_decimal = self._current_value + + elif value_type == "STRING": + msg.value_string = self._current_value + + else: + self._log.debug("Unknown value_type: %s", value_type) + + return msg + + @property + def path(self): + return self.msg.http_endpoint_ref + + @property + def value_type(self): + return self.msg.value_type + + @property + def json_query_method(self): + return self.msg.json_query_method + + @property + def json_path(self): + return self.msg.json_path_params.json_path + + @property + def name(self): + return self.msg.name + + def extract_value_from_response(self, response_msg): + if self._json_querier is None: + self._log.warning("json querier is not created. 
Cannot extract value form response.") + return + + try: + value = self._json_querier.query(response_msg) + converted_value = self._value_converter.convert(value) + except MonitoringParamError as e: + self._log.warning("Failed to extract value from json response: %s", str(e)) + return + else: + self._current_value = converted_value + + +class EndpointMonParamsPoller(object): + REQUEST_TIMEOUT_SECS = 10 + + def __init__(self, log, loop, endpoint, mon_params): + self._log = log + self._loop = loop + self._endpoint = endpoint + self._mon_params = mon_params + + self._poll_task = None + + @property + def poll_interval(self): + return self._endpoint.poll_interval + + def _apply_response_to_mon_params(self, response_msg): + for mon_param in self._mon_params: + mon_param.extract_value_from_response(response_msg) + + @asyncio.coroutine + def _poll_loop(self): + self._log.debug("Starting http endpoint %s poll loop", self._endpoint.url) + while True: + try: + response = yield from self._endpoint.poll() + self._apply_response_to_mon_params(response) + + except Exception as e: + msg = "Caught exception when polling http endpoint: %s", str(e) + self._log.warning(msg) + + yield from asyncio.sleep(self.poll_interval, loop=self._loop) + + def start(self): + self._log.debug("Got start request for endpoint poller: %s", + self._endpoint.url) + if self._poll_task is not None: + return + + self._poll_task = self._loop.create_task(self._poll_loop()) + + def stop(self): + self._log.debug("Got stop request for endpoint poller: %s", + self._endpoint.url) + if self._poll_task is None: + return + + self._poll_task.cancel() + + self._poll_task = None + + +class VnfMonitoringParamsController(object): + def __init__(self, log, loop, vnfr_id, management_ip, + http_endpoint_msgs, monitoring_param_msgs): + self._log = log + self._loop = loop + self._vnfr_id = vnfr_id + self._management_ip = management_ip + self._http_endpoint_msgs = http_endpoint_msgs + self._monitoring_param_msgs = monitoring_param_msgs + + self._endpoints = self._create_endpoints() + self._mon_params = self._create_mon_params() + + self._endpoint_mon_param_map = self._create_endpoint_mon_param_map( + self._endpoints, self._mon_params + ) + self._endpoint_pollers = self._create_endpoint_pollers(self._endpoint_mon_param_map) + + def _create_endpoints(self): + path_endpoint_map = {} + for ep_msg in self._http_endpoint_msgs: + endpoint = HTTPEndpoint( + self._log, + self._loop, + self._management_ip, + ep_msg, + ) + path_endpoint_map[endpoint.path] = endpoint + + return path_endpoint_map + + def _create_mon_params(self): + mon_params = {} + for mp_msg in self._monitoring_param_msgs: + mon_params[mp_msg.id] = MonitoringParam( + self._log, + mp_msg, + ) + + return mon_params + + def _create_endpoint_mon_param_map(self, endpoints, mon_params): + ep_mp_map = collections.defaultdict(list) + for mp in mon_params.values(): + endpoint = endpoints[mp.path] + ep_mp_map[endpoint].append(mp) + + return ep_mp_map + + def _create_endpoint_pollers(self, ep_mp_map): + pollers = [] + for endpoint, mon_params in ep_mp_map.items(): + poller = EndpointMonParamsPoller( + self._log, + self._loop, + endpoint, + mon_params, + ) + + pollers.append(poller) + + return pollers + + @property + def msgs(self): + msgs = [] + for mp in self.mon_params: + msgs.append(mp.msg) + + return msgs + + @property + def mon_params(self): + return list(self._mon_params.values()) + + @property + def endpoints(self): + return list(self._endpoints.values()) + + def start(self): + """ Start monitoring """ 
+ self._log.debug("Starting monitoring of VNF id: %s", self._vnfr_id) + for poller in self._endpoint_pollers: + poller.start() + + def stop(self): + """ Stop monitoring """ + self._log.debug("Stopping monitoring of VNF id: %s", self._vnfr_id) + for poller in self._endpoint_pollers: + poller.stop() + + +class VnfMonitorDtsHandler(object): + """ VNF monitoring class """ + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:monitoring-param" + + def __init__(self, dts, log, loop, vnfr): + self._dts = dts + self._log = log + self._loop = loop + self._vnfr = vnfr + self._group = None + self._regh = None + + mon_params = [] + for mon_param in self._vnfr.vnfd.msg.monitoring_param: + param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict( + mon_param.as_dict() + ) + mon_params.append(param) + + http_endpoints = [] + for endpoint in self._vnfr.vnfd.msg.http_endpoint: + endpoint = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict( + endpoint.as_dict() + ) + http_endpoints.append(endpoint) + + self._log.debug("Creating monitoring param controller") + self._log.debug(" - Endpoints: %s", http_endpoints) + self._log.debug(" - Monitoring Params: %s", mon_params) + + self._mon_param_controller = VnfMonitoringParamsController( + self._log, + self._loop, + self._vnfr.msg.id, + self._vnfr.msg.mgmt_interface.ip_address, + http_endpoints, + mon_params, + ) + + def start(self): + self._mon_param_controller.start() + + def stop(self): + self._mon_param_controller.stop() + + def xpath(self, id): + """ Monitoing params xpath """ + return (self._vnfr.xpath + + "/vnfr:monitoring-param[vnfr:id = '{}']".format(id)) + + @property + def msg(self): + """ The message with the monitoing params """ + return self._mon_param_controller.msgs + + def register(self): + """ Register with dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + if self._regh is None: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + if action == rwdts.QueryAction.READ: + for msg in self.msg: + xact_info.respond_xpath(rwdts.XactRspCode.MORE, + self.xpath(msg.id), + msg) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + with self._dts.group_create() as self._group: + self._regh = self._group.register(xpath=VnfMonitorDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER) + + def deregister(self): + """ de-register with dts """ + if self._regh is not None: + self._log.debug("Deregistering path %s, regh = %s", + VnfMonitorDtsHandler.XPATH, + self._regh) + self._regh.deregister() + self._regh=None + self._vnfr = None \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py new file mode 100755 index 0000000..d86a7b3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py @@ -0,0 +1,2396 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import collections +import enum +import logging +import uuid +import time +import os.path +import sys +import re + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwVnfrYang', '1.0') +gi.require_version('RwVnfmYang', '1.0') +gi.require_version('RwVlrYang', '1.0') +gi.require_version('RwManifestYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwTypes', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwVnfrYang, + RwVnfmYang, + RwVlrYang, + VnfrYang, + RwManifestYang, + RwBaseYang, + RwResourceMgrYang, + ProtobufC, + RwTypes, +) + +import rift.tasklets + +from . import mon_params + + +class VMResourceError(Exception): + """ VM resource Error""" + pass + + +class VnfRecordError(Exception): + """ VNF record instatiation failed""" + pass + + +class VduRecordError(Exception): + """ VDU record instatiation failed""" + pass + + +class NotImplemented(Exception): + """Not implemented """ + pass + + +class VnfrRecordExistsError(Exception): + """VNFR record already exist with the same VNFR id""" + pass + + +class InternalVirtualLinkRecordError(Exception): + """Internal virtual link record error""" + pass + + +class VDUImageNotFound(Exception): + """VDU Image not found error""" + pass + + +class VirtualDeploymentUnitRecordError(Exception): + """VDU Instantiation failed""" + pass + + +class VMNotReadyError(Exception): + """ VM Not yet received from resource manager """ + pass + + +class VDURecordNotFound(Exception): + """ Could not find a VDU record """ + pass + + +class VirtualNetworkFunctionRecordDescNotFound(Exception): + """ Cannot find Virtual Network Function Record Descriptor """ + pass + + +class VirtualNetworkFunctionDescriptorError(Exception): + """ Virtual Network Function Record Descriptor Error """ + pass + + +class VirtualNetworkFunctionDescriptorNotFound(Exception): + """ Virtual Network Function Record Descriptor Not Found """ + pass + + +class VirtualNetworkFunctionRecordNotFound(Exception): + """ Virtual Network Function Record Not Found """ + pass + + +class VirtualNetworkFunctionDescriptorRefCountExists(Exception): + """ Virtual Network Funtion Descriptor reference count exists """ + pass + + +class VnfrInstantiationFailed(Exception): + """ Virtual Network Funtion Instantiation failed""" + pass + + +class VirtualNetworkFunctionRecordState(enum.Enum): + """ VNFR state """ + INIT = 1 + VL_INIT_PHASE = 2 + VM_INIT_PHASE = 3 + READY = 4 + TERMINATE = 5 + VL_TERMINATE_PHASE = 6 + VDU_TERMINATE_PHASE = 7 + TERMINATED = 7 + FAILED = 10 + + +class VDURecordState(enum.Enum): + """VDU record state """ + INIT = 1 + INSTANTIATING = 2 + RESOURCE_ALLOC_PENDING = 3 + READY = 4 + TERMINATING = 5 + TERMINATED = 6 + FAILED = 10 + + +class VcsComponent(object): + """ VCS Component within the VNF descriptor """ + def __init__(self, dts, log, loop, cluster_name, vcs_handler, component, mangled_name): + self._dts = dts + self._log = log + self._loop = loop + self._component = component + self._cluster_name = cluster_name + self._vcs_handler = vcs_handler + 
self._mangled_name = mangled_name + + @staticmethod + def mangle_name(component_name, vnf_name, vnfd_id): + """ mangled component name """ + return vnf_name + ":" + component_name + ":" + vnfd_id + + @property + def name(self): + """ name of this component""" + return self._mangled_name + + @property + def path(self): + """ The path for this object """ + return("D,/rw-manifest:manifest" + + "/rw-manifest:operational-inventory" + + "/rw-manifest:component" + + "[rw-manifest:component-name = '{}']").format(self.name) + + @property + def instance_xpath(self): + """ The path for this object """ + return("D,/rw-base:vcs" + + "/instances" + + "/instance" + + "[instance-name = '{}']".format(self._cluster_name)) + + @property + def start_comp_xpath(self): + """ start component xpath """ + return (self.instance_xpath + + "/child-n[instance-name = 'START-REQ']") + + def get_start_comp_msg(self, ip_address): + """ start this component """ + start_msg = RwBaseYang.VcsInstance_Instance_ChildN() + start_msg.instance_name = 'START-REQ' + start_msg.component_name = self.name + start_msg.admin_command = "START" + start_msg.ip_address = ip_address + + return start_msg + + @property + def msg(self): + """ Returns the message for this vcs component""" + + vcs_comp_dict = self._component.as_dict() + + def mangle_comp_names(comp_dict): + """ mangle component name with VNF name, id""" + for key, val in comp_dict.items(): + if isinstance(val, dict): + comp_dict[key] = mangle_comp_names(val) + elif isinstance(val, list): + i = 0 + for ent in val: + if isinstance(ent, dict): + val[i] = mangle_comp_names(ent) + else: + val[i] = ent + i += 1 + elif key == "component_name": + comp_dict[key] = VcsComponent.mangle_name(val, + self._vnfd_name, + self._vnfd_id) + return comp_dict + + + mangled_dict = mangle_comp_names(vcs_comp_dict) + msg = RwManifestYang.OpInventory_Component.from_dict(mangled_dict) + return msg + + @asyncio.coroutine + def publish(self, xact): + """ Publishes the VCS component """ + self._log.debug("Publishing the VcsComponent %s, path = %s comp = %s", + self.name, self.path, self.msg) + yield from self._vcs_handler.publish(xact, self.path, self.msg) + + @asyncio.coroutine + def start(self, xact, parent, ip_addr=None): + """ Starts this VCS component """ + # ATTN RV - replace with block add + start_msg = self.get_start_comp_msg(ip_addr) + self._log.debug("starting component %s %s", + self.start_comp_xpath, start_msg) + yield from self._dts.query_create(self.start_comp_xpath, + 0, + start_msg) + self._log.debug("started component %s, %s", + self.start_comp_xpath, start_msg) + + +class VirtualDeploymentUnitRecord(object): + """ Virtual Deployment Unit Record """ + def __init__(self, dts, log, loop, vdud, vnfr, mgmt_intf, cloud_account_name, vdur_id=None): + self._dts = dts + self._log = log + self._loop = loop + self._vdud = vdud + self._vnfr = vnfr + self._mgmt_intf = mgmt_intf + self._cloud_account_name = cloud_account_name + + self._vdur_id = vdur_id or str(uuid.uuid4()) + self._int_intf = [] + self._ext_intf = [] + self._state = VDURecordState.INIT + self._request_id = str(uuid.uuid4()) + self._name = vnfr.name + "." 
+ vdud.name + + self._rm_regh = None + self._vm_resp = None + + def cp_ip_addr(self, cp_name): + """ Find ip address by connection point name """ + if self._vm_resp is not None: + for conn_point in self._vm_resp.connection_points: + if conn_point.name == cp_name: + return conn_point.ip_address + return "0.0.0.0" + + def cp_id(self, cp_name): + """ Find connection point id by connection point name """ + if self._vm_resp is not None: + for conn_point in self._vm_resp.connection_points: + if conn_point.name == cp_name: + return conn_point.connection_point_id + return '' + + + @property + def vdu_id(self): + return self._vdud.id + + @property + def vm_resp(self): + return self._vm_resp + + @property + def name(self): + """ Return this VDUR's name """ + return self._name + + @property + def cloud_account_name(self): + """ Cloud account this VDU should be created in """ + return self._cloud_account_name + + @property + def image_name(self): + """ name that should be used to lookup the image on the CMP """ + return os.path.basename(self._vdud.image) + + @property + def image_checksum(self): + """ name that should be used to lookup the image on the CMP """ + return self._vdud.image_checksum if self._vdud.has_field("image_checksum") else None + + @property + def management_ip(self): + if not self.active: + return None + return self._vm_resp.public_ip if self._vm_resp.has_field('public_ip') else self._vm_resp.management_ip + + @property + def vm_management_ip(self): + if not self.active: + return None + return self._vm_resp.management_ip + + @property + def operational_status(self): + """ Operational status of this VDU""" + op_stats_dict = {"INIT": "init", + "INSTANTIATING": "vm_init_phase", + "RESOURCE_ALLOC_PENDING": "vm_alloc_pending", + "READY": "running", + "FAILED": "failed", + "TERMINATING": "terminated", + "TERMINATED": "terminated", + } + return op_stats_dict[self._state.name] + + @property + def msg(self): + """ VDU message """ + vdu_fields = ["vm_flavor", + "guest_epa", + "vswitch_epa", + "hypervisor_epa", + "host_epa"] + vdu_copy_dict = {k: v for k, v in + self._vdud.as_dict().items() if k in vdu_fields} + vdur_dict = {"id": self._vdur_id, + "vdu_id_ref": self._vdud.id, + "operational_status": self.operational_status, + } + if self.vm_resp is not None: + vdur_dict.update({"vim_id": self.vm_resp.vdu_id, + "flavor_id": self.vm_resp.flavor_id, + "image_id": self.vm_resp.image_id, + }) + + if self.management_ip is not None: + vdur_dict["management_ip"] = self.management_ip + + if self.vm_management_ip is not None: + vdur_dict["vm_management_ip"] = self.vm_management_ip + + vdur_dict.update(vdu_copy_dict) + + icp_list = [] + ii_list = [] + for intf, cp, vlr in self._int_intf: + icp_list.append({"id": cp, + "type_yang": "VPORT", + "ip_address": self.cp_ip_addr(cp)}) + + ii_list.append({"name": intf.name, + "vdur_internal_connection_point_ref": cp, + "virtual_interface": {}}) + + vdur_dict["internal_connection_point"] = icp_list + vdur_dict["internal_interface"] = ii_list + + ei_list = [] + for intf, cp, vlr in self._ext_intf: + ei_list.append({"name": cp, + "vnfd_connection_point_ref": cp, + "virtual_interface": {}}) + self._vnfr.update_cp(cp, self.cp_ip_addr(cp),self.cp_id(cp)) + + vdur_dict["external_interface"] = ei_list + + return RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict) + + @property + def resmgr_path(self): + """ path for resource-mgr""" + return ("D,/rw-resource-mgr:resource-mgmt" + + "/vdu-event" + + "/vdu-event-data[event-id='{}']".format(self._request_id)) 
+ + @property + def vm_flavor_msg(self): + """ VM flavor message """ + flavor = self._vdud.vm_flavor.__class__() + flavor.copy_from(self._vdud.vm_flavor) + + return flavor + + def resmgr_msg(self, config=None): + vdu_fields = ["vm_flavor", + "guest_epa", + "vswitch_epa", + "hypervisor_epa", + "host_epa"] + + self._log.debug("Creating params based on VDUD: %s", self._vdud) + vdu_copy_dict = {k: v for k, v in self._vdud.as_dict().items() if k in vdu_fields} + + vm_create_msg_dict = { + "name": self.name, + "image_name": self.image_name, + } + + if self.image_checksum is not None: + vm_create_msg_dict["image_checksum"] = self.image_checksum + + vm_create_msg_dict["allocate_public_address"] = self._mgmt_intf + if self._vdud.has_field('mgmt_vpci'): + vm_create_msg_dict["mgmt_vpci"] = self._vdud.mgmt_vpci + + self._log.debug("VDUD: %s", self._vdud) + if config is not None: + vm_create_msg_dict['vdu_init'] = {'userdata': config} + + cp_list = [] + for intf, cp, vlr in self._ext_intf: + if (intf.virtual_interface.has_field('vpci') and + intf.virtual_interface.vpci is not None): + cp_list.append({"name": cp, + "virtual_link_id": vlr.network_id, + "type_yang": intf.virtual_interface.type_yang, + "vpci": intf.virtual_interface.vpci}) + else: + cp_list.append({"name": cp, + "virtual_link_id": vlr.network_id, + "type_yang": intf.virtual_interface.type_yang}) + + for intf, cp, vlr in self._int_intf: + if (intf.virtual_interface.has_field('vpci') and + intf.virtual_interface.vpci is not None): + cp_list.append({"name": cp, + "virtual_link_id": vlr.network_id, + "type_yang": intf.virtual_interface.type_yang, + "vpci": intf.virtual_interface.vpci}) + else: + cp_list.append({"name": cp, + "virtual_link_id": vlr.network_id, + "type_yang": intf.virtual_interface.type_yang}) + + vm_create_msg_dict["connection_points"] = cp_list + vm_create_msg_dict.update(vdu_copy_dict) + + msg = RwResourceMgrYang.VDUEventData() + msg.event_id = self._request_id + msg.cloud_account = self.cloud_account_name + msg.request_info.from_dict(vm_create_msg_dict) + return msg + + @asyncio.coroutine + def terminate(self, xact): + """ Delete resource in VIM """ + if self._state != VDURecordState.READY and self._state != VDURecordState.FAILED: + self._log.warning("VDU terminate in not ready state - Ignoring request") + return + + self._state = VDURecordState.TERMINATING + if self._vm_resp is not None: + try: + with self._dts.transaction() as new_xact: + yield from self.delete_resource(new_xact) + except Exception: + self._log.exception("Caught exception while deleting VDU %s", self.vdu_id) + + if self._rm_regh is not None: + self._log.debug("Deregistering resource manager registration handle") + self._rm_regh.deregister() + self._rm_regh = None + + self._state = VDURecordState.TERMINATED + + @asyncio.coroutine + def create_resource(self, xact, vnfr, config=None): + """ Request resource from ResourceMgr """ + def find_cp_by_name(cp_name): + """ Find a connection point by name """ + cp = None + self._log.debug("find_cp_by_name(%s) called", cp_name) + for ext_cp in vnfr._cprs: + self._log.debug("Checking ext cp (%s) called", ext_cp.name) + if ext_cp.name == cp_name: + cp = ext_cp + break + if cp is None: + self._log.debug("Failed to find cp %s in external connection points", + cp_name) + return cp + + def find_internal_vlr_by_cp_name(cp_name): + """ Find the VLR corresponding to the connection point name""" + cp = None + + self._log.debug("find_internal_vlr_by_cp_name(%s) called", + cp_name) + + for int_cp in 
self._vdud.internal_connection_point: + self._log.debug("Checking for int cp %s in internal connection points", + int_cp.id) + if int_cp.id == cp_name: + cp = int_cp + break + + if cp is None: + self._log.debug("Failed to find cp %s in internal connection points", + cp_name) + msg = "Failed to find cp %s in internal connection points" % cp_name + raise VduRecordError(msg) + + # return the VLR associated with the connection point + return vnfr.find_vlr_by_cp(cp_name) + + block = xact.block_create() + + self._log.debug("Executing vm request id: %s, action: create", + self._request_id) + + # Resolve the networks associated external interfaces + for ext_intf in self._vdud.external_interface: + self._log.debug("Resolving external interface name [%s], cp[%s]", + ext_intf.name, ext_intf.vnfd_connection_point_ref) + cp = find_cp_by_name(ext_intf.vnfd_connection_point_ref) + if cp is None: + self._log.debug("Failed to find connection point - %s", + ext_intf.vnfd_connection_point_ref) + continue + self._log.debug("Connection point name [%s], type[%s]", + cp.name, cp.type_yang) + + vlr = vnfr.ext_vlr_by_id(cp.vlr_ref) + + etuple = (ext_intf, cp.name, vlr) + self._ext_intf.append(etuple) + + self._log.debug("Created external interface tuple : %s", etuple) + + # Resolve the networks associated internal interfaces + for intf in self._vdud.internal_interface: + cp_id = intf.vdu_internal_connection_point_ref + self._log.debug("Resolving internal interface name [%s], cp[%s]", + intf.name, cp_id) + + try: + vlr = find_internal_vlr_by_cp_name(cp_id) + except Exception as e: + self._log.debug("Failed to find cp %s in internal VLR list", cp_id) + msg = "Failed to find cp %s in internal VLR list, e = %s" % (cp_id, e) + raise VduRecordError(msg) + + ituple = (intf, cp_id, vlr) + self._int_intf.append(ituple) + + self._log.debug("Created internal interface tuple : %s", ituple) + + resmgr_path = self.resmgr_path + resmgr_msg = self.resmgr_msg(config) + + self._log.debug("Creating new VM request at: %s, params: %s", resmgr_path, resmgr_msg) + block.add_query_create(resmgr_path, resmgr_msg) + + res_iter = yield from block.execute(flags=0, now=True) + + resp = None + + for i in res_iter: + r = yield from i + resp = r.result + + if resp is None or not (resp.has_field('resource_info') and resp.resource_info.has_field('vdu_id')): + raise VMResourceError("Did not get a vm resource response (resp: %s)", resp) + self._log.debug("Got vm request response: %s", resp.resource_info) + return resp.resource_info + + @asyncio.coroutine + def delete_resource(self, xact): + block = xact.block_create() + + self._log.debug("Executing vm request id: %s, action: delete", + self._request_id) + + block.add_query_delete(self.resmgr_path) + + yield from block.execute(flags=0, now=True) + + @asyncio.coroutine + def start_component(self): + """ This VDUR is active """ + self._log.debug("Starting component %s for vdud %s vdur %s", + self._vdud.vcs_component_ref, + self._vdud, + self._vdur_id) + yield from self._vnfr.start_component(self._vdud.vcs_component_ref, + self.vm_resp.management_ip) + + @property + def active(self): + """ Is this VDU active """ + return True if self._state is VDURecordState.READY else False + + @asyncio.coroutine + def instantiation_failed(self): + """ VDU instantiation failed """ + self._log.debug("VDU %s instantiation failed ", self._vdur_id) + self._state = VDURecordState.FAILED + yield from self._vnfr.instantiation_failed() + + @asyncio.coroutine + def vdu_is_active(self): + """ This VDU is active""" + if 
self.active: + self._log.warning("VDU %s was already marked as active", self._vdur_id) + return + + self._log.debug("VDUR id %s in VNFR %s is active", self._vdur_id, self._vnfr.vnfr_id) + + if self._vdud.vcs_component_ref is not None: + yield from self.start_component() + + self._state = VDURecordState.READY + + if self._vnfr.all_vdus_active(): + self._log.debug("Inside vdu_is_active. VNFR is READY. Info: %s", self._vnfr) + yield from self._vnfr.is_ready() + + @asyncio.coroutine + def instantiate(self, xact, vnfr, config=None): + """ Instantiate this VDU """ + self._state = VDURecordState.INSTANTIATING + + @asyncio.coroutine + def on_prepare(xact_info, query_action, ks_path, msg): + """ This VDUR is active """ + self._log.debug("Received VDUR instantiate on_prepare (%s:%s:%s)", + query_action, + ks_path, + msg) + + if (query_action == rwdts.QueryAction.UPDATE or + query_action == rwdts.QueryAction.CREATE): + self._vm_resp = msg + + if msg.resource_state == "active": + # Move this VDU to ready state + yield from self.vdu_is_active() + elif msg.resource_state == "failed": + yield from self.instantiation_failed() + elif query_action == rwdts.QueryAction.DELETE: + self._log.debug("DELETE action in on_prepare for VDUR instantiation, ignoring") + else: + raise NotImplementedError( + "%s action on VirtualDeployementUnitRecord not supported", + query_action) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + try: + vm_resp = yield from self.create_resource(xact, vnfr, config) + self._vm_resp = vm_resp + + self._state = VDURecordState.RESOURCE_ALLOC_PENDING + self._log.debug("Requested VM from resource manager response %s", + vm_resp) + if vm_resp.resource_state == "active": + self._log.debug("Resourcemgr responded wih an active vm resp %s", + vm_resp) + yield from self.vdu_is_active() + self._state = VDURecordState.READY + elif (vm_resp.resource_state == "pending" or + vm_resp.resource_state == "inactive"): + self._log.debug("Resourcemgr responded wih a pending vm resp %s", + vm_resp) + handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + self._rm_regh = yield from self._dts.register(self.resmgr_path + '/resource-info', + flags=rwdts.Flag.SUBSCRIBER, + handler=handler) + else: + self._log.debug("Resourcemgr responded wih an error vm resp %s", + vm_resp) + raise VirtualDeploymentUnitRecordError( + "Failed VDUR instantiation %s " % vm_resp) + + except Exception as e: + import traceback + traceback.print_exc() + self._log.exception(e) + self._log.error("Instantiation of VDU record failed: %s", str(e)) + self._state = VDURecordState.FAILED + yield from self.instantiation_failed() + + +class VlRecordState(enum.Enum): + """ VL Record State """ + INIT = 101 + INSTANTIATION_PENDING = 102 + ACTIVE = 103 + TERMINATE_PENDING = 104 + TERMINATED = 105 + FAILED = 106 + + +class InternalVirtualLinkRecord(object): + """ Internal Virtual Link record """ + def __init__(self, dts, log, loop, ivld_msg, vnfr_name, cloud_account_name): + self._dts = dts + self._log = log + self._loop = loop + self._ivld_msg = ivld_msg + self._vnfr_name = vnfr_name + self._cloud_account_name = cloud_account_name + + self._vlr_req = self.create_vlr() + self._vlr = None + self._state = VlRecordState.INIT + + @property + def vlr_id(self): + """ Find VLR by id """ + return self._vlr_req.id + + @property + def name(self): + """ Name of this VL """ + return self._vnfr_name + "." 
+ self._ivld_msg.name + + @property + def network_id(self): + """ Find VLR by id """ + return self._vlr.network_id if self._vlr else None + + def vlr_path(self): + """ VLR path for this VLR instance""" + return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self.vlr_id) + + def create_vlr(self): + """ Create the VLR record which will be instantiated """ + + vld_fields = ["short_name", + "vendor", + "description", + "version", + "type_yang", + "provider_network"] + + vld_copy_dict = {k: v for k, v in self._ivld_msg.as_dict().items() if k in vld_fields} + + vlr_dict = {"id": str(uuid.uuid4()), + "name": self.name, + "cloud_account": self._cloud_account_name, + } + vlr_dict.update(vld_copy_dict) + + vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict) + return vlr + + @asyncio.coroutine + def instantiate(self, xact, restart_mode=False): + """ Instantiate VL """ + + @asyncio.coroutine + def instantiate_vlr(): + """ Instantiate VLR""" + self._log.debug("Create VL with xpath %s and vlr %s", + self.vlr_path(), self._vlr_req) + + with self._dts.transaction(flags=0) as xact: + block = xact.block_create() + block.add_query_create(xpath=self.vlr_path(), msg=self._vlr_req) + self._log.debug("Executing VL create path:%s msg:%s", + self.vlr_path(), self._vlr_req) + + res_iter = None + try: + res_iter = yield from block.execute() + except Exception: + self._state = VlRecordState.FAILED + self._log.exception("Caught exception while instantial VL") + raise + + for ent in res_iter: + res = yield from ent + self._vlr = res.result + + if self._vlr.operational_status == 'failed': + self._log.debug("VL creation failed for vlr id %s", self._vlr.id) + self._state = VlRecordState.FAILED + raise VnfrInstantiationFailed("instantiation due to VL failure %s" % (self._vlr.id)) + + self._log.info("Created VL with xpath %s and vlr %s", + self.vlr_path(), self._vlr) + + @asyncio.coroutine + def get_vlr(): + """ Get the network id """ + res_iter = yield from self._dts.query_read(self.vlr_path(), rwdts.Flag.MERGE) + vlr = None + for ent in res_iter: + res = yield from ent + vlr = res.result + + if vlr is None: + err = "Failed to get VLR for path %s" % self.vlr_path() + self._log.warn(err) + raise InternalVirtualLinkRecordError(err) + return vlr + + self._state = VlRecordState.INSTANTIATION_PENDING + + if restart_mode: + vl = yield from get_vlr() + if vl is None: + yield from instantiate_vlr() + else: + yield from instantiate_vlr() + + self._state = VlRecordState.ACTIVE + + def vlr_in_vns(self): + """ Is there a VLR record in VNS """ + if (self._state == VlRecordState.ACTIVE or + self._state == VlRecordState.INSTANTIATION_PENDING or + self._state == VlRecordState.FAILED): + return True + + return False + + @asyncio.coroutine + def terminate(self, xact): + """Terminate this VL """ + if not self.vlr_in_vns(): + self._log.debug("Ignoring terminate request for id %s in state %s", + self.vlr_id, self._state) + return + + self._log.debug("Terminating VL with path %s", self.vlr_path()) + self._state = VlRecordState.TERMINATE_PENDING + block = xact.block_create() + block.add_query_delete(self.vlr_path()) + yield from block.execute(flags=0, now=True) + self._state = VlRecordState.TERMINATED + self._log.debug("Terminated VL with path %s", self.vlr_path()) + + +class VirtualNetworkFunctionRecord(object): + """ Virtual Network Function Record """ + def __init__(self, dts, log, loop, cluster_name, vnfm, vcs_handler, vnfr_msg): + self._dts = dts + self._log = log + self._loop = loop + self._cluster_name = cluster_name + 
self._vnfr_id = vnfr_msg.id + self._vnfd_id = vnfr_msg.vnfd_ref + self._vnfm = vnfm + self._vcs_handler = vcs_handler + self._vnfr = vnfr_msg + + self._vnfd = None + self._state = VirtualNetworkFunctionRecordState.INIT + self._ext_vlrs = {} # The list of external virtual links + self._vlrs = [] # The list of internal virtual links + self._vdus = [] # The list of vdu + self._vlr_by_cp = {} + self._cprs = [] + self._inventory = {} + self._create_time = int(time.time()) + self._vnf_mon = None + self._config_status = vnfr_msg.config_status + + def _get_vdur_from_vdu_id(self, vdu_id): + self._log.debug("Finding vdur for vdu_id %s", vdu_id) + self._log.debug("Searching through vdus: %s", self._vdus) + for vdu in self._vdus: + self._log.debug("vdu_id: %s", vdu.vdu_id) + if vdu.vdu_id == vdu_id: + return vdu + + raise VDURecordNotFound("Could not find vdu record from id: %s", vdu_id) + + @property + def operational_status(self): + """ Operational status of this VNFR """ + op_status_map = {"INIT": "init", + "VL_INIT_PHASE": "vl_init_phase", + "VM_INIT_PHASE": "vm_init_phase", + "READY": "running", + "TERMINATE": "terminate", + "VL_TERMINATE_PHASE": "vl_terminate_phase", + "VDU_TERMINATE_PHASE": "vm_terminate_phase", + "TERMINATED": "terminated", + "FAILED": "failed", } + return op_status_map[self._state.name] + + @property + def vnfd_xpath(self): + """ VNFD xpath associated with this VNFR """ + return("C,/vnfd:vnfd-catalog/" + "vnfd:vnfd[vnfd:id = '{}']".format(self._vnfd_id)) + + @property + def vnfd(self): + """ VNFD for this VNFR """ + return self._vnfd + + @property + def vnf_name(self): + """ VNFD name associated with this VNFR """ + return self.vnfd.name + + @property + def name(self): + """ Name of this VNF in the record """ + return self._vnfr.name + + @property + def cloud_account_name(self): + """ Name of the cloud account this VNFR is instantiated in """ + return self._vnfr.cloud_account + + @property + def vnfd_id(self): + """ VNFD Id associated with this VNFR """ + return self.vnfd.id + + @property + def vnfr_id(self): + """ VNFR Id associated with this VNFR """ + return self._vnfr_id + + @property + def member_vnf_index(self): + """ Member VNF index associated with this VNFR """ + return self._vnfr.member_vnf_index_ref + + @property + def config_status(self): + """ Config agent status for this VNFR """ + return self._config_status + + def component_by_name(self, component_name): + """ Find a component by name in the inventory list""" + mangled_name = VcsComponent.mangle_name(component_name, + self.vnf_name, + self.vnfd_id) + return self._inventory[mangled_name] + + @asyncio.coroutine + def start_component(self, component_name, ip_addr): + """ Start a component in the VNFR by name """ + comp = self.component_by_name(component_name) + yield from comp.start(None, None, ip_addr) + + def cp_ip_addr(self, cp_name): + """ Get ip address for connection point """ + self._log.debug("cp_ip_addr()") + for cp in self._cprs: + if cp.name == cp_name and cp.ip_address is not None: + return cp.ip_address + return "0.0.0.0" + + def mgmt_intf_info(self): + """ Get Management interface info for this VNFR """ + mgmt_intf_desc = self.vnfd.msg.mgmt_interface + self._log.debug("Find mgmt interface info for vnfr id %s, mgmt_intf %s", + self._vnfr_id, mgmt_intf_desc) + ip_addr = None + if mgmt_intf_desc.has_field("cp"): + ip_addr = self.cp_ip_addr(mgmt_intf_desc.cp) + elif mgmt_intf_desc.has_field("vdu_id"): + try: + vdur = self._get_vdur_from_vdu_id(mgmt_intf_desc.vdu_id) + ip_addr = vdur.management_ip + 
except VDURecordNotFound: + ip_addr = None + else: + ip_addr = mgmt_intf_desc.ip_address + port = mgmt_intf_desc.port + + self._log.debug("Found mgmt interface for vnfr id %s, %s:%s", + self._vnfr_id, ip_addr, port) + + return ip_addr, port + + @property + def msg(self): + """ Message associated with this VNFR """ + vnfd_fields = ["short_name", "vendor", "description", "version"] + vnfd_copy_dict = {k: v for k, v in self.vnfd.msg.as_dict().items() if k in vnfd_fields} + + mgmt_intf = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MgmtInterface() + ip_address, port = self.mgmt_intf_info() + + if ip_address is not None: + mgmt_intf.ip_address = ip_address + if port is not None: + mgmt_intf.port = port + + vnfr_dict = {"id": self._vnfr_id, + "name": self.name, + "member_vnf_index_ref": self.member_vnf_index, + "vnfd_ref": self.vnfd_id, + "operational_status": self.operational_status, + "cloud_account": self.cloud_account_name, + "config_status" : self._config_status + } + + vnfr_dict.update(vnfd_copy_dict) + + vnfr_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict) + vnfr_msg.mgmt_interface = mgmt_intf + + # Add all the VLRs to VNFR + for vlr in self._vlrs: + ivlr = vnfr_msg.internal_vlr.add() + ivlr.vlr_ref = vlr.vlr_id + + # Add all the VDURs to VDUR + if self._vdus is not None: + for vdu in self._vdus: + vdur = vnfr_msg.vdur.add() + vdur.from_dict(vdu.msg.as_dict()) + + if self.vnfd.msg.mgmt_interface.has_field('dashboard_params'): + vnfr_msg.dashboard_url = self.dashboard_url + + for cpr in self._cprs: + new_cp = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict()) + vnfr_msg.connection_point.append(new_cp) + + if self._vnf_mon is not None: + for monp in self._vnf_mon.msg: + vnfr_msg.monitoring_param.append( + VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict())) + + if self._vnfr.vnf_configuration is not None: + vnfr_msg.vnf_configuration.from_dict(self._vnfr.vnf_configuration.as_dict()) + if (ip_address is not None and + vnfr_msg.vnf_configuration.config_access.mgmt_ip_address is None): + vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = ip_address + + return vnfr_msg + + @property + def dashboard_url(self): + ip, cfg_port = self.mgmt_intf_info() + protocol = 'http' + http_port = 80 + if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('https'): + if self.vnfd.msg.mgmt_interface.dashboard_params.https is True: + protocol = 'https' + http_port = 443 + if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('port'): + http_port = self.vnfd.msg.mgmt_interface.dashboard_params.port + + + url = "{protocol}://{ip_address}:{port}/{path}".format( + protocol=protocol, + ip_address=ip, + port=http_port, + path=self.vnfd.msg.mgmt_interface.dashboard_params.path.lstrip("/"), + ) + + return url + + @property + def xpath(self): + """ path for this VNFR """ + return("D,/vnfr:vnfr-catalog" + "/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id)) + + @asyncio.coroutine + def publish(self, xact): + """ publish this VNFR """ + vnfr = self.msg + self._log.debug("Publishing VNFR path = [%s], record = [%s]", + self.xpath, self.msg) + vnfr.create_time = self._create_time + yield from self._vnfm.publish_vnfr(xact, self.xpath, self.msg) + self._log.debug("Published VNFR path = [%s], record = [%s]", + self.xpath, self.msg) + + @asyncio.coroutine + def create_vls(self): + """ Publish The VLs associated with this VNF """ + self._log.debug("Publishing Internal Virtual Links for vnfd id: %s", + self.vnfd_id) + for ivld_msg in 
self.vnfd.msg.internal_vld: + self._log.debug("Creating internal vld:" + " %s, int_cp_ref = %s", + ivld_msg, ivld_msg.internal_connection_point_ref + ) + vlr = InternalVirtualLinkRecord(dts=self._dts, + log=self._log, + loop=self._loop, + ivld_msg=ivld_msg, + vnfr_name=self.name, + cloud_account_name=self.cloud_account_name + ) + self._vlrs.append(vlr) + + for int_cp in ivld_msg.internal_connection_point_ref: + if int_cp in self._vlr_by_cp: + msg = ("Connection point %s already " + " bound %s" % (int_cp, self._vlr_by_cp[int_cp])) + raise InternalVirtualLinkRecordError(msg) + self._log.debug("Setting vlr %s to internal cp = %s", + vlr, int_cp) + self._vlr_by_cp[int_cp] = vlr + + @asyncio.coroutine + def instantiate_vls(self, xact, restart_mode=False): + """ Instantiate the VLs associated with this VNF """ + self._log.debug("Instantiating Internal Virtual Links for vnfd id: %s", + self.vnfd_id) + + for vlr in self._vlrs: + self._log.debug("Instantiating VLR %s", vlr) + yield from vlr.instantiate(xact, restart_mode) + + def find_vlr_by_cp(self, cp_name): + """ Find the VLR associated with the cp name """ + return self._vlr_by_cp[cp_name] + + def create_vdus(self, vnfr, restart_mode=False): + """ Create the VDUs associated with this VNF """ + + def get_vdur_id(vdud): + """Get the corresponding VDUR's id for the VDUD. This is useful in + case of a restart. + + In restart mode we check for exiting VDUR's ID and use them, if + available. This way we don't end up creating duplicate VDURs + """ + vdur_id = None + + if restart_mode and vdud is not None: + try: + vdur = [vdur.id for vdur in vnfr._vnfr.vdur if vdur.vdu_id_ref == vdud.id] + vdur_id = vdur[0] + except IndexError: + self._log.error("Unable to find a VDUR for VDUD {}", vdud) + + return vdur_id + + self._log.info("Creating VDU's for vnfd id: %s", self.vnfd_id) + for vdu in self.vnfd.msg.vdu: + self._log.debug("Creating vdu: %s", vdu) + vdur_id = get_vdur_id(vdu) + + vdur = VirtualDeploymentUnitRecord( + dts=self._dts, + log=self._log, + loop=self._loop, + vdud=vdu, + vnfr=vnfr, + mgmt_intf=self.has_mgmt_interface(vdu), + cloud_account_name=self.cloud_account_name, + vdur_id=vdur_id + ) + + self._vdus.append(vdur) + + @asyncio.coroutine + def instantiate_vdus(self, xact, vnfr): + """ Instantiate the VDUs associated with this VNF """ + self._log.debug("Instantiating VDU's for vnfd id %s: %s", self.vnfd_id, self._vdus) + + lookup = {vdu.vdu_id:vdu for vdu in self._vdus} + + # Identify any dependencies among the VDUs + dependencies = collections.defaultdict(list) + vdu_id_pattern = re.compile(r"\{\{ vdu\[([^]]+)\]\S* \}\}") + + for vdu in self._vdus: + if vdu._vdud.cloud_init is not None: + for vdu_id in vdu_id_pattern.findall(vdu._vdud.cloud_init): + if vdu_id != vdu.vdu_id: + # This means that vdu.vdu_id depends upon vdu_id, + # i.e. vdu_id must be instantiated before + # vdu.vdu_id. 
+ dependencies[vdu.vdu_id].append(lookup[vdu_id]) + + # Define the terminal states of VDU instantiation + terminal = ( + VDURecordState.READY, + VDURecordState.TERMINATED, + VDURecordState.FAILED, + ) + + datastore = VdurDatastore() + processed = set() + + @asyncio.coroutine + def instantiate_monitor(vdu): + """Monitor the state of the VDU during instantiation + + Arguments: + vdu - a VirtualDeploymentUnitRecord + + """ + # wait for the VDUR to enter a terminal state + while vdu._state not in terminal: + yield from asyncio.sleep(1, loop=self._loop) + + # update the datastore + datastore.update(vdu) + + # add the VDU to the set of processed VDUs + processed.add(vdu.vdu_id) + + @asyncio.coroutine + def instantiate(vdu): + """Instantiate the specified VDU + + Arguments: + vdu - a VirtualDeploymentUnitRecord + + Raises: + if the VDU, or any of the VDUs this VDU depends upon, are + terminated or fail to instantiate properly, a + VirtualDeploymentUnitRecordError is raised. + + """ + for dependency in dependencies[vdu.vdu_id]: + self._log.debug("{}: waiting for {}".format(vdu.vdu_id, dependency.vdu_id)) + + while dependency.vdu_id not in processed: + yield from asyncio.sleep(1, loop=self._loop) + + if not dependency.active: + raise VirtualDeploymentUnitRecordError() + + self._log.debug('instantiating {}'.format(vdu.vdu_id)) + + # Populate the datastore with the current values of the VDU + datastore.add(vdu) + + # Substitute any variables contained in the cloud config script + config = str(vdu._vdud.cloud_init) + + parts = re.split("\{\{ ([^\}]+) \}\}", config) + if len(parts) > 1: + + # Extract the variable names + variables = list() + for variable in parts[1::2]: + variables.append(variable.lstrip('{{').rstrip('}}').strip()) + + # Iterate of the variables and substitute values from the + # datastore. 
+ for variable in variables: + + # Handle a reference to a VDU by ID + if variable.startswith('vdu['): + value = datastore.get(variable) + if value is None: + msg = "Unable to find a substitute for {} in {} cloud-init script" + raise ValueError(msg.format(variable, vdu.vdu_id)) + + config = config.replace("{{ %s }}" % variable, value) + continue + + # Handle a reference to the current VDU + if variable.startswith('vdu'): + value = datastore.get('vdu[{}]'.format(vdu.vdu_id) + variable[3:]) + config = config.replace("{{ %s }}" % variable, value) + continue + + # Handle unrecognized variables + msg = 'unrecognized cloud-config variable: {}' + raise ValueError(msg.format(variable)) + + # Instantiate the VDU + with self._dts.transaction() as xact: + self._log.debug("Instantiating vdu: %s", vdu) + yield from vdu.instantiate(xact, vnfr, config=config) + if self._state == VirtualNetworkFunctionRecordState.FAILED: + self._log.error("Instatiation of VNF %s failed while instantiating vdu %s", + self.vnfr_id, vdu) + + # First create a set of tasks to monitor the state of the VDUs and + # report when they have entered a terminal state + for vdu in self._vdus: + self._loop.create_task(instantiate_monitor(vdu)) + + for vdu in self._vdus: + self._loop.create_task(instantiate(vdu)) + + def has_mgmt_interface(self, vdu): + # ## TODO: Support additional mgmt_interface type options + if self.vnfd.msg.mgmt_interface.vdu_id == vdu.id: + return True + return False + + def vlr_xpath(self, vlr_id): + """ vlr xpath """ + return( + "D,/vlr:vlr-catalog/" + "vlr:vlr[vlr:id = '{}']".format(vlr_id)) + + def ext_vlr_by_id(self, vlr_id): + """ find ext vlr by id """ + return self._ext_vlrs[vlr_id] + + @asyncio.coroutine + def publish_inventory(self, xact): + """ Publish the inventory associated with this VNF """ + self._log.debug("Publishing inventory for VNFR id: %s", self._vnfr_id) + + for component in self.vnfd.msg.component: + self._log.debug("Creating inventory component %s", component) + mangled_name = VcsComponent.mangle_name(component.component_name, + self.vnf_name, + self.vnfd_id + ) + comp = VcsComponent(dts=self._dts, + log=self._log, + loop=self._loop, + cluster_name=self._cluster_name, + vcs_handler=self._vcs_handler, + component=component, + mangled_name=mangled_name, + ) + if comp.name in self._inventory: + self._log.debug("Duplicate entries in inventory %s for vnfr %s", + component, self._vnfd_id) + return + self._log.debug("Adding component %s for vnrf %s", + comp.name, self._vnfr_id) + self._inventory[comp.name] = comp + yield from comp.publish(xact) + + def all_vdus_active(self): + """ Are all VDUS in this VNFR active? """ + for vdu in self._vdus: + if not vdu.active: + return False + + self._log.debug("Inside all_vdus_active. 
Returning True") + return True + + @asyncio.coroutine + def instantiation_failed(self): + """ VNFR instantiation failed """ + self._log.debug("VNFR %s instantiation failed ", self.vnfr_id) + self.set_state(VirtualNetworkFunctionRecordState.FAILED) + + # Update the VNFR with the changed status + with self._dts.transaction(flags=0) as xact: + yield from self.publish(xact) + + @asyncio.coroutine + def is_ready(self): + """ This VNF is ready""" + self._log.debug("VNFR id %s is ready", self.vnfr_id) + + if self._state != VirtualNetworkFunctionRecordState.FAILED: + self.set_state(VirtualNetworkFunctionRecordState.READY) + + # Start the the VNFR monitor + self._vnf_mon = mon_params.VnfMonitorDtsHandler(self._dts, self._log, self._loop, self) + self._vnf_mon.register() + self._vnf_mon.start() + else: + self._log.debug("VNFR id %s ignoring state change", self.vnfr_id) + + # Update the VNFR with the changed status + with self._dts.transaction(flags=0) as xact: + yield from self.publish(xact) + + def update_cp(self, cp_name, ip_address, cp_id): + """Updated the connection point with ip address""" + for cp in self._cprs: + if cp.name == cp_name: + self._log.debug("Setting ip address and id for cp %s, cpr %s with ip %s id %s", + cp_name, cp, ip_address,cp_id) + cp.ip_address = ip_address + cp.connection_point_id = cp_id + return + + err = "No connection point %s found in VNFR id %s" % (cp.name, self._vnfr_id) + self._log.debug(err) + raise VirtualDeploymentUnitRecordError(err) + + def set_state(self, state): + """ Set state for this VNFR""" + self._state = state + + @asyncio.coroutine + def instantiate(self, xact, restart_mode=False): + """ instantiate this VNF """ + self.set_state(VirtualNetworkFunctionRecordState.VL_INIT_PHASE) + + @asyncio.coroutine + def fetch_vlrs(): + """ Fetch VLRs """ + # Iterate over all the connection points in VNFR and fetch the + # associated VLRs + + def cpr_from_cp(cp): + """ Creates a record level connection point from the desciptor cp""" + cp_fields = ["name", "image", "vm-flavor"] + cp_copy_dict = {k: v for k, v in cp.as_dict().items() if k in cp_fields} + cpr_dict = {} + cpr_dict.update(cp_copy_dict) + return VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict) + + self._log.debug("Fetching VLRs for VNFR id = %s, cps = %s", + self._vnfr_id, self._vnfr.connection_point) + + for cp in self._vnfr.connection_point: + cpr = cpr_from_cp(cp) + self._cprs.append(cpr) + self._log.debug("Adding Connection point record %s ", cp) + + vlr_path = self.vlr_xpath(cp.vlr_ref) + self._log.debug("Fetching VLR with path = %s", vlr_path) + res_iter = yield from self._dts.query_read(self.vlr_xpath(cp.vlr_ref), + rwdts.Flag.MERGE) + for i in res_iter: + r = yield from i + d = r.result + self._ext_vlrs[cp.vlr_ref] = d + cpr.vlr_ref = cp.vlr_ref + self._log.debug("Fetched VLR [%s] with path = [%s]", d, vlr_path) + + # Fetch the VNFD associated with the VNFR + self._log.debug("VNFR-ID %s: Fetching vnfds", self._vnfr_id) + self._vnfd = yield from self._vnfm.get_vnfd_ref(self._vnfd_id) + self._log.debug("VNFR-ID %s: Fetched vnfd:%s", self._vnfr_id, self._vnfd) + + assert self.vnfd is not None + + # Fetch External VLRs + self._log.debug("VNFR-ID %s: Fetching vlrs", self._vnfr_id) + yield from fetch_vlrs() + + # Publish inventory + self._log.debug("VNFR-ID %s: Publishing Inventory", self._vnfr_id) + yield from self.publish_inventory(xact) + + # Publish inventory + self._log.debug("VNFR-ID %s: Creating VLs", self._vnfr_id) + yield from self.create_vls() + + # publish the VNFR 
+ self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id) + yield from self.publish(xact) + + # instantiate VLs + self._log.debug("VNFR-ID %s: Instantiate VLs", self._vnfr_id) + try: + yield from self.instantiate_vls(xact, restart_mode) + except Exception: + self._log.exception("VL instantiation failed") + yield from self.instantiation_failed() + return + + self.set_state(VirtualNetworkFunctionRecordState.VM_INIT_PHASE) + + # instantiate VDUs + self._log.debug("VNFR-ID %s: Create VDUs", self._vnfr_id) + self.create_vdus(self, restart_mode) + + # publish the VNFR + self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id) + yield from self.publish(xact) + + # instantiate VDUs + # ToDo: Check if this should be prevented during restart + self._log.debug("VNFR-ID %s: Instantiate VDUs", self._vnfr_id) + _ = self._loop.create_task(self.instantiate_vdus(xact, self)) + + # publish the VNFR + self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id) + yield from self.publish(xact) + + self._log.debug("VNFR-ID %s: Instantiation Done", self._vnfr_id) + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate this virtual network function """ + + self._log.debug("Terminatng VNF id %s", self.vnfr_id) + + self.set_state(VirtualNetworkFunctionRecordState.TERMINATE) + + # stop monitoring + if self._vnf_mon is not None: + self._vnf_mon.stop() + self._vnf_mon.deregister() + self._vnf_mon = None + + @asyncio.coroutine + def terminate_vls(): + """ Terminate VLs in this VNF """ + for vl in self._vlrs: + yield from vl.terminate(xact) + + @asyncio.coroutine + def terminate_vdus(): + """ Terminate VDUS in this VNF """ + for vdu in self._vdus: + yield from vdu.terminate(xact) + + self._log.debug("Terminatng VLs in VNF id %s", self.vnfr_id) + self.set_state(VirtualNetworkFunctionRecordState.VL_TERMINATE_PHASE) + yield from terminate_vls() + + self._log.debug("Terminatng VDUs in VNF id %s", self.vnfr_id) + self.set_state(VirtualNetworkFunctionRecordState.VDU_TERMINATE_PHASE) + yield from terminate_vdus() + + self._log.debug("Terminated VNF id %s", self.vnfr_id) + self.set_state(VirtualNetworkFunctionRecordState.TERMINATED) + + +class VnfdDtsHandler(object): + """ DTS handler for VNFD config changes """ + XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd" + + def __init__(self, dts, log, loop, vnfm): + self._dts = dts + self._log = log + self._loop = loop + self._vnfm = vnfm + self._regh = None + + @asyncio.coroutine + def regh(self): + """ DTS registration handle """ + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for VNFD configuration""" + + def on_apply(dts, acg, xact, action, scratch): + """Apply the configuration""" + self._log.debug("Got VNFM VNFD apply (xact: %s) (action: %s)(scr: %s)", + xact, action, scratch) + # Create/Update a VNFD record + for cfg in self._regh.get_xact_elements(xact): + # Only interested in those VNFD cfgs whose ID was received in prepare callback + if cfg.id in acg.scratch['vnfds']: + self._vnfm.update_vnfd(cfg) + + del acg._scratch['vnfds'][:] + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ on prepare callback """ + self._log.debug("Got on prepare for VNFD (path: %s) (action: %s)", + ks_path.to_xpath(RwVnfmYang.get_schema()), msg) + # RIFT-10161 + try: + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + # Handle deletes in prepare_callback, but adds/updates in apply_callback + if fref.is_field_deleted(): + # Delete an VNFD record + self._log.debug("Deleting VNFD with id %s", 
msg.id) + if self._vnfm.vnfd_in_use(msg.id): + self._log.debug("Cannot delete VNFD in use - %s", msg) + err = "Cannot delete a VNFD in use - %s" % msg + raise VirtualNetworkFunctionDescriptorRefCountExists(err) + # Delete a VNFD record + yield from self._vnfm.delete_vnfd(msg.id) + else: + # Handle actual adds/updates in apply_callback, + # just check if VNFD in use in prepare_callback + if self._vnfm.vnfd_in_use(msg.id): + self._log.debug("Cannot modify an VNFD in use - %s", msg) + err = "Cannot modify an VNFD in use - %s" % msg + raise VirtualNetworkFunctionDescriptorRefCountExists(err) + + # Add this VNFD to scratch to create/update in apply callback + acg._scratch['vnfds'].append(msg.id) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + except Exception: + self._log.exception("VNFD delete/modification failed:%s", msg.id) + acg._acg.prepare_complete_fail(xact_info=xact_info._xact_info, + rs=RwTypes.RwStatus.FAILURE, + errstr="Cannot modify VNFD in use") + + self._log.debug( + "Registering for VNFD config using xpath: %s", + VnfdDtsHandler.XPATH, + ) + acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + # Need a list in scratch to store VNFDs to create/update later + acg._scratch['vnfds'] = list() + self._regh = acg.register( + xpath=VnfdDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + + +class VcsComponentDtsHandler(object): + """ Vcs Component DTS handler """ + XPATH = ("D,/rw-manifest:manifest" + + "/rw-manifest:operational-inventory" + + "/rw-manifest:component") + + def __init__(self, dts, log, loop, vnfm): + self._dts = dts + self._log = log + self._loop = loop + self._regh = None + self._vnfm = vnfm + + @property + def regh(self): + """ DTS registration handle """ + return self._regh + + @asyncio.coroutine + def register(self): + """ Registers VCS component dts publisher registration""" + self._log.debug("VCS Comp publisher DTS handler registering path %s", + VcsComponentDtsHandler.XPATH) + + hdl = rift.tasklets.DTS.RegistrationHandler() + handlers = rift.tasklets.Group.Handler() + with self._dts.group_create(handler=handlers) as group: + self._regh = group.register(xpath=VcsComponentDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.PUBLISHER | + rwdts.Flag.NO_PREP_READ | + rwdts.Flag.FILE_DATASTORE),) + + @asyncio.coroutine + def publish(self, xact, path, msg): + """ Publishes the VCS component """ + self._log.debug("Publishing the VcsComponent xact = %s, %s:%s", + xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Published the VcsComponent to %s xact = %s, %s:%s", + VcsComponentDtsHandler.XPATH, xact, path, msg) + + +class VnfrDtsHandler(object): + """ registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS""" + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr" + + def __init__(self, dts, log, loop, vnfm): + self._dts = dts + self._log = log + self._loop = loop + self._vnfm = vnfm + + self._regh = None + + @property + def regh(self): + """ Return registration handle""" + return self._regh + + @property + def vnfm(self): + """ Return VNF manager instance """ + return self._vnfm + + @asyncio.coroutine + def register(self): + """ Register for vnfr create/update/delete/read requests from dts """ + def on_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Got vnfr commit (xact_info: %s)", xact_info) + return rwdts.MemberRspCode.ACTION_OK + + def on_abort(*args): + """ Abort callback """ + 
self._log.debug("VNF transaction got aborted") + + @asyncio.coroutine + def on_event(dts, g_reg, xact, xact_event, scratch_data): + + @asyncio.coroutine + def instantiate_realloc_vnfr(vnfr): + """Re-populate the vnfm after restart + + Arguments: + vlink + + """ + + with self._dts.transaction(flags=0) as xact: + yield from vnfr.instantiate(xact, restart_mode=True) + + if xact_event == rwdts.MemberEvent.INSTALL: + curr_cfg = self.regh.elements + for cfg in curr_cfg: + vnfr = self.vnfm.create_vnfr(cfg) + self._loop.create_task(instantiate_realloc_vnfr(vnfr)) + + self._log.debug("Got on_event in vnfm") + + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + self._log.debug( + "Got vnfr on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, msg + ) + + if action == rwdts.QueryAction.CREATE: + if not msg.has_field("vnfd_ref"): + err = "Vnfd reference not provided" + self._log.error(err) + raise VnfRecordError(err) + + vnfr = self.vnfm.create_vnfr(msg) + try: + # RIFT-9105: Unable to add a READ query under an existing transaction + # xact = xact_info.xact + with self._dts.transaction(flags=0) as xact: + yield from vnfr.instantiate(xact) + except Exception as e: + self._log.exception(e) + self._log.error("Error while instantiating vnfr:%s", vnfr.vnfr_id) + vnfr.set_state(VirtualNetworkFunctionRecordState.FAILED) + with self._dts.transaction(flags=0) as xact: + yield from vnfr.publish(xact) + elif action == rwdts.QueryAction.DELETE: + schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + vnfr = self._vnfm.get_vnfr(path_entry.key00.id) + + if vnfr is None: + self._log.debug("VNFR id %s not found for delete", path_entry.key00.id) + raise VirtualNetworkFunctionRecordNotFound( + "VNFR id %s", path_entry.key00.id) + + try: + yield from vnfr.terminate(xact_info.xact) + # Unref the VNFD + vnfr.vnfd.unref() + yield from self._vnfm.delete_vnfr(xact_info.xact, vnfr) + except Exception as e: + self._log.exception(e) + self._log.error("Caught exception while deleting vnfr %s", + path_entry.key00.id) + + elif action == rwdts.QueryAction.UPDATE: + schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + vnfr = self._vnfm.get_vnfr(path_entry.key00.id) + + if vnfr is None: + self._log.debug("VNFR id %s not found for update", path_entry.key00.id) + raise VirtualNetworkFunctionRecordNotFound( + "VNFR id %s", path_entry.key00.id) + + self._log.debug("VNFR {} update config status {} (current {})". 
+ format(vnfr.name, msg.config_status, vnfr.config_status)) + if vnfr.config_status != msg.config_status: + # Update the config status and publish + vnfr._config_status = msg.config_status + with self._dts.transaction(flags=0) as xact: + yield from vnfr.publish(xact) + + else: + raise NotImplementedError( + "%s action on VirtualNetworkFunctionRecord not supported", + action) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for VNFR using xpath: %s", + VnfrDtsHandler.XPATH,) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit, + on_prepare=on_prepare,) + handlers = rift.tasklets.Group.Handler(on_event=on_event,) + with self._dts.group_create(handler=handlers) as group: + self._regh = group.register(xpath=VnfrDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.PUBLISHER | + rwdts.Flag.NO_PREP_READ | + rwdts.Flag.CACHE | + rwdts.Flag.FILE_DATASTORE),) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create a VNFR record in DTS with path and message + """ + self._log.debug("Creating VNFR xact = %s, %s:%s", + xact, path, msg) + + self.regh.create_element(path, msg) + self._log.debug("Created VNFR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg): + """ + Update a VNFR record in DTS with path and message + """ + self._log.debug("Updating VNFR xact = %s, %s:%s", + xact, path, msg) + self.regh.update_element(path, msg) + self._log.debug("Updated VNFR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Delete a VNFR record in DTS with path and message + """ + self._log.debug("Deleting VNFR xact = %s, %s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted VNFR xact = %s, %s", xact, path) + + +class VirtualNetworkFunctionDescriptor(object): + """ + Virtual Network Function descriptor class + """ + + def __init__(self, dts, log, loop, vnfm, vnfd): + self._dts = dts + self._log = log + self._loop = loop + + self._vnfm = vnfm + self._vnfd = vnfd + self._ref_count = 0 + + @property + def ref_count(self): + """ Returns the reference count associated with + this Virtual Network Function Descriptor""" + return self._ref_count + + @property + def id(self): + """ Returns vnfd id """ + return self._vnfd.id + + @property + def name(self): + """ Returns vnfd name """ + return self._vnfd.name + + def in_use(self): + """ Returns whether vnfd is in use or not """ + return True if self._ref_count > 0 else False + + def ref(self): + """ Take a reference on this object """ + self._ref_count += 1 + return self._ref_count + + def unref(self): + """ Release reference on this object """ + if self.ref_count < 1: + msg = ("Unref on a VNFD object - vnfd id %s, ref_count = %s" % + (self.id, self._ref_count)) + self._log.critical(msg) + raise VnfRecordError(msg) + self._log.debug("Releasing ref on VNFD %s - curr ref_count:%s", + self.id, self.ref_count) + self._ref_count -= 1 + return self._ref_count + + @property + def msg(self): + """ Return the message associated with this NetworkServiceDescriptor""" + return self._vnfd + + @staticmethod + def path_for_id(vnfd_id): + """ Return path for the passed vnfd_id""" + return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id) + + def path(self): + """ Return the path associated with this NetworkServiceDescriptor""" + return VirtualNetworkFunctionDescriptor.path_for_id(self.id) + + def update(self, vnfd): + """ Update the Virtual Network Function Descriptor """ + if self.in_use(): + 
self._log.error("Cannot update descriptor %s in use", self.id) + raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot update descriptor in use %s" % self.id) + self._vnfd = vnfd + + def delete(self): + """ Delete the Virtual Network Function Descriptor """ + if self.in_use(): + self._log.error("Cannot delete descriptor %s in use", self.id) + raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot delete descriptor in use %s" % self.id) + self._vnfm.delete_vnfd(self.id) + + +class VnfdRefCountDtsHandler(object): + """ The VNFD Ref Count DTS handler """ + XPATH = "D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" + + def __init__(self, dts, log, loop, vnfm): + self._dts = dts + self._log = log + self._loop = loop + self._vnfm = vnfm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def vnfm(self): + """ Return the NS manager instance """ + return self._vnfm + + @asyncio.coroutine + def register(self): + """ Register for VNFD ref count read from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwVnfrYang.get_schema()) + self._log.debug( + "Got VNFD ref count get xact_info: %s, action: %s): %s:%s", + xact_info, action, xpath, msg + ) + + if action == rwdts.QueryAction.READ: + schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount.schema() + path_entry = schema.keyspec_to_entry(ks_path) + vnfd_list = yield from self._vnfm.get_vnfd_refcount(path_entry.key00.vnfd_id_ref) + for xpath, msg in vnfd_list: + self._log.debug("Responding to ref count query path:%s, msg:%s", + xpath, msg) + xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.MORE, + xpath=xpath, + msg=msg) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + raise VnfRecordError("Not supported operation %s" % action) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=VnfdRefCountDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER, + ) + + +class VdurDatastore(object): + """ + This VdurDatastore is intended to expose select information about a VDUR + such that it can be referenced in a cloud config file. The data that is + exposed does not necessarily follow the structure of the data in the yang + model. This is intentional. The data that are exposed are intended to be + agnostic of the yang model so that changes in the model do not necessarily + require changes to the interface provided to the user. It also means that + the user does not need to be familiar with the RIFT.ware yang models. + """ + + def __init__(self): + """Create an instance of VdurDatastore""" + self._vdur_data = dict() + self._pattern = re.compile("vdu\[([^]]+)\]\.(.+)") + + def add(self, vdur): + """Add a new VDUR to the datastore + + Arguments: + vdur - a VirtualDeploymentUnitRecord instance + + Raises: + A ValueError is raised if the VDUR is (1) None or (2) already in + the datastore. 
+ + """ + if vdur.vdu_id is None: + raise ValueError('VDURs are required to have an ID') + + if vdur.vdu_id in self._vdur_data: + raise ValueError('cannot add a VDUR more than once') + + self._vdur_data[vdur.vdu_id] = dict() + + def set_if_not_none(key, attr): + if attr is not None: + self._vdur_data[vdur.vdu_id][key] = attr + + set_if_not_none('name', vdur._vdud.name) + set_if_not_none('mgmt.ip', vdur.vm_management_ip) + + def update(self, vdur): + """Update the VDUR information in the datastore + + Arguments: + vdur - a GI representation of a VDUR + + Raises: + A ValueError is raised if the VDUR is (1) None or (2) not in + the datastore. + + """ + if vdur.vdu_id is None: + raise ValueError('VDURs are required to have an ID') + + if vdur.vdu_id not in self._vdur_data: + raise ValueError('VDUR is not recognized') + + def set_or_delete(key, attr): + if attr is None: + if key in self._vdur_data[vdur.vdu_id]: + del self._vdur_data[vdur.vdu_id][key] + + else: + self._vdur_data[vdur.vdu_id][key] = attr + + set_or_delete('name', vdur._vdud.name) + set_or_delete('mgmt.ip', vdur.vm_management_ip) + + def remove(self, vdur_id): + """Remove all of the data associated with the specified VDUR + + Arguments: + vdur_id - the identifier of a VDUR in the datastore + + Raises: + A ValueError is raised if the VDUR is not contained in the + datastore. + + """ + if vdur_id not in self._vdur_data: + raise ValueError('VDUR is not recognized') + + del self._vdur_data[vdur_id] + + def get(self, expr): + """Retrieve VDUR information from the datastore + + An expression should be of the form, + + vdu[<id>].<attr> + + where <id> is the VDUR ID (an unquoted UUID), and <attr> is the name of + the exposed attribute that the user wishes to retrieve. + + If the requested data is not available, None is returned. + + Arguments: + expr - a string that specifies the data to return + + Raises: + A ValueError is raised if the provided expression cannot be parsed.
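+ + Example (hypothetical VDUR id; assumes 'mgmt.ip' was recorded via add() or + update()): + + datastore.get('vdu[abc123].mgmt.ip')  # '10.0.0.5', or None if unset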
+ + Returns: + The requested data or None + + """ + result = self._pattern.match(expr) + if result is None: + raise ValueError('data expression not recognized ({})'.format(expr)) + + vdur_id, key = result.groups() + + if vdur_id not in self._vdur_data: + return None + + return self._vdur_data[vdur_id].get(key, None) + + +class VnfManager(object): + """ The virtual network function manager class """ + def __init__(self, dts, log, loop, cluster_name): + self._dts = dts + self._log = log + self._loop = loop + self._cluster_name = cluster_name + + self._vcs_handler = VcsComponentDtsHandler(dts, log, loop, self) + self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self) + + self._dts_handlers = [VnfdDtsHandler(dts, log, loop, self), + self._vnfr_handler, + self._vcs_handler, + VnfdRefCountDtsHandler(dts, log, loop, self)] + self._vnfrs = {} + self._vnfds = {} + + @property + def vnfr_handler(self): + """ VNFR dts handler """ + return self._vnfr_handler + + @property + def vcs_handler(self): + """ VCS dts handler """ + return self._vcs_handler + + @asyncio.coroutine + def register(self): + """ Register all static DTS handlers """ + for hdl in self._dts_handlers: + yield from hdl.register() + + @asyncio.coroutine + def run(self): + """ Run this VNFM instance """ + self._log.debug("Run VNFManager - registering static DTS handlers""") + yield from self.register() + + def get_vnfr(self, vnfr_id): + """ get VNFR by vnfr id """ + + if vnfr_id not in self._vnfrs: + raise VnfRecordError("VNFR id %s not found", vnfr_id) + + return self._vnfrs[vnfr_id] + + def create_vnfr(self, vnfr): + """ Create a VNFR instance """ + if vnfr.id in self._vnfrs: + msg = "Vnfr id %s already exists" % vnfr.id + self._log.error(msg) + raise VnfRecordError(msg) + + self._log.info("Create VirtualNetworkFunctionRecord %s from vnfd_id: %s", + vnfr.id, + vnfr.vnfd_ref) + + self._vnfrs[vnfr.id] = VirtualNetworkFunctionRecord( + self._dts, self._log, self._loop, self._cluster_name, self, self.vcs_handler, vnfr + ) + return self._vnfrs[vnfr.id] + + @asyncio.coroutine + def delete_vnfr(self, xact, vnfr): + """ Create a VNFR instance """ + if vnfr.vnfr_id in self._vnfrs: + self._log.debug("Deleting VNFR id %s", vnfr.vnfr_id) + yield from self._vnfr_handler.delete(xact, vnfr.xpath) + del self._vnfrs[vnfr.vnfr_id] + + @asyncio.coroutine + def fetch_vnfd(self, vnfd_id): + """ Fetch VNFDs based with the vnfd id""" + vnfd_path = VirtualNetworkFunctionDescriptor.path_for_id(vnfd_id) + self._log.debug("Fetch vnfd with path %s", vnfd_path) + vnfd = None + + res_iter = yield from self._dts.query_read(vnfd_path, rwdts.Flag.MERGE) + + for ent in res_iter: + res = yield from ent + vnfd = res.result + + if vnfd is None: + err = "Failed to get Vnfd %s" % vnfd_id + self._log.error(err) + raise VnfRecordError(err) + + self._log.debug("Fetched vnfd for path %s, vnfd - %s", vnfd_path, vnfd) + + return vnfd + + @asyncio.coroutine + def get_vnfd_ref(self, vnfd_id): + """ Get Virtual Network Function descriptor for the passed vnfd_id""" + vnfd = yield from self.get_vnfd(vnfd_id) + vnfd.ref() + return vnfd + + @asyncio.coroutine + def get_vnfd(self, vnfd_id): + """ Get Virtual Network Function descriptor for the passed vnfd_id""" + vnfd = None + if vnfd_id not in self._vnfds: + self._log.error("Cannot find VNFD id:%s", vnfd_id) + vnfd = yield from self.fetch_vnfd(vnfd_id) + + if vnfd is None: + self._log.error("Cannot find VNFD id:%s", vnfd_id) + raise VirtualNetworkFunctionDescriptorError("Cannot find VNFD id:%s", vnfd_id) + + if vnfd.id != vnfd_id: + 
self._log.error("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id)) + raise VirtualNetworkFunctionDescriptorError("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id)) + + if vnfd.id not in self._vnfds: + self.create_vnfd(vnfd) + + return self._vnfds[vnfd_id] + + def vnfd_in_use(self, vnfd_id): + """ Is this VNFD in use """ + self._log.debug("Is this VNFD in use - msg:%s", vnfd_id) + if vnfd_id in self._vnfds: + return self._vnfds[vnfd_id].in_use() + return False + + @asyncio.coroutine + def publish_vnfr(self, xact, path, msg): + """ Publish a VNFR """ + self._log.debug("publish_vnfr called with path %s, msg %s", + path, msg) + yield from self.vnfr_handler.update(xact, path, msg) + + def create_vnfd(self, vnfd): + """ Create a virtual network function descriptor """ + self._log.debug("Create virtual network function descriptor - %s", vnfd) + if vnfd.id in self._vnfds: + self._log.error("Cannot create VNFD %s - VNFD id already exists", vnfd) + raise VirtualNetworkFunctionDescriptorError("VNFD already exists-%s", vnfd.id) + + self._vnfds[vnfd.id] = VirtualNetworkFunctionDescriptor(self._dts, + self._log, + self._loop, + self, + vnfd) + return self._vnfds[vnfd.id] + + def update_vnfd(self, vnfd): + """ Update the Virtual Network Function descriptor """ + self._log.debug("Update virtual network function descriptor - %s", vnfd) + + # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511 + for ivld in vnfd.internal_vld: + ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref)) + + if vnfd.id not in self._vnfds: + self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id) + self.create_vnfd(vnfd) + else: + self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd) + self._vnfds[vnfd.id].update(vnfd) + + @asyncio.coroutine + def delete_vnfd(self, vnfd_id): + """ Delete the Virtual Network Function descriptor with the passed id """ + self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id) + if vnfd_id not in self._vnfds: + self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id) + raise VirtualNetworkFunctionDescriptorNotFound("Cannot find %s", vnfd_id) + + # Refuse to delete a descriptor that still has live references + if self._vnfds[vnfd_id].in_use(): + self._log.debug("Cannot delete VNFD id %s reference exists %s", + vnfd_id, + self._vnfds[vnfd_id].ref_count) + raise VirtualNetworkFunctionDescriptorRefCountExists( + "Cannot delete :%s, ref_count:%s", + vnfd_id, + self._vnfds[vnfd_id].ref_count) + + del self._vnfds[vnfd_id] + + def vnfd_refcount_xpath(self, vnfd_id): + """ xpath for ref count entry """ + return (VnfdRefCountDtsHandler.XPATH + + "[rw-vnfr:vnfd-id-ref = '{}']").format(vnfd_id) + + @asyncio.coroutine + def get_vnfd_refcount(self, vnfd_id): + """ Get the vnfd_list from this VNFM""" + vnfd_list = [] + if vnfd_id is None or vnfd_id == "": + for vnfd in self._vnfds.values(): + vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount() + vnfd_msg.vnfd_id_ref = vnfd.id + vnfd_msg.instance_ref_count = vnfd.ref_count + vnfd_list.append((self.vnfd_refcount_xpath(vnfd.id), vnfd_msg)) + elif vnfd_id in self._vnfds: + vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount() + vnfd_msg.vnfd_id_ref = self._vnfds[vnfd_id].id + vnfd_msg.instance_ref_count = self._vnfds[vnfd_id].ref_count + vnfd_list.append((self.vnfd_refcount_xpath(vnfd_id), vnfd_msg)) + + return vnfd_list + + +class VnfmTasklet(rift.tasklets.Tasklet): + """ VNF Manager tasklet class """ + def __init__(self, *args, **kwargs): + super(VnfmTasklet, self).__init__(*args, **kwargs) + self._dts = None + self._vnfm = None + + def start(self):
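+ """ Tasklet start callback: registers this tasklet with DTS using the + RwVnfmYang schema and installs on_dts_state_change as the DTS + state-change callback. """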
+ try: + super(VnfmTasklet, self).start() + self.log.info("Starting VnfmTasklet") + + self.log.setLevel(logging.DEBUG) + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS(self.tasklet_info, + RwVnfmYang.get_schema(), + self.loop, + self.on_dts_state_change) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + except Exception: + print("Caught Exception in VNFM start:", sys.exc_info()[0]) + raise + + def on_instance_started(self): + """ Task insance started callback """ + self.log.debug("Got instance started callback") + + def stop(self): + try: + self._dts.deinit() + except Exception: + print("Caught Exception in VNFM stop:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def init(self): + """ Task init callback """ + try: + vm_parent_name = self.tasklet_info.get_parent_vm_parent_instance_name() + assert vm_parent_name is not None + self._vnfm = VnfManager(self._dts, self.log, self.loop, vm_parent_name) + yield from self._vnfm.run() + except Exception: + print("Caught Exception in VNFM init:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def run(self): + """ Task run callback """ + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py new file mode 100755 index 0000000..0857818 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
+ +import rift.tasklets.rwvnfmtasklet + +class Tasklet(rift.tasklets.rwvnfmtasklet.VnfmTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/test/mon_params_test.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/test/mon_params_test.py new file mode 100755 index 0000000..159971a --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/test/mon_params_test.py @@ -0,0 +1,514 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import asyncio +import base64 +import logging +import os +import sys +import tornado.escape +import tornado.platform.asyncio +import tornado.testing +import tornado.web +import unittest +import xmlrunner + +import rift.tasklets.rwvnfmtasklet.mon_params as mon_params + + +from gi.repository import VnfrYang + +logging.basicConfig(format='TEST %(message)s', level=logging.DEBUG) +logger = logging.getLogger("mon_params_test.py") + + +class AsyncioTornadoTest(tornado.testing.AsyncHTTPTestCase): + def setUp(self): + self._loop = asyncio.get_event_loop() + super().setUp() + + def get_new_ioloop(self): + return tornado.platform.asyncio.AsyncIOMainLoop() + + +class MonParamsPingStatsTest(AsyncioTornadoTest): + ping_path = r"/api/v1/ping/stats" + ping_response = { + 'ping-request-tx-count': 5, + 'ping-response-rx-count': 10 + } + + mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam() + mon_param_msg.from_dict({ + 'id': '1', + 'name': 'ping-request-tx-count', + 'json_query_method': "NAMEKEY", + 'http_endpoint_ref': ping_path, + 'value_type': "INT", + 'description': 'no of ping requests', + 'group_tag': 'Group1', + 'widget_type': 'COUNTER', + 'units': 'packets' + }) + + endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint() + endpoint_msg.from_dict({ + 'path': ping_path, + 'polling_interval_secs': 1, + 'username': 'admin', + 'password': 'password', + 'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}], + }) + + def create_endpoint(self, endpoint_msg): + self.mon_port = self.get_http_port() + endpoint = mon_params.HTTPEndpoint( + logger, + self._loop, + "127.0.0.1", + self.endpoint_msg, + ) + # For each creation, update the descriptor as well + endpoint_msg.port = self.mon_port + + return endpoint + + def create_mon_param(self): + return mon_params.MonitoringParam(logger, self.mon_param_msg) + + def get_app(self): + class PingStatsHandler(tornado.web.RequestHandler): + def get(this): + test_header = this.request.headers.get('TEST_KEY') + if test_header is None or test_header != 'TEST_VALUE': + this.set_status(401) + this.finish() + return None + + auth_header = this.request.headers.get('Authorization') + if auth_header is None or not auth_header.startswith('Basic '): + this.set_status(401) + this.set_header('WWW-Authenticate', 'Basic realm=Restricted') + this._transforms = [] + this.finish() + return None + + auth_header = auth_header.encode('ascii') + auth_decoded = 
base64.decodestring(auth_header[6:]).decode('ascii') + login, password = auth_decoded.split(':', 2) + login = login.encode('ascii') + password = password.encode('ascii') + is_auth = (login == b"admin" and password == b"password") + + if not is_auth: + this.set_status(401) + this.set_header('WWW-Authenticate', 'Basic realm=Restricted') + this._transforms = [] + this.finish() + return None + + this.write(self.ping_response) + + return tornado.web.Application([ + (self.ping_path, PingStatsHandler), + ]) + + def test_value_convert(self): + float_con = mon_params.ValueConverter("DECIMAL") + int_con = mon_params.ValueConverter("INT") + text_con = mon_params.ValueConverter("STRING") + + a = float_con.convert("1.23") + self.assertEqual(a, 1.23) + + a = float_con.convert(1) + self.assertEqual(a, float(1)) + + t = text_con.convert(1.23) + self.assertEqual(t, "1.23") + + t = text_con.convert("asdf") + self.assertEqual(t, "asdf") + + i = int_con.convert(1.23) + self.assertEqual(i, 1) + + def test_json_key_value_querier(self): + kv_querier = mon_params.JsonKeyValueQuerier(logger, "ping-request-tx-count") + value = kv_querier.query(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(value, 5) + + def test_json_path_value_querier(self): + kv_querier = mon_params.JsonPathValueQuerier(logger, '$.ping-request-tx-count') + value = kv_querier.query(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(value, 5) + + def test_object_path_value_querier(self): + kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.*['ping-request-tx-count']") + value = kv_querier.query(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(value, 5) + + def test_endpoint(self): + @asyncio.coroutine + def run_test(): + endpoint = self.create_endpoint(self.endpoint_msg) + resp = yield from endpoint.poll() + resp_json = tornado.escape.json_decode(resp) + self.assertEqual(resp_json["ping-request-tx-count"], 5) + self.assertEqual(resp_json["ping-response-rx-count"], 10) + + self._loop.run_until_complete( + asyncio.wait_for(run_test(), 10, loop=self._loop) + ) + + def test_mon_param(self): + a = self.create_mon_param() + a.extract_value_from_response(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(a.current_value, 5) + self.assertEqual(a.msg.value_integer, 5) + + def test_endpoint_poller(self): + endpoint = self.create_endpoint(self.endpoint_msg) + mon_param = self.create_mon_param() + poller = mon_params.EndpointMonParamsPoller( + logger, self._loop, endpoint, [mon_param], + ) + poller.start() + + self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop)) + self.assertEqual(mon_param.current_value, 5) + + poller.stop() + + def test_params_controller(self): + new_port = self.get_http_port() + # Update port after new port is initialized + self.endpoint_msg.port = new_port + ctrl = mon_params.VnfMonitoringParamsController( + logger, self._loop, "1", "127.0.0.1", + [self.endpoint_msg], [self.mon_param_msg], + ) + ctrl.start() + + self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop)) + + ctrl.stop() + + self.assertEqual(1, len(ctrl.mon_params)) + mon_param = ctrl.mon_params[0] + self.assertEqual(mon_param.current_value, 5) + + +class AsyncioTornadoHttpsTest(tornado.testing.AsyncHTTPSTestCase): + def setUp(self): + self._loop = asyncio.get_event_loop() + super().setUp() + + def get_new_ioloop(self): + return tornado.platform.asyncio.AsyncIOMainLoop() + + +class MonParamsPingStatsHttpsTest(AsyncioTornadoHttpsTest): + ping_path = r"/api/v1/ping/stats" + 
ping_response = { + 'ping-request-tx-count': 5, + 'ping-response-rx-count': 10 + } + + mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam() + mon_param_msg.from_dict({ + 'id': '1', + 'name': 'ping-request-tx-count', + 'json_query_method': "NAMEKEY", + 'http_endpoint_ref': ping_path, + 'value_type': "INT", + 'description': 'no of ping requests', + 'group_tag': 'Group1', + 'widget_type': 'COUNTER', + 'units': 'packets' + }) + + endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint() + endpoint_msg.from_dict({ + 'path': ping_path, + 'https': 'true', + 'polling_interval_secs': 1, + 'username': 'admin', + 'password': 'password', + 'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}], + }) + + def create_endpoint(self, endpoint_msg): + self.mon_port = self.get_http_port() + endpoint = mon_params.HTTPEndpoint( + logger, + self._loop, + "127.0.0.1", + self.endpoint_msg, + ) + # For each creation, update the descriptor as well + endpoint_msg.port = self.mon_port + + return endpoint + + def create_mon_param(self): + return mon_params.MonitoringParam(logger, self.mon_param_msg) + + def get_app(self): + class PingStatsHandler(tornado.web.RequestHandler): + def get(this): + test_header = this.request.headers.get('TEST_KEY') + if test_header is None or test_header != 'TEST_VALUE': + this.set_status(401) + this.finish() + return None + + auth_header = this.request.headers.get('Authorization') + if auth_header is None or not auth_header.startswith('Basic '): + this.set_status(401) + this.set_header('WWW-Authenticate', 'Basic realm=Restricted') + this._transforms = [] + this.finish() + return None + + auth_header = auth_header.encode('ascii') + auth_decoded = base64.decodestring(auth_header[6:]).decode('ascii') + login, password = auth_decoded.split(':', 2) + login = login.encode('ascii') + password = password.encode('ascii') + is_auth = (login == b"admin" and password == b"password") + + if not is_auth: + this.set_status(401) + this.set_header('WWW-Authenticate', 'Basic realm=Restricted') + this._transforms = [] + this.finish() + return None + + this.write(self.ping_response) + + return tornado.web.Application([ + (self.ping_path, PingStatsHandler), + ]) + + def test_value_convert(self): + float_con = mon_params.ValueConverter("DECIMAL") + int_con = mon_params.ValueConverter("INT") + text_con = mon_params.ValueConverter("STRING") + + a = float_con.convert("1.23") + self.assertEqual(a, 1.23) + + a = float_con.convert(1) + self.assertEqual(a, float(1)) + + t = text_con.convert(1.23) + self.assertEqual(t, "1.23") + + t = text_con.convert("asdf") + self.assertEqual(t, "asdf") + + i = int_con.convert(1.23) + self.assertEqual(i, 1) + + def test_json_key_value_querier(self): + kv_querier = mon_params.JsonKeyValueQuerier(logger, "ping-request-tx-count") + value = kv_querier.query(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(value, 5) + + def test_endpoint(self): + @asyncio.coroutine + def run_test(): + endpoint = self.create_endpoint(self.endpoint_msg) + resp = yield from endpoint.poll() + resp_json = tornado.escape.json_decode(resp) + self.assertEqual(resp_json["ping-request-tx-count"], 5) + self.assertEqual(resp_json["ping-response-rx-count"], 10) + + self._loop.run_until_complete( + asyncio.wait_for(run_test(), 10, loop=self._loop) + ) + + def test_mon_param(self): + a = self.create_mon_param() + a.extract_value_from_response(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(a.current_value, 5) + self.assertEqual(a.msg.value_integer, 
5) + + def test_endpoint_poller(self): + endpoint = self.create_endpoint(self.endpoint_msg) + mon_param = self.create_mon_param() + poller = mon_params.EndpointMonParamsPoller( + logger, self._loop, endpoint, [mon_param], + ) + poller.start() + + self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop)) + self.assertEqual(mon_param.current_value, 5) + + poller.stop() + + def test_params_controller(self): + new_port = self.get_http_port() + # Update port after new port is initialized + self.endpoint_msg.port = new_port + ctrl = mon_params.VnfMonitoringParamsController( + logger, self._loop, "1", "127.0.0.1", + [self.endpoint_msg], [self.mon_param_msg], + ) + ctrl.start() + + self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop)) + + ctrl.stop() + + self.assertEqual(1, len(ctrl.mon_params)) + mon_param = ctrl.mon_params[0] + self.assertEqual(mon_param.current_value, 5) + + +class VRouterStatsTest(unittest.TestCase): + system_response = { + "system": { + "cpu": [ + { + "usage": 2.35, + "cpu": "all" + }, + { + "usage": 5.35, + "cpu": "1" + } + ] + } + } + + def test_object_path_value_querier(self): + kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.system.cpu[@.cpu is 'all'].usage") + value = kv_querier.query(tornado.escape.json_encode(self.system_response)) + self.assertEqual(value, 2.35) + + +class TrafsinkStatsTest(unittest.TestCase): + system_response = { + "rw-vnf-base-opdata:port-state": [ + { + "ip": [ + { + "address": "12.0.0.3/24" + } + ], + "rw-trafgen-data:trafgen-info": { + "src_l4_port": 1234, + "dst_l4_port": 5678, + "dst_ip_address": "192.168.1.1", + "tx_state": "Off", + "dst_mac_address": "00:00:00:00:00:00", + "tx_mode": "single-template", + "packet-count": 0, + "tx-cycles": 5478, + "tx_burst": 16, + "src_ip_address": "192.168.0.1", + "pkt_size": 64, + "src_mac_address": "fa:16:3e:07:b1:52", + "descr-string": "", + "tx_rate": 100 + }, + "counters": { + "input-errors": 0, + "output-bytes": 748, + "input-pause-xoff-pkts": 0, + "input-badcrc-pkts": 0, + "input-bytes": 62, + "rx-rate-mbps": 9576, + "output-pause-xoff-pkts": 0, + "input-missed-pkts": 0, + "input-packets": 1, + "output-errors": 0, + "tx-rate-mbps": 0, + "input-pause-xon-pkts": 0, + "output-pause-xon-pkts": 0, + "tx-rate-pps": 0, + "input-mcast-pkts": 0, + "rx-rate-pps": 0, + "output-packets": 6, + "input-nombuf-pkts": 0 + }, + "info": { + "numa-socket": 0, + "transmit-queues": 1, + "privatename": "eth_uio:pci=0000:00:04.0", + "duplex": "full-duplex", + "virtual-fabric": "No", + "link-state": "up", + "rte-port-id": 0, + "fastpath-instance": 1, + "id": 0, + "app-name": "rw_trafgen", + "speed": 10000, + "receive-queues": 1, + "descr-string": "", + "mac": "fa:16:3e:07:b1:52" + }, + "portname": "trafsink_vnfd/cp0", + "queues": { + "rx-queue": [ + { + "packets": 1, + "bytes-MB": 0, + "qid": 0, + "rate-mbps": 0, + "rate-pps": 0 + } + ], + "tx-queue": [ + { + "bytes-MB": 0, + "packets": 6, + "rate-pps": 0, + "errors": 0, + "qid": 0, + "rate-mbps": 0 + } + ] + } + } + ] + } + + def test_object_path_value_querier(self): + kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.portname is 'trafsink_vnfd/cp0'].counters.'rx-rate-mbps'") + value = kv_querier.query(tornado.escape.json_encode(self.system_response)) + self.assertEqual(value, 9576) + + +def main(argv=sys.argv[1:]): + + # The unittest framework requires a program name, so use the name of this + # file instead (we do not want to have to pass a fake program name to main + # when this is called from the interpreter). 
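+ # Note: xmlrunner writes the XML test report into the directory named by + # the RIFT_MODULE_TEST environment variable, so that variable must be set + # before this file is run directly.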
+ unittest.main( + argv=[__file__] + argv, + testRunner=xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + ) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/CMakeLists.txt new file mode 100644 index 0000000..55a29e9 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/CMakeLists.txt @@ -0,0 +1,39 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +cmake_minimum_required(VERSION 2.8) + +include(rift_plugin) + +set(TASKLET_NAME rwvnstasklet) + +set(subdirs yang vala) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/vlmgr/__init__.py + rift/vlmgr/rwvlmgr.py + rift/topmgr/__init__.py + rift/topmgr/rwtopmgr.py + rift/topmgr/rwtopdatastore.py + rift/topmgr/core.py + rift/topmgr/mock.py + rift/topmgr/sdnsim.py + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwvns/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py new file mode 100644 index 0000000..35c44ef --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .rwvnstasklet import VnsTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py new file mode 100755 index 0000000..af2638e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py @@ -0,0 +1,356 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import logging +import os +import sys + +import gi +gi.require_version('RwVnsYang', '1.0') +gi.require_version('RwDts', '1.0') +from gi.repository import ( + RwVnsYang, + RwDts as rwdts, + RwTypes, + ProtobufC, +) + +import rift.tasklets + +from rift.vlmgr import ( + VlrDtsHandler, + VldDtsHandler, + VirtualLinkRecord, +) + +from rift.topmgr import ( + NwtopStaticDtsHandler, + NwtopDiscoveryDtsHandler, + NwtopDataStore, + SdnAccountMgr, +) + + +class SdnInterfaceError(Exception): + """ SDN interface creation Error """ + pass + + +class SdnPluginError(Exception): + """ SDN plugin creation Error """ + pass + + +class VlRecordError(Exception): + """ Vlr Record creation Error """ + pass + + +class VlRecordNotFound(Exception): + """ Vlr Record not found""" + pass + +class SdnAccountExistsError(Exception): + pass + + +class SDNAccountDtsHandler(object): + XPATH = "C,/rw-sdn:sdn-account" + + def __init__(self, dts, log, parent): + self._dts = dts + self._log = log + self._parent = parent + + self._sdn_account = {} + + def _set_sdn_account(self, account): + self._log.info("Setting sdn account: {}".format(account)) + if account.name in self._sdn_account: + self._log.error("SDN Account with name %s already exists. Ignoring config", account.name); + self._sdn_account[account.name] = account + self._parent._acctmgr.set_sdn_account(account) + + def _del_sdn_account(self, account_name): + self._log.info("Deleting sdn account: {}".format(account_name)) + del self._sdn_account[account_name] + + self._parent._acctmgr.del_sdn_account(account_name) + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action) + if action == rwdts.AppconfAction.INSTALL and xact.id is None: + self._log.debug("No xact handle. Skipping apply config") + return RwTypes.RwStatus.SUCCESS + + return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for SDN Account config """ + + self._log.info("SDN Cloud account config received: %s", msg) + + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + if fref.is_field_deleted(): + # Delete the sdn account record + self._del_sdn_account(msg.name) + else: + if msg.name in self._sdn_account: + msg = "Cannot update a SDN account that already was set." 
+ self._log.error(msg) + xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE, + SDNAccountDtsHandler.XPATH, + msg) + raise SdnAccountExistsError(msg) + + # Set the sdn account record + self._set_sdn_account(msg) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + + self._log.debug("Registering for Sdn Account config using xpath: %s", + SDNAccountDtsHandler.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self._dts.appconf_group_create(acg_handler) as acg: + acg.register( + xpath=SDNAccountDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare + ) + + +class VnsManager(object): + """ The Virtual Network Service Manager """ + def __init__(self, dts, log, log_hdl, loop): + self._dts = dts + self._log = log + self._log_hdl = log_hdl + self._loop = loop + self._vlr_handler = VlrDtsHandler(dts, log, loop, self) + self._vld_handler = VldDtsHandler(dts, log, loop, self) + self._sdn_handler = SDNAccountDtsHandler(dts,log,self) + self._acctmgr = SdnAccountMgr(self._log, self._log_hdl, self._loop) + self._nwtopdata_store = NwtopDataStore(log) + self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store) + self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store) + + self._log.info("type %s", type(self._log)) + + self._vlrs = {} + + @asyncio.coroutine + def register_vlr_handler(self): + """ Register vlr DTS handler """ + self._log.debug("Registering DTS VLR handler") + yield from self._vlr_handler.register() + + @asyncio.coroutine + def register_vld_handler(self): + """ Register vlr DTS handler """ + self._log.debug("Registering DTS VLD handler") + yield from self._vld_handler.register() + + @asyncio.coroutine + def register_sdn_handler(self): + """ Register vlr DTS handler """ + self._log.debug("Registering SDN Account config handler") + yield from self._sdn_handler.register() + + @asyncio.coroutine + def register_nwtopstatic_handler(self): + """ Register static NW topology DTS handler """ + self._log.debug("Registering static DTS NW topology handler") + yield from self._nwtopstatic_handler.register() + + @asyncio.coroutine + def register_nwtopdiscovery_handler(self): + """ Register discovery-based NW topology DTS handler """ + self._log.debug("Registering discovery-based DTS NW topology handler") + yield from self._nwtopdiscovery_handler.register() + + @asyncio.coroutine + def register(self): + """ Register all static DTS handlers""" + yield from self.register_sdn_handler() + yield from self.register_vlr_handler() + yield from self.register_vld_handler() + yield from self.register_nwtopstatic_handler() + # Not used for now + yield from self.register_nwtopdiscovery_handler() + + def create_vlr(self, msg): + """ Create VLR """ + if msg.id in self._vlrs: + err = "Vlr id %s already exists" % msg.id + self._log.error(err) + raise VlRecordError(err) + + self._log.info("Creating VirtualLinkRecord %s", msg.id) + self._vlrs[msg.id] = VirtualLinkRecord(self._dts, + self._log, + self._loop, + self, + msg, + msg.res_id + ) + return self._vlrs[msg.id] + + def get_vlr(self, vlr_id): + """ Get VLR by vlr id """ + return self._vlrs[vlr_id] + + @asyncio.coroutine + def delete_vlr(self, vlr_id, xact): + """ Delete VLR with the passed id""" + if vlr_id not in self._vlrs: + err = "Delete Failed - Vlr id %s not found" % vlr_id + self._log.error(err) + raise VlRecordNotFound(err) + + self._log.info("Deleting virtual link id %s", 
vlr_id) + yield from self._vlrs[vlr_id].terminate(xact) + del self._vlrs[vlr_id] + self._log.info("Deleted virtual link id %s", vlr_id) + + def find_vlr_by_vld_id(self, vld_id): + """ Find a VLR matching the VLD Id """ + for vlr in self._vlrs.values(): + if vlr.vld_id == vld_id: + return vlr + return None + + @asyncio.coroutine + def run(self): + """ Run this VNSM instance """ + self._log.debug("Run VNSManager - registering static DTS handlers") + yield from self.register() + + def vld_in_use(self, vld_id): + """ Is this VLD in use """ + return False + + @asyncio.coroutine + def publish_vlr(self, xact, path, msg): + """ Publish a VLR """ + self._log.debug("Publish vlr called with path %s, msg %s", + path, msg) + yield from self._vlr_handler.update(xact, path, msg) + + @asyncio.coroutine + def unpublish_vlr(self, xact, path): + """ Publish a VLR """ + self._log.debug("Unpublish vlr called with path %s", path) + yield from self._vlr_handler.delete(xact, path) + + +class VnsTasklet(rift.tasklets.Tasklet): + """ The VNS tasklet class """ + def __init__(self, *args, **kwargs): + super(VnsTasklet, self).__init__(*args, **kwargs) + + #self.add_log_stderr_handler() + self._dts = None + self._vlr_handler = None + + self._vnsm = None + # A mapping of instantiated vlr_id's to VirtualLinkRecord objects + self._vlrs = {} + + def start(self): + super(VnsTasklet, self).start() + self.log.info("Starting VnsTasklet") + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS(self.tasklet_info, + RwVnsYang.get_schema(), + self.loop, + self.on_dts_state_change) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def on_instance_started(self): + """ The task instance started callback""" + self.log.debug("Got instance started callback") + + def stop(self): + try: + self._dts.deinit() + except Exception: + print("Caught Exception in VNS stop:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def init(self): + """ task init callback""" + self._vnsm = VnsManager(dts=self._dts, + log=self.log, + log_hdl=self.log_hdl, + loop=self.loop) + yield from self._vnsm.run() + + # NSM needs to detect VLD deletion that has active VLR + # self._vld_handler = VldDescriptorConfigDtsHandler( + # self._dts, self.log, self.loop, self._vlrs, + # ) + # yield from self._vld_handler.register() + + @asyncio.coroutine + def run(self): + """ tasklet run callback """ + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py new file mode 100644 index 0000000..6ab8066 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py @@ -0,0 +1,38 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+#
+# Author(s): Ravi Chamarty
+# Creation Date: 10/28/2015
+#
+
+from .rwtopmgr import (
+    NwtopDiscoveryDtsHandler,
+    NwtopStaticDtsHandler,
+    SdnAccountMgr,
+)
+
+from .rwtopdatastore import (
+    NwtopDataStore,
+)
+
+try:
+    from .sdnsim import SdnSim
+    from .core import Topology
+    from .mock import Mock
+
+except ImportError as e:
+    print("Error: Unable to load sdn implementation: %s" % str(e))
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py
new file mode 100644
index 0000000..597bb4a
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py
@@ -0,0 +1,50 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import functools
+
+#from . import exceptions
+
+
+def unsupported(f):
+    @functools.wraps(f)
+    def impl(*args, **kwargs):
+        msg = '{} not supported'.format(f.__name__)
+        raise NotImplementedError(msg)
+
+    return impl
+
+
+class Topology(object):
+    """
+    Topology defines a base class for sdn driver implementations. Note that
+    not all drivers will support the complete set of functionality presented
+    here.
+    """
+
+    @unsupported
+    def get_network_list(self, account):
+        """
+        Returns the discovered network associated with the specified account.
+
+        @param account - a SDN account
+
+        @return a discovered network
+        """
+        pass
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py
new file mode 100644
index 0000000..395b866
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py
@@ -0,0 +1,51 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import mock
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+from . import core
+
+import logging
+
+logger = logging.getLogger('rwsdn.mock')
+
+class Mock(core.Topology):
+    """This class implements the abstract methods in the Topology class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        super(Mock, self).__init__()
+
+        m = mock.MagicMock()
+
+        # Creation of a default topology is not implemented for the mock driver
+
+    def get_network_list(self, account):
+        """
+        Returns the discovered network
+
+        @param account - a SDN account
+
+        """
+        logger.debug("Not yet implemented")
+        return None
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py
new file mode 100644
index 0000000..b952108
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py
@@ -0,0 +1,187 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+
+import gi
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    IetfNetworkYang,
+    IetfNetworkTopologyYang,
+    IetfL2TopologyYang,
+    RwTopologyYang,
+    RwTypes
+)
+import logging
+from gi.repository.RwTypes import RwStatus
+
+
+class NwtopDataStore(object):
+    """ Common datastore for discovered and static topologies """
+    def __init__(self, log):
+        self._networks = {}
+        self._log = log
+
+    """ Deep copy utility for topology class """
+    def rwtop_copy_object(self, obj):
+        dup = obj.__class__()
+        dup.copy_from(obj)
+        return dup
+
+    """ Utility for updating L2 topology attributes """
+    def _update_l2_attr(self, current_elem, new_elem, new_l2_attr, attr_field_name):
+        if not getattr(current_elem, attr_field_name):
+            self._log.debug("Creating L2 attributes..%s", attr_field_name)
+            setattr(current_elem, attr_field_name, new_l2_attr)
+            return
+
+        for l2_attr_field in new_l2_attr.fields:
+            l2_elem_attr_value = getattr(new_l2_attr, l2_attr_field)
+            if l2_elem_attr_value:
+                self._log.debug("Updating L2 attributes..%s", l2_attr_field)
+                setattr(getattr(current_elem, attr_field_name), l2_attr_field, getattr(new_l2_attr, l2_attr_field))
+
+    """ Utility for updating termination point attributes """
+    def _update_termination_point(self, current_node, new_node, new_tp):
+        current_tp = next((x for x in current_node.termination_point if x.tp_id == new_tp.tp_id), None)
+        if current_tp is None:
+            self._log.debug("Creating termination point..%s", new_tp)
+            # Add tp to current node
+            new_tp_dup = self.rwtop_copy_object(new_tp)
+            current_node.termination_point.append(new_tp_dup)
+            return
+        # Update current tp
+        for tp_field in new_tp.fields:
+            tp_field_value = getattr(new_tp, tp_field)
+            if tp_field_value:
+                self._log.debug("Updating termination point..%s", tp_field)
+                if (tp_field == 'tp_id'):
+                    # Don't change key
+                    pass
+                elif (tp_field == 'l2_termination_point_attributes'):
+                    self._update_l2_attr(current_tp, new_tp, tp_field_value, tp_field)
+                elif (tp_field == 'supporting_termination_point'):
+                    self._log.debug(tp_field)
+                else:
+                    self._log.info("Updating termination point..Not
implemented %s", tp_field) + #raise NotImplementedError + + """ Utility for updating link attributes """ + def _update_link(self, current_nw, new_nw, new_link): + current_link = next((x for x in current_nw.link if x.link_id == new_link.link_id), None) + if current_link is None: + # Add link to current nw + self._log.info("Creating link..%s", new_link ) + new_link_dup = self.rwtop_copy_object(new_link) + current_nw.link.append(new_link_dup) + return + # Update current link + for link_field in new_link.fields: + link_field_value = getattr(new_link, link_field) + if link_field_value: + self._log.info("Updating link..%s", link_field) + if (link_field == 'link_id'): + # Don't change key + pass + elif (link_field == 'source'): + if getattr(link_field_value, 'source_node') is not None: + current_link.source.source_node = getattr(link_field_value, 'source_node') + if getattr(link_field_value, 'source_tp') is not None: + current_link.source.source_tp = getattr(link_field_value, 'source_tp') + elif (link_field == 'destination'): + if getattr(link_field_value, 'dest_node') is not None: + current_link.destination.dest_node = link_field_value.dest_node + if getattr(link_field_value, 'dest_tp') is not None: + current_link.destination.dest_tp = link_field_value.dest_tp + elif (link_field == 'l2_link_attributes'): + self._update_l2_attr(current_link, new_link, link_field_value, link_field) + elif (link_field == 'supporting_link'): + self._log.debug(link_field) + else: + self._log.info("Update link..Not implemented %s", link_field) + #raise NotImplementedError + + + """ Utility for updating node attributes """ + def _update_node(self, current_nw, new_nw, new_node): + current_node = next((x for x in current_nw.node if x.node_id == new_node.node_id), None) + if current_node is None: + # Add node to current nw + self._log.debug("Creating node..%s", new_node) + new_node_dup = self.rwtop_copy_object(new_node) + current_nw.node.append(new_node_dup) + return + # Update current node + for node_field in new_node.fields: + node_field_value = getattr(new_node, node_field) + if node_field_value: + self._log.debug("Updating node..%s", node_field) + if (node_field == 'node_id'): + # Don't change key + pass + elif (node_field == 'l2_node_attributes'): + self._update_l2_attr(current_node, new_node, node_field_value, node_field) + elif (node_field == 'termination_point'): + for tp in new_node.termination_point: + self._update_termination_point(current_node, new_node, tp) + elif (node_field == 'supporting-node'): + self._log.debug(node_field) + else: + self._log.info("Update node..Not implemented %s", node_field) + #raise NotImplementedError + + + """ API for retrieving internal network """ + def get_network(self, network_id): + if (network_id not in self._networks): + return None + return self._networks[network_id] + + """ API for creating internal network """ + def create_network(self, key, nw): + self._networks[key] = self.rwtop_copy_object(nw) + + """ API for updating internal network """ + def update_network(self, key, new_nw): + if key not in self._networks: + self._log.debug("Creating network..New_nw %s", new_nw) + self._networks[key] = self.rwtop_copy_object(new_nw) + return + # Iterating thru changed fields + for nw_field in new_nw.fields: + nw_field_value = getattr(new_nw, nw_field) + self._log.debug("Update nw..nw_field %s", nw_field) + if nw_field_value: + if (nw_field == 'node'): + for node in new_nw.node: + self._update_node(self._networks[key], new_nw, node) + elif (nw_field == 'network_id'): + # Don't change 
key + pass + elif (nw_field == 'link'): + for link in new_nw.link: + self._update_link(self._networks[key], new_nw, link) + elif (nw_field == 'network_types'): + self._networks[key].network_types.l2_network = self._networks[key].network_types.l2_network.new() + elif (nw_field == 'l2_network_attributes'): + self._update_l2_attr(self._networks[key], new_nw, nw_field_value, nw_field) + else: + self._log.info("Update nw..Not implemented %s", nw_field) + #raise NotImplementedError + + \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py new file mode 100755 index 0000000..bf78174 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py @@ -0,0 +1,253 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwTypes', '1.0') +gi.require_version('RwSdn', '1.0') +from gi.repository import ( + RwDts as rwdts, + IetfNetworkYang, + IetfNetworkTopologyYang, + IetfL2TopologyYang, + RwTopologyYang, + RwsdnYang, + RwTypes +) + +from gi.repository.RwTypes import RwStatus +import rw_peas +import rift.tasklets + +class SdnGetPluginError(Exception): + """ Error while fetching SDN plugin """ + pass + + +class SdnGetInterfaceError(Exception): + """ Error while fetching SDN interface""" + pass + + +class SdnAccountMgr(object): + """ Implements the interface to backend plugins to fetch topology """ + def __init__(self, log, log_hdl, loop): + self._account = {} + self._log = log + self._log_hdl = log_hdl + self._loop = loop + self._sdn = {} + + self._regh = None + + def set_sdn_account(self,account): + if (account.name in self._account): + self._log.error("SDN Account is already set") + else: + sdn_account = RwsdnYang.SDNAccount() + sdn_account.from_dict(account.as_dict()) + sdn_account.name = account.name + self._account[account.name] = sdn_account + self._log.debug("Account set is %s , %s",type(self._account), self._account) + + def del_sdn_account(self, name): + self._log.debug("Account deleted is %s , %s", type(self._account), name) + del self._account[name] + + def get_sdn_account(self, name): + """ + Creates an object for class RwsdnYang.SdnAccount() + """ + if (name in self._account): + return self._account[name] + else: + self._log.error("ERROR : SDN account is not configured") + + + def get_sdn_plugin(self,name): + """ + Loads rw.sdn plugin via libpeas + """ + if (name in self._sdn): + return self._sdn[name] + account = self.get_sdn_account(name) + plugin_name = getattr(account, account.account_type).plugin_name + self._log.info("SDN plugin being created") + plugin = rw_peas.PeasPlugin(plugin_name, 'RwSdn-1.0') + engine, info, extension = plugin() + + self._sdn[name] = plugin.get_interface("Topology") + try: + rc = self._sdn[name].init(self._log_hdl) + assert rc == RwStatus.SUCCESS + except: + 
self._log.error("ERROR:SDN plugin instantiation failed ") + else: + self._log.info("SDN plugin successfully instantiated") + return self._sdn[name] + + +class NwtopDiscoveryDtsHandler(object): + """ Handles DTS interactions for the Discovered Topology registration """ + DISC_XPATH = "D,/nd:network" + + def __init__(self, dts, log, loop, acctmgr, nwdatastore): + self._dts = dts + self._log = log + self._loop = loop + self._acctmgr = acctmgr + self._nwdatastore = nwdatastore + + self._regh = None + + @property + def regh(self): + """ The registration handle associated with this Handler""" + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for the Discovered Topology path """ + + @asyncio.coroutine + def on_ready(regh, status): + """ On_ready for Discovered Topology registration """ + self._log.debug("PUB reg ready for Discovered Topology handler regn_hdl(%s) status %s", + regh, status) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare for Discovered Topology registration""" + self._log.debug( + "Got topology on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, msg + ) + + if action == rwdts.QueryAction.READ: + + for name in self._acctmgr._account: + _sdnacct = self._acctmgr.get_sdn_account(name) + if (_sdnacct is None): + raise SdnGetPluginError + + _sdnplugin = self._acctmgr.get_sdn_plugin(name) + if (_sdnplugin is None): + raise SdnGetInterfaceError + + rc, nwtop = _sdnplugin.get_network_list(_sdnacct) + #assert rc == RwStatus.SUCCESS + if rc != RwStatus.SUCCESS: + self._log.error("Fetching get network list for SDN Account %s failed", name) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + self._log.debug("Topology: Retrieved network attributes ") + for nw in nwtop.network: + # Add SDN account name + nw.rw_network_attributes.sdn_account_name = name + nw.network_id = name + ':' + nw.network_id + self._log.debug("...Network id %s", nw.network_id) + nw_xpath = ("D,/nd:network[network-id=\'{}\']").format(nw.network_id) + xact_info.respond_xpath(rwdts.XactRspCode.MORE, + nw_xpath, nw) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + return + else: + err = "%s action on discovered Topology not supported" % action + raise NotImplementedError(err) + + self._log.debug("Registering for discovered topology using xpath %s", NwtopDiscoveryDtsHandler.DISC_XPATH) + + handler = rift.tasklets.DTS.RegistrationHandler( + on_ready=on_ready, + on_prepare=on_prepare, + ) + + yield from self._dts.register( + NwtopDiscoveryDtsHandler.DISC_XPATH, + flags=rwdts.Flag.PUBLISHER, + handler=handler + ) + + +class NwtopStaticDtsHandler(object): + """ Handles DTS interactions for the Static Topology registration """ + STATIC_XPATH = "C,/nd:network" + + def __init__(self, dts, log, loop, acctmgr, nwdatastore): + self._dts = dts + self._log = log + self._loop = loop + self._acctmgr = acctmgr + + self._regh = None + self.pending = {} + self._nwdatastore = nwdatastore + + @property + def regh(self): + """ The registration handle associated with this Handler""" + return self._regh + + + @asyncio.coroutine + def register(self): + """ Register for the Static Topology path """ + + @asyncio.coroutine + def prepare_nw_cfg(dts, acg, xact, xact_info, ksp, msg): + """Prepare for application configuration. 
Stash the pending + configuration object for subsequent transaction phases""" + self._log.debug("Prepare Network config received network id %s, msg %s", + msg.network_id, msg) + self.pending[xact.id] = msg + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + def apply_nw_config(dts, acg, xact, action, scratch): + """Apply the pending configuration object""" + if action == rwdts.AppconfAction.INSTALL and xact.id is None: + self._log.debug("No xact handle. Skipping apply config") + return + + if xact.id not in self.pending: + raise KeyError("No stashed configuration found with transaction id [{}]".format(xact.id)) + + try: + if action == rwdts.AppconfAction.INSTALL: + self._nwdatastore.create_network(self.pending[xact.id].network_id, self.pending[xact.id]) + elif action == rwdts.AppconfAction.RECONCILE: + self._nwdatastore.update_network(self.pending[xact.id].network_id, self.pending[xact.id]) + except: + raise + + self._log.debug("Create network config done") + return RwTypes.RwStatus.SUCCESS + + self._log.debug("Registering for static topology using xpath %s", NwtopStaticDtsHandler.STATIC_XPATH) + handler=rift.tasklets.AppConfGroup.Handler( + on_apply=apply_nw_config) + + with self._dts.appconf_group_create(handler=handler) as acg: + acg.register(xpath = NwtopStaticDtsHandler.STATIC_XPATH, + flags = rwdts.Flag.SUBSCRIBER, + on_prepare=prepare_nw_cfg) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py new file mode 100644 index 0000000..03e9c2f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py @@ -0,0 +1,62 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +from . 
import core +import logging + +import xml.etree.ElementTree as etree +import json +from gi.repository import RwTopologyYang as RwTl + +import gi +gi.require_version('RwYang', '1.0') +from gi.repository import RwYang + + +logger = logging.getLogger(__name__) + + +class SdnSim(core.Topology): + def __init__(self): + super(SdnSim, self).__init__() + + def get_network_list(self, account): + """ + Returns the discovered network + + @param account - a SDN account + + """ + topology_source = "/net/boson/home1/rchamart/work/topology/l2_top.xml" + logger.info("Reading topology file: %s", topology_source) + tree = etree.parse(topology_source) + root = tree.getroot() + xmlstr = etree.tostring(root, encoding="unicode") + + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + nwtop = RwTl.YangData_IetfNetwork() + # The top level topology object does not have XML conversion + # Hence going one level down + l2nw1 = nwtop.network.add() + l2nw1.from_xml_v2(model, xmlstr) + + logger.debug("Returning topology data imported from XML file") + + return nwtop \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py new file mode 100644 index 0000000..e40a495 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py @@ -0,0 +1,27 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# +# Author(s): Ravi Chamarty +# Creation Date: 9/2/2015 +# + +from .rwvlmgr import ( + VirtualLinkRecordState, + VirtualLinkRecord, + VlrDtsHandler, + VldDtsHandler, +) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py new file mode 100755 index 0000000..9b8c72d --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py @@ -0,0 +1,468 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + +import asyncio +import enum +import uuid +import time + +import gi +gi.require_version('RwVlrYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +from gi.repository import ( + RwVlrYang, + VldYang, + RwDts as rwdts, + RwResourceMgrYang, +) +import rift.tasklets + + +class NetworkResourceError(Exception): + """ Network Resource Error """ + pass + + +class VlrRecordExistsError(Exception): + """ VLR record already exists""" + pass + + +class VlRecordError(Exception): + """ VLR record error """ + pass + + +class VirtualLinkRecordState(enum.Enum): + """ Virtual Link record state """ + INIT = 1 + INSTANTIATING = 2 + RESOURCE_ALLOC_PENDING = 3 + READY = 4 + TERMINATING = 5 + TERMINATED = 6 + FAILED = 10 + + +class VirtualLinkRecord(object): + """ + Virtual Link Record object + """ + def __init__(self, dts, log, loop, vnsm, vlr_msg, req_id=None): + self._dts = dts + self._log = log + self._loop = loop + self._vnsm = vnsm + self._vlr_msg = vlr_msg + + self._network_id = None + self._network_pool = None + self._create_time = int(time.time()) + if req_id == None: + self._request_id = str(uuid.uuid4()) + else: + self._request_id = req_id + + self._state = VirtualLinkRecordState.INIT + + @property + def vld_xpath(self): + """ VLD xpath associated with this VLR record """ + return "C,/vld:vld-catalog/vld:vld[id='{}']".format(self.vld_id) + + @property + def vld_id(self): + """ VLD id associated with this VLR record """ + return self._vlr_msg.vld_ref + + @property + def vlr_id(self): + """ VLR id associated with this VLR record """ + return self._vlr_msg.id + + @property + def xpath(self): + """ path for this VLR """ + return("D,/vlr:vlr-catalog" + "/vlr:vlr[vlr:id='{}']".format(self.vlr_id)) + + @property + def name(self): + """ Name of this VLR """ + return self._vlr_msg.name + + @property + def cloud_account_name(self): + """ Cloud Account to instantiate the virtual link on """ + return self._vlr_msg.cloud_account + + @property + def resmgr_path(self): + """ path for resource-mgr""" + return ("D,/rw-resource-mgr:resource-mgmt" + + "/vlink-event/vlink-event-data[event-id='{}']".format(self._request_id)) + + @property + def operational_status(self): + """ Operational status of this VLR""" + op_stats_dict = {"INIT": "init", + "INSTANTIATING": "vl_alloc_pending", + "RESOURCE_ALLOC_PENDING": "vl_alloc_pending", + "READY": "running", + "FAILED": "failed", + "TERMINATING": "vl_terminate_pending", + "TERMINATED": "terminated"} + + return op_stats_dict[self._state.name] + + @property + def msg(self): + """ VLR message for this VLR """ + msg = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr() + msg.copy_from(self._vlr_msg) + + if self._network_id is not None: + msg.network_id = self._network_id + + if self._network_pool is not None: + msg.network_pool = self._network_pool + + msg.operational_status = self.operational_status + msg.res_id = self._request_id + + return msg + + @property + def resmgr_msg(self): + """ VLR message for this VLR """ + msg = RwResourceMgrYang.VirtualLinkEventData() + msg.event_id = self._request_id + msg.cloud_account = self.cloud_account_name + msg.request_info.name = self.name + msg.request_info.provider_network.from_dict( + self._vlr_msg.provider_network.as_dict() + ) + return msg + + @asyncio.coroutine + def create_network(self, xact): + """ Create network for this VL """ + self._log.debug("Creating network req-id: %s", self._request_id) + return (yield from self.request_network(xact, "create")) + + @asyncio.coroutine + def 
delete_network(self, xact): + """ Delete network for this VL """ + self._log.debug("Deleting network - req-id: %s", self._request_id) + return (yield from self.request_network(xact, "delete")) + + @asyncio.coroutine + def read_network(self, xact): + """ Read network for this VL """ + self._log.debug("Reading network - req-id: %s", self._request_id) + return (yield from self.request_network(xact, "read")) + + @asyncio.coroutine + def request_network(self, xact, action): + """Request creation/deletion network for this VL """ + + block = xact.block_create() + + if action == "create": + self._log.debug("Creating network path:%s, msg:%s", + self.resmgr_path, self.resmgr_msg) + block.add_query_create(self.resmgr_path, self.resmgr_msg) + elif action == "delete": + self._log.debug("Deleting network path:%s", self.resmgr_path) + if self.resmgr_msg.request_info.name != "multisite": + block.add_query_delete(self.resmgr_path) + elif action == "read": + self._log.debug("Reading network path:%s", self.resmgr_path) + block.add_query_read(self.resmgr_path) + else: + raise VlRecordError("Invalid action %s received" % action) + + res_iter = yield from block.execute(flags=rwdts.Flag.TRACE, now=True) + + resp = None + + if action == "create" or action == "read": + for i in res_iter: + r = yield from i + resp = r.result + + if resp is None or not (resp.has_field('resource_info') and + resp.resource_info.has_field('virtual_link_id')): + raise NetworkResourceError("Did not get a network resource response (resp: %s)", + resp) + + self._log.debug("Got network request response: %s", resp) + + return resp + + @asyncio.coroutine + def instantiate(self, xact, restart=0): + """ Instantiate this VL """ + self._state = VirtualLinkRecordState.INSTANTIATING + + self._log.debug("Instantiating VLR path = [%s]", self.xpath) + + try: + self._state = VirtualLinkRecordState.RESOURCE_ALLOC_PENDING + + if restart == 0: + network_resp = yield from self.create_network(xact) + else: + network_resp = yield from self.read_network(xact) + if network_resp == None: + network_resp = yield from self.create_network(xact) + + # Note network_resp.virtual_link_id is CAL assigned network_id. 
+ + self._network_id = network_resp.resource_info.virtual_link_id + self._network_pool = network_resp.resource_info.pool_name + + self._state = VirtualLinkRecordState.READY + + yield from self.publish(xact) + + except Exception as e: + self._log.error("Instantiatiation of VLR record failed: %s", str(e)) + self._state = VirtualLinkRecordState.FAILED + yield from self.publish(xact) + + @asyncio.coroutine + def publish(self, xact): + """ publish this VLR """ + vlr = self.msg + self._log.debug("Publishing VLR path = [%s], record = [%s]", + self.xpath, self.msg) + vlr.create_time = self._create_time + yield from self._vnsm.publish_vlr(xact, self.xpath, self.msg) + self._log.debug("Published VLR path = [%s], record = [%s]", + self.xpath, self.msg) + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate this VL """ + if self._state not in [VirtualLinkRecordState.READY, VirtualLinkRecordState.FAILED]: + self._log.error("Ignoring terminate for VL %s is in %s state", + self.vlr_id, self._state) + return + + if self._state == VirtualLinkRecordState.READY: + self._log.debug("Terminating VL with id %s", self.vlr_id) + self._state = VirtualLinkRecordState.TERMINATING + try: + yield from self.delete_network(xact) + except Exception: + self._log.exception("Caught exception while deleting VL %s", self.vlr_id) + self._log.debug("Terminated VL with id %s", self.vlr_id) + + yield from self.unpublish(xact) + self._state = VirtualLinkRecordState.TERMINATED + + @asyncio.coroutine + def unpublish(self, xact): + """ Unpublish this VLR """ + self._log.debug("UnPublishing VLR id %s", self.vlr_id) + yield from self._vnsm.unpublish_vlr(xact, self.xpath) + self._log.debug("UnPublished VLR id %s", self.vlr_id) + + +class VlrDtsHandler(object): + """ Handles DTS interactions for the VLR registration """ + XPATH = "D,/vlr:vlr-catalog/vlr:vlr" + + def __init__(self, dts, log, loop, vnsm): + self._dts = dts + self._log = log + self._loop = loop + self._vnsm = vnsm + + self._regh = None + + @property + def regh(self): + """ The registration handle assocaited with this Handler""" + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for the VLR path """ + def on_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Got vlr commit (xact_info: %s)", xact_info) + + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_event(dts, g_reg, xact, xact_event, scratch_data): + @asyncio.coroutine + def instantiate_realloc_vlr(vlr): + """Re-populate the virtual link information after restart + + Arguments: + vlink + + """ + + with self._dts.transaction(flags=0) as xact: + yield from vlr.instantiate(xact, 1) + + if (xact_event == rwdts.MemberEvent.INSTALL): + curr_cfg = self.regh.elements + for cfg in curr_cfg: + vlr = self._vnsm.create_vlr(cfg) + self._loop.create_task(instantiate_realloc_vlr(vlr)) + + self._log.debug("Got on_event") + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare for VLR registration""" + self._log.debug( + "Got vlr on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, msg + ) + + if action == rwdts.QueryAction.CREATE: + vlr = self._vnsm.create_vlr(msg) + with self._dts.transaction(flags=0) as xact: + yield from vlr.instantiate(xact) + self._log.debug("Responding to VL create request path:%s, msg:%s", + vlr.xpath, vlr.msg) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath=vlr.xpath, msg=vlr.msg) + return + elif action == 
rwdts.QueryAction.DELETE: + # Delete an VLR record + schema = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + self._log.debug("Terminating VLR id %s", path_entry.key00.id) + yield from self._vnsm.delete_vlr(path_entry.key00.id, xact_info.xact) + else: + err = "%s action on VirtualLinkRecord not supported" % action + raise NotImplementedError(err) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + return + + self._log.debug("Registering for VLR using xpath: %s", + VlrDtsHandler.XPATH) + + reg_handle = rift.tasklets.DTS.RegistrationHandler( + on_commit=on_commit, + on_prepare=on_prepare, + ) + handlers = rift.tasklets.Group.Handler(on_event=on_event,) + with self._dts.group_create(handler=handlers) as group: + self._regh = group.register( + xpath=VlrDtsHandler.XPATH, + handler=reg_handle, + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ| rwdts.Flag.FILE_DATASTORE, + ) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create a VLR record in DTS with path and message + """ + self._log.debug("Creating VLR xact = %s, %s:%s", + xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Created VLR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg): + """ + Update a VLR record in DTS with path and message + """ + self._log.debug("Updating VLR xact = %s, %s:%s", + xact, path, msg) + self.regh.update_element(path, msg) + self._log.debug("Updated VLR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Delete a VLR record in DTS with path and message + """ + self._log.debug("Deleting VLR xact = %s, %s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted VLR xact = %s, %s", xact, path) + + +class VldDtsHandler(object): + """ DTS handler for the VLD registration """ + XPATH = "C,/vld:vld-catalog/vld:vld" + + def __init__(self, dts, log, loop, vnsm): + self._dts = dts + self._log = log + self._loop = loop + self._vnsm = vnsm + + self._regh = None + + @property + def regh(self): + """ The registration handle assocaited with this Handler""" + return self._regh + + @asyncio.coroutine + def register(self): + """ Register the VLD path """ + @asyncio.coroutine + def on_prepare(xact_info, query_action, ks_path, msg): + """ prepare callback on vld path """ + self._log.debug( + "Got on prepare for VLD update (ks_path: %s) (action: %s)", + ks_path.to_xpath(VldYang.get_schema()), msg) + + schema = VldYang.YangData_Vld_VldCatalog_Vld.schema() + path_entry = schema.keyspec_to_entry(ks_path) + vld_id = path_entry.key00.id + + disabled_actions = [rwdts.QueryAction.DELETE, rwdts.QueryAction.UPDATE] + if query_action not in disabled_actions: + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + return + + vlr = self._vnsm.find_vlr_by_vld_id(vld_id) + if vlr is None: + self._log.debug( + "Did not find an existing VLR record for vld %s. " + "Permitting %s vld action", vld_id, query_action) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + return + + raise VlrRecordExistsError( + "Vlr record(s) exists." + "Cannot perform %s action on VLD." 
% query_action) + + handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + + yield from self._dts.register( + VldDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + handler=handler + ) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rwvnstasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rwvnstasklet.py new file mode 100755 index 0000000..be6058b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rwvnstasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwvnstasklet + +class Tasklet(rift.tasklets.rwvnstasklet.VnsTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py new file mode 100644 index 0000000..b79d310 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py @@ -0,0 +1,333 @@ +#!/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + + +import gi +gi.require_version('RwYang', '1.0') +from gi.repository import IetfL2TopologyYang as l2Tl +from gi.repository import RwTopologyYang as RwTl +from gi.repository import RwYang +from xml.etree import ElementTree as etree +import subprocess +import logging + +from create_stackedl2topology import MyL2Network +from create_stackedl2topology import MyL2Topology + +class MyNwNotFound(Exception): + pass + +class MyNodeNotFound(Exception): + pass + +class MyTpNotFound(Exception): + pass + +class MyProvNetwork(object): + def __init__(self, nwtop, l2top, log): + self.next_mac = 11 + self.log = log + self.provnet1 = nwtop.network.add() + self.provnet1.network_id = "ProviderNetwork-1" + + self.nwtop = nwtop + self.l2top = l2top + + # L2 Network type augmentation + self.provnet1.network_types.l2_network = self.provnet1.network_types.l2_network.new() + # L2 Network augmentation + self.provnet1.l2_network_attributes.name = "Rift LAB SFC-Demo Provider Network" + ul_net = self.provnet1.supporting_network.add() + try: + ul_net.network_ref = l2top.find_nw_id("L2HostNetwork-1") + self.l2netid = ul_net.network_ref + except TypeError: + raise MyNwNotFound() + + def get_nw_id(self, nw_name): + for nw in self.nwtop.network: + if (nw.network_id == nw_name): + return nw.network_id + + def get_node(self, node_name): + _node_id = "urn:Rift:Lab:" + node_name + for node in self.provnet1.node: + if (node.node_id == _node_id): + return node + + def get_tp(self, node, tp_name): + _tp_id = node.node_id + ":" + tp_name + for tp in node.termination_point : + if (tp.tp_id == _tp_id): + return tp + + def get_link(self, link_name): + for link in nw.link : + if (link.l2_link_attributes.name == link_name): + return link + + def create_node(self, node_name, description, mgmt_ip_addr = None, sup_node = None): + logging.debug("Creating node %s", node_name) + node = self.provnet1.node.add() + node.node_id = "urn:Rift:Lab:" + node_name + # L2 Node augmentation + node.l2_node_attributes.name = node_name + node.l2_node_attributes.description = description + if (mgmt_ip_addr is not None): + node.l2_node_attributes.management_address.append(mgmt_ip_addr) + if (sup_node is not None): + logging.debug(" Adding support node %s", sup_node.node_id) + ul_node = node.supporting_node.add() + ul_node.network_ref = self.l2netid + ul_node.node_ref = sup_node.node_id + return node + + def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, vlan = False): + logging.debug(" Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp) + tp = node.termination_point.add() + tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp) + # L2 TP augmentation + tp.l2_termination_point_attributes.description = cfg_tp + tp.l2_termination_point_attributes.maximum_frame_size = 1500 + tp.l2_termination_point_attributes.mac_address = "00:4f:9c:ab:dd:" + str(self.next_mac) + self.next_mac = self.next_mac + 1 + if (vlan == True): + tp.l2_termination_point_attributes.eth_encapsulation = "l2t:vlan" + else: + tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet" + if ((sup_tp is not None) and (sup_node is not None)): + logging.debug(" Adding support terminaton point %s", sup_tp.tp_id) + ul_tp = tp.supporting_termination_point.add() + ul_tp.network_ref = self.l2netid + ul_tp.node_ref = sup_node.node_id + ul_tp.tp_ref = sup_tp.tp_id + return tp + + def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2): + logging.debug("Creating links %s %s", link_name1, link_name2) + lnk1= self.provnet1.link.add() + 
lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description) + lnk1.source.source_node = node1.node_id + lnk1.source.source_tp = tp1.tp_id + lnk1.destination.dest_node = node2.node_id + lnk1.destination.dest_tp = tp2.tp_id + # L2 link augmentation + lnk1.l2_link_attributes.name = link_name1 + #lnk1.l2_link_attributes.rate = 1000000000.00 + + lnk2= self.provnet1.link.add() + lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description) + lnk2.source.source_node = node2.node_id + lnk2.source.source_tp = tp2.tp_id + lnk2.destination.dest_node = node1.node_id + lnk2.destination.dest_tp = tp1.tp_id + # L2 link augmentation + lnk2.l2_link_attributes.name = link_name2 + #lnk2.l2_link_attributes.rate = 1000000000.00 + return lnk1, lnk2 + +class MyProvTopology(MyProvNetwork): + def __init__(self, nwtop, l2top, log): + super(MyProvTopology, self).__init__(nwtop, l2top, log) + + def find_nw_id(self, nw_name): + return self.get_nw_id(nw_name) + + def find_node(self, node_name): + return self.get_node(node_name) + + def find_tp(self, node, tp_name): + return self.get_tp(node, tp_name) + + def find_link(self, link_name): + return self.get_link(link_name) + + def setup_nodes(self): + logging.debug("Setting up nodes") + self.pseudo_mgmt_node = self.create_node("Pseudo_mgmt_node", "Pseudo node for VM mgmt network LAN") + self.pseudo_dp_node = self.create_node("Pseudo_DP_node", "Pseudo node for DP network LAN") + + self.g118_node = self.l2top.find_node("Grunt118") + if (self.g118_node is None): + raise MyNodeNotFound() + self.g44_node = self.l2top.find_node("Grunt44") + if (self.g44_node is None): + raise MyNodeNotFound() + self.g120_node = self.l2top.find_node("Grunt120") + if (self.g120_node is None): + raise MyNodeNotFound() + + self.g118_br_int = self.create_node("G118_Br_Int","OVS Integration bridge on Grunt118", mgmt_ip_addr="10.66.4.118", sup_node = self.g118_node) + self.g118_br_eth1 = self.create_node("G118_Br_Eth1","OVS Integration bridge on Grunt118", mgmt_ip_addr="10.66.4.118", sup_node = self.g118_node) + # eth2 on g118 is being used in PCI passthrough mode + + self.g44_br_int = self.create_node("G44_Br_Int","OVS Integration bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node) + self.g44_br_eth1 = self.create_node("G44_Br_Eth1","OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node) + self.g44_br_eth2 = self.create_node("G44_Br_Eth2","OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node) + self.g44_br_eth3 = self.create_node("G44_Br_Eth3","OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node) + + self.g120_br_int = self.create_node("G120_Br_Int","OVS Integration bridge on Grunt120", mgmt_ip_addr = "10.66.4.120", sup_node = self.g120_node) + self.g120_br_eth1 = self.create_node("G120_Br_Eth1","OVS Integration bridge on Grunt120", mgmt_ip_addr = "10.66.4.120", sup_node = self.g120_node) + # eth2 on g120 is being used in PCI passthrough mode + + def setup_tps(self): + logging.debug("Setting up termination points") + self.g118_e1 = self.l2top.find_tp(self.g118_node, "eth1") + if (self.g118_e1 is None): + raise MyTpNotFound() + self.g44_e1 = self.l2top.find_tp(self.g44_node, 
"eth1") + if (self.g44_e1 is None): + raise MyTpNotFound() + self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2") + if (self.g44_e2 is None): + raise MyTpNotFound() + self.g44_e3 = self.l2top.find_tp(self.g44_node, "eth3") + if (self.g44_e3 is None): + raise MyTpNotFound() + self.g120_e1 = self.l2top.find_tp(self.g120_node, "eth1") + if (self.g44_e3 is None): + raise MyTpNotFound() + + self.g118_br_int_eth1 = self.create_tp(self.g118_br_int, "int-br-eth1") + self.g118_br_int_tap1 = self.create_tp(self.g118_br_int, "tap1") + + self.g118_br_eth1_phyeth1 = self.create_tp(self.g118_br_eth1, "phyeth1") + self.g118_br_eth1_eth1 = self.create_tp(self.g118_br_eth1, "eth1", sup_node=self.g118_node, sup_tp=self.g118_e1, vlan=True) + + self.g44_br_int_eth1 = self.create_tp(self.g44_br_int, "int-br-eth1") + self.g44_br_int_vhu1 = self.create_tp(self.g44_br_int, "vhu1") + self.g44_br_int_eth2 = self.create_tp(self.g44_br_int, "int-br-eth2") + self.g44_br_int_vhu2 = self.create_tp(self.g44_br_int, "vhu2") + self.g44_br_int_eth1 = self.create_tp(self.g44_br_int, "int-br-eth3") + self.g44_br_int_vhu1 = self.create_tp(self.g44_br_int, "vhu3") + + self.g44_br_eth1_phyeth1 = self.create_tp(self.g44_br_eth1, "phyeth1") + self.g44_br_eth1_dpdk0 = self.create_tp(self.g44_br_eth1, "dpdk0", sup_node=self.g44_node, sup_tp=self.g44_e1, vlan=True) + + self.g44_br_eth2_phyeth1 = self.create_tp(self.g44_br_eth2, "phyeth2") + self.g44_br_eth2_dpdk1 = self.create_tp(self.g44_br_eth2, "dpdk1", sup_node=self.g44_node, sup_tp=self.g44_e2) + + self.g44_br_eth3_phyeth1 = self.create_tp(self.g44_br_eth3, "phyeth3") + self.g44_br_eth3_dpdk2 = self.create_tp(self.g44_br_eth3, "dpdk2", sup_node=self.g44_node, sup_tp=self.g44_e3) + + self.g120_br_int_eth1 = self.create_tp(self.g120_br_int, "int-br-eth1") + self.g120_br_int_tap1 = self.create_tp(self.g120_br_int, "tap1") + + self.g120_br_eth1_phyeth1 = self.create_tp(self.g120_br_eth1, "phyeth1") + self.g120_br_eth1_eth1 = self.create_tp(self.g120_br_eth1, "eth1", sup_node=self.g120_node, sup_tp=self.g120_e1, vlan=True) + + self.pmn_eth1 = self.create_tp(self.pseudo_mgmt_node, "eth1") + self.pmn_eth2 = self.create_tp(self.pseudo_mgmt_node, "eth2") + self.pmn_eth3 = self.create_tp(self.pseudo_mgmt_node, "eth3") + + def setup_links(self): + # Add links to provnet1 network + # These links are unidirectional and point-to-point + logging.debug("Setting up links") + # Bidir Links for OVS bridges + self.create_bidir_link(self.g118_br_eth1, self.g118_br_eth1_eth1, self.pseudo_mgmt_node, self.pmn_eth1, "Link_g118_be1_pmn_e1", "Link_pmn_e1_g118_be1") + self.create_bidir_link(self.g44_br_eth1, self.g44_br_eth1_dpdk0, self.pseudo_mgmt_node, self.pmn_eth2, "Link_g44_be1_pmn_d0", "Link_pmn_e2_g44_d0") + self.create_bidir_link(self.g120_br_eth1, self.g120_br_eth1_eth1, self.pseudo_mgmt_node, self.pmn_eth3, "Link_g120_be1_pmn_e3", "Link_pmn_e3_g120_be1") + # Data path links cannot be represented here since PCI pass through is beingused on G118 and G44 + + def setup_all(self): + self.setup_nodes() + self.setup_tps() + self.setup_links() + +def adjust_xml_file(infile, outfile, begin_marker, end_marker): + buffer = [] + in_block = False + max_interesting_line_toread = 1 + interesting_line = 0 + with open(infile) as inf: + with open(outfile, 'w') as outf: + for line in inf: + if begin_marker in line: + in_block = True + # Go down + if end_marker in line: + assert in_block is True + print("End of gathering line...", line) + buffer.append(line) # gather lines + interesting_line = 
max_interesting_line_toread + in_block = False + continue + if interesting_line: + print("Interesting line printing ...", line) + outf.write(line) + interesting_line -= 1 + if interesting_line == 0: # output gathered lines + for lbuf in buffer: + outf.write(lbuf) + buffer = [] # empty buffer + print("\n\n") + continue + + if in_block: + print("Gathering line...", line) + buffer.append(line) # gather lines + else: + outf.write(line) + + +if __name__ == "__main__": + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + # create logger + logger = logging.getLogger('Provider Network Topology') + logger.setLevel(logging.DEBUG) + logging.basicConfig(level=logging.DEBUG) + + logger.info('Creating an instance of Provider Network Topology') + + nwtop = RwTl.YangData_IetfNetwork() + + # Setup L2 topology + l2top = MyL2Topology(nwtop, logger) + l2top.setup_all() + + # Setup Provider network topology + provtop = MyProvTopology(nwtop, l2top, logger) + provtop.setup_all() + + print ("Converting to XML") + # Convert l2nw network to XML + xml_str = nwtop.to_xml_v2(model) + tree = etree.XML(xml_str) + xml_file = "/tmp/stacked_provtop.xml" + xml_formatted_file = "/tmp/stacked_provtop2.xml" + with open(xml_file, "w") as f: + f.write(xml_str) + status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True) + + status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True) + + print ("Converting to JSON ") + # Convert set of topologies to JSON + json_str = nwtop.to_json(model) + with open("/tmp/stacked_provtop.json", "w") as f: + f.write(json_str) + status = subprocess.call("python -m json.tool /tmp/stacked_provtop.json > /tmp/stacked_provtop2.json", shell=True) + json_formatted_file = "/tmp/stacked_provtop2.json" + status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True) + status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py new file mode 100644 index 0000000..1a86847 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py @@ -0,0 +1,278 @@ +#!/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + + +import gi +gi.require_version('RwYang', '1.0') +from gi.repository import IetfL2TopologyYang as l2Tl +from gi.repository import RwTopologyYang as RwTl +from gi.repository import RwYang +from xml.etree import ElementTree as etree +import subprocess +import logging + +from create_stackedl2topology import MyL2Network +from create_stackedl2topology import MyL2Topology +from create_stackedProvNettopology import MyProvNetwork +from create_stackedProvNettopology import MyProvTopology +from create_stackedVMNettopology import MyVMNetwork +from create_stackedVMNettopology import MyVMTopology + + +class MyNwNotFound(Exception): + pass + +class MyNodeNotFound(Exception): + pass + +class MyTpNotFound(Exception): + pass + +class MySfcNetwork(object): + def __init__(self, nwtop, l2top, provtop, vmtop, log): + self.next_mac = 81 + self.log = log + self.sfcnet1 = nwtop.network.add() + self.sfcnet1.network_id = "SfcNetwork-1" + + self.l2top = l2top + self.provtop = provtop + self.vmtop = vmtop + + # L2 Network type augmentation + self.sfcnet1.network_types.l2_network = self.sfcnet1.network_types.l2_network.new() + # L2 Network augmentation + self.sfcnet1.l2_network_attributes.name = "Rift LAB SFC-Demo SFC Network" + try: + self.l2netid = l2top.find_nw_id("L2HostNetwork-1") + except TypeError: + raise MyNwNotFound() + ul_net = self.sfcnet1.supporting_network.add() + try: + ul_net.network_ref = provtop.find_nw_id("ProviderNetwork-1") + self.provnetid = ul_net.network_ref + except TypeError: + raise MyNwNotFound() + ul_net = self.sfcnet1.supporting_network.add() + try: + ul_net.network_ref = vmtop.find_nw_id("VmNetwork-1") + self.vmnetid = ul_net.network_ref + except TypeError: + raise MyNwNotFound() + + def get_nw_id(self, nw_name): + for nw in self.nwtop.network: + if (nw.network_id == nw_name): + return nw.network_id + + def get_node(self, node_name): + _node_id = "urn:Rift:Lab:" + node_name + for node in self.sfcnet1.node: + if (node.node_id == _node_id): + return node + + def get_tp(self, node, tp_name): + _tp_id = "urn:Rift:Lab:" + node.node_id + "_" + tp_name + for tp in node.termination_point : + if (tp.tp_id == _tp_id): + return tp + + def get_link(self, link_name): + for link in nw.link : + if (link.l2_link_attributes.name == link_name): + return link + + def create_node(self, node_name, description, mgmt_ip_addr = None, sup_node = None, nw_ref = None): + logging.debug("Creating node %s", node_name) + node = self.sfcnet1.node.add() + node.node_id = "urn:Rift:Lab:" + node_name + # L2 Node augmentation + node.l2_node_attributes.name = node_name + node.l2_node_attributes.description = description + if (mgmt_ip_addr is not None): + node.l2_node_attributes.management_address.append(mgmt_ip_addr) + if (sup_node is not None): + logging.debug(" Adding support node %s", sup_node.node_id) + ul_node = node.supporting_node.add() + if (nw_ref is not None): + ul_node.network_ref = nw_ref + else: + ul_node.network_ref = self.l2netid + ul_node.node_ref = sup_node.node_id + return node + + def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, nw_ref = None): + logging.debug(" Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp) + tp = node.termination_point.add() + tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp) + # L2 TP augmentation + tp.l2_termination_point_attributes.description = cfg_tp + tp.l2_termination_point_attributes.maximum_frame_size = 1500 + #tp.l2_termination_point_attributes.mac_address = "00:5e:8a:ab:dd:" + str(self.next_mac) + #self.next_mac = 
self.next_mac + 1 + tp.l2_termination_point_attributes.eth_encapsulation = "l2t:vxlan" + if ((sup_tp is not None) and (sup_node is not None)): + logging.debug(" Adding support terminaton point %s", sup_tp.tp_id) + ul_tp = tp.supporting_termination_point.add() + if (nw_ref is not None): + ul_tp.network_ref = nw_ref + else: + ul_tp.network_ref = self.l2netid + ul_tp.node_ref = sup_node.node_id + ul_tp.tp_ref = sup_tp.tp_id + return tp + + def create_link(self, node1, tp1, node2, tp2, link_name1, link_name2 = None): + logging.debug("Creating links %s %s", link_name1, link_name2) + lnk1= self.sfcnet1.link.add() + lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description) + lnk1.source.source_node = node1.node_id + lnk1.source.source_tp = tp1.tp_id + lnk1.destination.dest_node = node2.node_id + lnk1.destination.dest_tp = tp2.tp_id + # L2 link augmentation + lnk1.l2_link_attributes.name = link_name1 + lnk1.l2_link_attributes.rate = 1000000000.00 + + # Create bidir link if second link is provided + if (link_name2 is not None): + lnk2= self.sfcnet1.link.add() + lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description) + lnk2.source.source_node = node2.node_id + lnk2.source.source_tp = tp2.tp_id + lnk2.destination.dest_node = node1.node_id + lnk2.destination.dest_tp = tp1.tp_id + # L2 link augmentation + lnk2.l2_link_attributes.name = link_name2 + lnk2.l2_link_attributes.rate = 1000000000.00 + + +class MySfcTopology(MySfcNetwork): + def __init__(self, nwtop, l2top, provtop, vmnet, log): + super(MySfcTopology, self).__init__(nwtop, l2top, provtop, vmnet, log) + + def find_nw_id(self, nw_name): + return self.get_nw_id(nw_name) + + def find_node(self, node_name): + return self.get_node(node_name) + + def find_tp(self, node, tp_name): + return self.get_tp(node, tp_name) + + def find_link(self, link_name): + return self.get_link(link_name) + + def setup_nodes(self): + logging.debug("Setting up nodes") + + self.tg_node = self.vmtop.find_node("Trafgen_VM") + if (self.tg_node is None): + raise MyNodeNotFound() + self.lb_node = self.vmtop.find_node("LB_VM") + if (self.lb_node is None): + raise MyNodeNotFound() + + self.g44_br_int_node = self.provtop.find_node("G44_Br_Int") + if (self.g44_br_int_node is None): + raise MyNodeNotFound() + + self.sf1 = self.create_node("SF1","SF on LB VM", sup_node = self.lb_node, nw_ref = self.vmnetid) + self.sfc1 = self.create_node("SFC1","SF classifier on Trafgen VM", sup_node = self.tg_node, nw_ref = self.vmnetid) + self.sff1 = self.create_node("SFF1","SF forwarder on Grunt44 OVS integration bridge", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_br_int_node, nw_ref = self.provnetid) + + def setup_tps(self): + logging.debug("Setting up termination points") + # FInd L2 hosts + #self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2") + #if (self.g44_e2 is None): + # raise MyTpNotFound() + + self.sfc1_vxlannsh1 = self.create_tp(self.sfc1, "vxlannsh1") + self.sf1_vxlannsh1 = self.create_tp(self.sf1, "vxlannsh1") + self.sff1_vxlannsh1 = self.create_tp(self.sff1, "vxlannsh1") + + + def setup_links(self): + # Add links to sfcnet1 network + # These links are unidirectional and point-to-point + logging.debug("Setting up links") + # Bidir Links for OVS bridges + 
self.create_link(self.sfc1, self.sfc1_vxlannsh1, self.sff1, self.sff1_vxlannsh1, "Link_sfc1_sff1") + self.create_link(self.sfc1, self.sfc1_vxlannsh1, self.sf1, self.sf1_vxlannsh1, "Link_sff1_sf1", "Link_sf1_sff1") + + def setup_all(self): + self.setup_nodes() + self.setup_tps() + #self.setup_links() + + +if __name__ == "__main__": + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + # create logger + logger = logging.getLogger('SFC Network Topology') + logger.setLevel(logging.DEBUG) + logging.basicConfig(level=logging.DEBUG) + + logger.info('Creating an instance of SFC Network Topology') + + nwtop = RwTl.YangData_IetfNetwork() + + # Setup L2 topology + l2top = MyL2Topology(nwtop, logger) + l2top.setup_all() + + # Setup Provider network topology + provtop = MyProvTopology(nwtop, l2top, logger) + provtop.setup_all() + + # Setup VM network topology + vmtop = MyVMTopology(nwtop, l2top, provtop, logger) + vmtop.setup_all() + + # Setup SFC network topology + sfctop = MySfcTopology(nwtop, l2top, provtop, vmtop, logger) + sfctop.setup_all() + + print ("Converting to XML") + # Convert l2nw network to XML + xml_str = nwtop.to_xml_v2(model) + tree = etree.XML(xml_str) + xml_file = "/tmp/stacked_sfctop.xml" + xml_formatted_file = "/tmp/stacked_sfctop2.xml" + with open(xml_file, "w") as f: + f.write(xml_str) + status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True) + + status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True) + + print ("Converting to JSON ") + # Convert set of topologies to JSON + json_str = nwtop.to_json(model) + with open("/tmp/stacked_sfctop.json", "w") as f: + f.write(json_str) + status = subprocess.call("python -m json.tool /tmp/stacked_sfctop.json > /tmp/stacked_sfctop2.json", shell=True) + json_formatted_file = "/tmp/stacked_sfctop2.json" + status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True) + status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True) + status = subprocess.call("sed -i -e 's/\"l2t:vxlan\"/\"vxlan\"/g' " + json_formatted_file, shell=True) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py new file mode 100644 index 0000000..719fcf8 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py @@ -0,0 +1,333 @@ +#!/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
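+# Test helper: builds the stacked VM network topology ("VmNetwork-1") on top of the
+# L2 host and provider network topologies, and when run as a script dumps the result
+# to /tmp/stacked_vmtop.xml and /tmp/stacked_vmtop.json.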
+ + +# +# + + +import gi +gi.require_version('RwYang', '1.0') +from gi.repository import IetfL2TopologyYang as l2Tl +from gi.repository import RwTopologyYang as RwTl +from gi.repository import RwYang +from xml.etree import ElementTree as etree +import subprocess +import logging + +from create_stackedl2topology import MyL2Network +from create_stackedl2topology import MyL2Topology +from create_stackedProvNettopology import MyProvNetwork +from create_stackedProvNettopology import MyProvTopology + +class MyNwNotFound(Exception): + pass + +class MyNodeNotFound(Exception): + pass + +class MyTpNotFound(Exception): + pass + +class MyVMNetwork(object): + def __init__(self, nwtop, l2top, provtop, log): + self.next_mac = 41 + self.log = log + self.vmnet1 = nwtop.network.add() + self.vmnet1.network_id = "VmNetwork-1" + + self.nwtop = nwtop + self.l2top = l2top + self.provtop = provtop + + # L2 Network type augmentation + self.vmnet1.network_types.l2_network = self.vmnet1.network_types.l2_network.new() + # L2 Network augmentation + self.vmnet1.l2_network_attributes.name = "Rift LAB SFC-Demo VM Network" + ul_net = self.vmnet1.supporting_network.add() + try: + ul_net.network_ref = l2top.find_nw_id("L2HostNetwork-1") + self.l2netid = ul_net.network_ref + except TypeError: + raise MyNwNotFound() + ul_net = self.vmnet1.supporting_network.add() + try: + ul_net.network_ref = provtop.find_nw_id("ProviderNetwork-1") + self.provnetid = ul_net.network_ref + except TypeError: + raise MyNwNotFound() + + def get_nw_id(self, nw_name): + for nw in self.nwtop.network: + if (nw.network_id == nw_name): + return nw.network_id + + def get_node(self, node_name): + _node_id = "urn:Rift:Lab:" + node_name + for node in self.vmnet1.node: + if (node.node_id == _node_id): + return node + + def get_tp(self, node, tp_name): + _tp_id = node.node_id + "_" + tp_name + for tp in node.termination_point : + if (tp.tp_id == _tp_id): + return tp + + def get_link(self, link_name): + for link in nw.link : + if (link.l2_link_attributes.name == link_name): + return link + + def create_node(self, node_name, description, mgmt_ip_addr=None, sup_node_list=None): + logging.debug("Creating node %s", node_name) + node = self.vmnet1.node.add() + node.node_id = "urn:Rift:Lab:" + node_name + # L2 Node augmentation + node.l2_node_attributes.name = node_name + node.l2_node_attributes.description = description + if (mgmt_ip_addr is not None): + node.l2_node_attributes.management_address.append(mgmt_ip_addr) + if (sup_node_list is not None): + for sup_node in sup_node_list: + logging.debug(" Adding support node %s", sup_node[0].node_id) + ul_node = node.supporting_node.add() + # Second element is hardcoded as nw ref + if (sup_node[1] is not None): + ul_node.network_ref = sup_node[1] + else: + ul_node.network_ref = self.l2netid + ul_node.node_ref = sup_node[0].node_id + return node + + def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, nw_ref = None): + logging.debug(" Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp) + tp = node.termination_point.add() + tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp) + # L2 TP augmentation + tp.l2_termination_point_attributes.description = cfg_tp + tp.l2_termination_point_attributes.maximum_frame_size = 1500 + tp.l2_termination_point_attributes.mac_address = "00:5e:8a:ab:cc:" + str(self.next_mac) + self.next_mac = self.next_mac + 1 + tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet" + if ((sup_tp is not None) and (sup_node is not None)): + logging.debug(" Adding 
support termination point %s", sup_tp.tp_id) + ul_tp = tp.supporting_termination_point.add() + if (nw_ref is not None): + ul_tp.network_ref = nw_ref + else: + ul_tp.network_ref = self.l2netid + ul_tp.node_ref = sup_node.node_id + ul_tp.tp_ref = sup_tp.tp_id + return tp + + def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2): + logging.debug("Creating links %s %s", link_name1, link_name2) + lnk1= self.vmnet1.link.add() + lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description) + lnk1.source.source_node = node1.node_id + lnk1.source.source_tp = tp1.tp_id + lnk1.destination.dest_node = node2.node_id + lnk1.destination.dest_tp = tp2.tp_id + # L2 link augmentation + lnk1.l2_link_attributes.name = link_name1 + #lnk1.l2_link_attributes.rate = 1000000000.00 + + lnk2= self.vmnet1.link.add() + lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description) + lnk2.source.source_node = node2.node_id + lnk2.source.source_tp = tp2.tp_id + lnk2.destination.dest_node = node1.node_id + lnk2.destination.dest_tp = tp1.tp_id + # L2 link augmentation + lnk2.l2_link_attributes.name = link_name2 + #lnk2.l2_link_attributes.rate = 1000000000.00 + return lnk1, lnk2 + +class MyVMTopology(MyVMNetwork): + def __init__(self, nwtop, l2top, provtop, log): + super(MyVMTopology, self).__init__(nwtop, l2top, provtop, log) + + def find_nw_id(self, nw_name): + return self.get_nw_id(nw_name) + + def find_node(self, node_name): + return self.get_node(node_name) + + def find_tp(self, node, tp_name): + return self.get_tp(node, tp_name) + + + def find_link(self, link_name): + return self.get_link(link_name) + + def setup_nodes(self): + logging.debug("Setting up nodes") + + self.g118_node = self.l2top.find_node("Grunt118") + if (self.g118_node is None): + raise MyNodeNotFound() + self.g44_node = self.l2top.find_node("Grunt44") + if (self.g44_node is None): + raise MyNodeNotFound() + self.g120_node = self.l2top.find_node("Grunt120") + if (self.g120_node is None): + raise MyNodeNotFound() + + self.g44_br_int_node = self.provtop.find_node("G44_Br_Int") + if (self.g44_br_int_node is None): + raise MyNodeNotFound() + + self.pseudo_vm = self.create_node("Pseudo_VM","Pseudo VM to manage eth0 LAN") + sup_node_list = [[self.g118_node, self.l2netid], [self.g44_br_int_node, self.provnetid]] + self.tg_vm = self.create_node("Trafgen_VM","Trafgen VM on Grunt118", mgmt_ip_addr="10.0.118.3", sup_node_list = sup_node_list) + sup_node_list = [[self.g44_node, self.l2netid], [self.g44_br_int_node, self.provnetid]] + self.lb_vm = self.create_node("LB_VM","LB VM on Grunt44", mgmt_ip_addr="10.0.118.35", sup_node_list = sup_node_list) + sup_node_list = [[self.g120_node, self.l2netid], [self.g44_br_int_node, self.provnetid]] + self.ts_vm = self.create_node("Trafsink_VM","Trafsink VM on Grunt120", mgmt_ip_addr="10.0.118.4", sup_node_list = sup_node_list) + + def setup_tps(self): + logging.debug("Setting up termination points") + # Find L2 hosts + self.g118_e2 = self.l2top.find_tp(self.g118_node, "eth2") + if (self.g118_e2 is None): + raise MyTpNotFound() + self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2") + if (self.g44_e2 is None): + raise MyTpNotFound() + # Find OVS tps + self.g44_br_int_vhu2 = 
self.provtop.find_tp(self.g44_br_int_node, "vhu2") + if (self.g44_br_int_vhu2 is None): + raise MyTpNotFound() + self.g44_br_int_vhu3 = self.provtop.find_tp(self.g44_br_int_node, "vhu3") + if (self.g44_br_int_vhu3 is None): + raise MyTpNotFound() + + self.pvm_eth1 = self.create_tp(self.pseudo_vm, "eth1") + self.pvm_eth2 = self.create_tp(self.pseudo_vm, "eth2") + self.pvm_eth3 = self.create_tp(self.pseudo_vm, "eth3") + + self.tg_vm_eth0 = self.create_tp(self.tg_vm, "eth0") + self.tg_vm_trafgen11 = self.create_tp(self.tg_vm, "trafgen11", sup_node=self.g118_node, sup_tp=self.g118_e2) + + self.lb_vm_eth0 = self.create_tp(self.lb_vm, "eth0") + self.lb_vm_lb21 = self.create_tp(self.lb_vm, "load_balancer21", sup_node=self.g44_br_int_node, sup_tp=self.g44_br_int_vhu2, nw_ref=self.provnetid) + self.lb_vm_lb22 = self.create_tp(self.lb_vm, "load_balancer22", sup_node=self.g44_br_int_node, sup_tp=self.g44_br_int_vhu3, nw_ref=self.provnetid) + + self.ts_vm_eth0 = self.create_tp(self.ts_vm, "eth0") + self.ts_vm_trafsink31 = self.create_tp(self.ts_vm, "trafsink31", sup_node=self.g44_node, sup_tp=self.g44_e2) + + + def setup_links(self): + # Add links to vmnet1 network + # These links are unidirectional and point-to-point + logging.debug("Setting up links") + # Bidir Links for OVS bridges + self.create_bidir_link(self.tg_vm, self.tg_vm_trafgen11, self.lb_vm, self.lb_vm_lb21, "Link_tg_t11_lb_lb21", "Link_lb_lb21_tg_t11") + self.create_bidir_link(self.ts_vm, self.ts_vm_trafsink31, self.lb_vm, self.lb_vm_lb22, "Link_ts_t31_lb_lb22", "Link_lb_lb22_tg_t31") + + self.create_bidir_link(self.pseudo_vm, self.pvm_eth1, self.tg_vm, self.tg_vm_eth0, "Link_pvm_e1_tgv_e0", "Link_tgv_e0_pvm_e1") + self.create_bidir_link(self.pseudo_vm, self.pvm_eth2, self.lb_vm, self.lb_vm_eth0, "Link_pvm_e2_lbv_e0", "Link_lbv_e0_pvm_e2") + self.create_bidir_link(self.pseudo_vm, self.pvm_eth3, self.ts_vm, self.ts_vm_eth0, "Link_pvm_e3_tsv_e0", "Link_tsv_e0_pvm_e3") + + def setup_all(self): + self.setup_nodes() + self.setup_tps() + self.setup_links() + +def adjust_xml_file(infile, outfile, begin_marker, end_marker): + buffer = [] + in_block = False + max_interesting_line_toread = 1 + interesting_line = 0 + with open(infile) as inf: + with open(outfile, 'w') as outf: + for line in inf: + if begin_marker in line: + in_block = True + # Go down + if end_marker in line: + assert in_block is True + print("End of gathering line...", line) + buffer.append(line) # gather lines + interesting_line = max_interesting_line_toread + in_block = False + continue + if interesting_line: + print("Interesting line printing ...", line) + outf.write(line) + interesting_line -= 1 + if interesting_line == 0: # output gathered lines + for lbuf in buffer: + outf.write(lbuf) + buffer = [] # empty buffer + print("\n\n") + continue + + if in_block: + print("Gathering line...", line) + buffer.append(line) # gather lines + else: + outf.write(line) + + +if __name__ == "__main__": + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + # create logger + logger = logging.getLogger('VM Network Topology') + logger.setLevel(logging.DEBUG) + logging.basicConfig(level=logging.DEBUG) + + logger.info('Creating an instance of VM Network Topology') + + nwtop = RwTl.YangData_IetfNetwork() + + # Setup L2 topology + l2top = MyL2Topology(nwtop, logger) + l2top.setup_all() + + # Setup Provider network topology + provtop = MyProvTopology(nwtop, l2top, logger) + provtop.setup_all() + + # Setup VM network topology + vmtop = MyVMTopology(nwtop, l2top, provtop, 
logger) + vmtop.setup_all() + + print ("Converting to XML") + # Convert l2nw network to XML + xml_str = nwtop.to_xml_v2(model) + tree = etree.XML(xml_str) + xml_file = "/tmp/stacked_vmtop.xml" + xml_formatted_file = "/tmp/stacked_vmtop2.xml" + with open(xml_file, "w") as f: + f.write(xml_str) + status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True) + + status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True) + + print ("Converting to JSON ") + # Convert set of topologies to JSON + json_str = nwtop.to_json(model) + with open("/tmp/stacked_vmtop.json", "w") as f: + f.write(json_str) + status = subprocess.call("python -m json.tool /tmp/stacked_vmtop.json > /tmp/stacked_vmtop2.json", shell=True) + json_formatted_file = "/tmp/stacked_vmtop2.json" + status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True) + status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py new file mode 100644 index 0000000..433bb9a --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py @@ -0,0 +1,262 @@ +#!/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
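+# Test helper: builds the L2 host network topology ("L2HostNetwork-1") that serves as the
+# base layer for the provider, VM and SFC topology builders, and when run as a script
+# dumps the result to /tmp/stacked_top.xml and /tmp/stacked_top.json.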
+ + +# +# + + +import gi +gi.require_version('RwYang', '1.0') +from gi.repository import IetfL2TopologyYang as l2Tl +from gi.repository import RwTopologyYang as RwTl +from gi.repository import RwYang +from xml.etree import ElementTree as etree +import subprocess +import logging + + +class MyL2Network(object): + def __init__(self, nwtop, log): + self.next_mac = 11 + self.log = log + self.nwtop = nwtop + self.l2net1 = nwtop.network.add() + self.l2net1.network_id = "L2HostNetwork-1" + + # L2 Network type augmentation + self.l2net1.network_types.l2_network = self.l2net1.network_types.l2_network.new() + # L2 Network augmentation + self.l2net1.l2_network_attributes.name = "Rift LAB SFC-Demo Host Network" + + def get_nw_id(self, nw_name): + for nw in self.nwtop.network: + if (nw.network_id == nw_name): + return nw.network_id + + def get_nw(self, nw_name): + for nw in self.nwtop.network: + if (nw.network_id == nw_name): + return nw + + def get_node(self, node_name): + _node_id = "urn:Rift:Lab:" + node_name + for node in self.l2net1.node: + if (node.node_id == _node_id): + return node + + def get_tp(self, node, tp_name): + _tp_id = node.node_id + "_" + tp_name + for tp in node.termination_point : + if (tp.tp_id == _tp_id): + return tp + + def get_link(self, link_name): + for link in nw.link : + if (link.l2_link_attributes.name == link_name): + return link + + def create_node(self, node_name, mgmt_ip_addr, description): + logging.debug("Creating node %s", node_name) + node = self.l2net1.node.add() + node.node_id = "urn:Rift:Lab:" + node_name + # L2 Node augmentation + node.l2_node_attributes.name = node_name + node.l2_node_attributes.description = description + node.l2_node_attributes.management_address.append(mgmt_ip_addr) + return node + + def create_tp(self, node, cfg_tp): + logging.debug(" Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp) + tp = node.termination_point.add() + tp.tp_id = ("{}_{}").format(node.node_id, cfg_tp) + # L2 TP augmentation + tp.l2_termination_point_attributes.description = cfg_tp + tp.l2_termination_point_attributes.maximum_frame_size = 1500 + tp.l2_termination_point_attributes.mac_address = "00:1e:67:d8:48:" + str(self.next_mac) + self.next_mac = self.next_mac + 1 + tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet" + return tp + + def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2): + logging.debug("Creating links %s %s", link_name1, link_name2) + lnk1= self.l2net1.link.add() + lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description) + lnk1.source.source_node = node1.node_id + lnk1.source.source_tp = tp1.tp_id + lnk1.destination.dest_node = node2.node_id + lnk1.destination.dest_tp = tp2.tp_id + # L2 link augmentation + lnk1.l2_link_attributes.name = link_name1 + #lnk1.l2_link_attributes.rate = 1000000000.00 + + lnk2= self.l2net1.link.add() + lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description) + lnk2.source.source_node = node2.node_id + lnk2.source.source_tp = tp2.tp_id + lnk2.destination.dest_node = node1.node_id + lnk2.destination.dest_tp = tp1.tp_id + # L2 link augmentation + lnk2.l2_link_attributes.name = link_name2 + #lnk2.l2_link_attributes.rate = 1000000000.00 + return lnk1, lnk2 + 
+class MyL2Topology(MyL2Network): + def __init__(self, nwtop, log): + super(MyL2Topology, self).__init__(nwtop, log) + + def find_nw_id(self, nw_name): + return self.get_nw_id(nw_name) + + def find_nw(self, nw_name): + return self.get_nw(nw_name) + + def find_node(self, node_name): + return self.get_node(node_name) + + def find_tp(self, node, tp_name): + return self.get_tp(node, tp_name) + + def find_link(self, link_name): + return self.get_link(link_name) + + def setup_nodes(self): + self.g118 = self.create_node("Grunt118","10.66.4.118", "Host with OVS and PCI") + self.g44 = self.create_node("Grunt44","10.66.4.44", "Host with OVS-DPDK") + self.g120 = self.create_node("Grunt120","10.66.4.120", "Host with OVS and PCI") + self.hms = self.create_node("HostMgmtSwitch","10.66.4.98", "Switch for host eth0") + self.vms = self.create_node("VMMgmtSwitch","10.66.4.55", "Switch for VMs eth0") + self.ads = self.create_node("AristaDPSwitch","10.66.4.90", "10 Gbps Switch") + + def setup_tps(self): + self.g118_e0 = self.create_tp(self.g118, "eth0") + self.g118_e1 = self.create_tp(self.g118, "eth1") + self.g118_e2 = self.create_tp(self.g118, "eth2") + + self.g44_e0 = self.create_tp(self.g44, "eth0") + self.g44_e1 = self.create_tp(self.g44, "eth1") + self.g44_e2 = self.create_tp(self.g44, "eth2") + self.g44_e3 = self.create_tp(self.g44, "eth3") + + self.g120_e0 = self.create_tp(self.g120, "eth0") + self.g120_e1 = self.create_tp(self.g120, "eth1") + self.g120_e2 = self.create_tp(self.g120, "eth2") + + self.hms_e1 = self.create_tp(self.hms, "eth1") + self.hms_e2 = self.create_tp(self.hms, "eth2") + self.hms_e3 = self.create_tp(self.hms, "eth3") + + self.vms_e1 = self.create_tp(self.vms, "eth1") + self.vms_e2 = self.create_tp(self.vms, "eth2") + self.vms_e3 = self.create_tp(self.vms, "eth3") + + self.ads_57 = self.create_tp(self.ads, "Card_5:Port_7") + self.ads_58 = self.create_tp(self.ads, "Card_8:Port_8") + self.ads_47 = self.create_tp(self.ads, "Card_4:Port_7") + self.ads_48 = self.create_tp(self.ads, "Card_4:Port_8") + + def setup_links(self): + # Add links to l2net1 network + # These links are unidirectional and point-to-point + # Bidir Links for Grunt118 + self.create_bidir_link(self.g118, self.g118_e0, self.hms, self.hms_e1, "Link_g118_e0_hms_e1", "Link_hms_e1_g118_e0") + self.create_bidir_link(self.g118, self.g118_e1, self.vms, self.vms_e1, "Link_g118_e1_vms_e1", "Link_vms_e1_g118_e1") + self.create_bidir_link(self.g118, self.g118_e2, self.ads, self.ads_57, "Link_g118_e2_ads_47", "Link_ads_47_g118_e2") + # Bidir Links for Grunt44 + self.create_bidir_link(self.g44, self.g44_e0, self.hms, self.hms_e2, "Link_g44_e0_hms_e1", "Link_hms_e1_g44_e0") + self.create_bidir_link(self.g44, self.g44_e1, self.vms, self.vms_e2, "Link_g44_e1_vms_e1", "Link_vms_e1_g44_e1") + self.create_bidir_link(self.g44, self.g44_e2, self.ads, self.ads_47, "Link_g44_e2_ads_47", "Link_ads_47_g44_e2") + self.create_bidir_link(self.g44, self.g44_e3, self.ads, self.ads_48, "Link_g44_e3_ads_48", "Link_ads_48_g44_e3") + # Bidir Links for Grunt120 + self.create_bidir_link(self.g120, self.g120_e0, self.hms, self.hms_e3, "Link_g120_e0_hms_e1", "Link_hms_e1_g120_e0") + self.create_bidir_link(self.g120, self.g120_e1, self.vms, self.vms_e3, "Link_g120_e1_vms_e1", "Link_vms_e1_g120_e1") + self.create_bidir_link(self.g120, self.g120_e2, self.ads, self.ads_58, "Link_g120_e2_ads_58", "Link_ads_58_g120_e2") + + def setup_all(self): + self.setup_nodes() + self.setup_tps() + self.setup_links() + +def adjust_xml_file(infile, outfile, begin_marker, 
end_marker): + buffer = [] + in_block = False + max_interesting_line_toread = 1 + interesting_line = 0 + with open(infile) as inf: + with open(outfile, 'w') as outf: + for line in inf: + if begin_marker in line: + in_block = True + # Go down + if end_marker in line: + assert in_block is True + print("End of gathering line...", line) + buffer.append(line) # gather lines + interesting_line = max_interesting_line_toread + in_block = False + continue + if interesting_line: + print("Interesting line printing ...", line) + outf.write(line) + interesting_line -= 1 + if interesting_line == 0: # output gathered lines + for lbuf in buffer: + outf.write(lbuf) + buffer = [] # empty buffer + print("\n\n") + continue + + if in_block: + print("Gathering line...", line) + buffer.append(line) # gather lines + else: + outf.write(line) + +if __name__ == "__main__": + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + # create logger + logger = logging.getLogger(__file__) + logger.setLevel(logging.DEBUG) + logging.basicConfig(level=logging.DEBUG) + + logging.info('Creating an instance of L2 Host Topology') + nwtop = RwTl.YangData_IetfNetwork() + + l2top = MyL2Topology(nwtop, logger) + l2top.setup_all() + + logging.info ("Converting to XML") + # Convert l2nw network to XML + xml_str = nwtop.to_xml_v2(model) + tree = etree.XML(xml_str) + xml_file = "/tmp/stacked_top.xml" + xml_formatted_file = "/tmp/stacked_top2.xml" + with open(xml_file, "w") as f: + f.write(xml_str) + status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True) + + logging.info ("Converting to JSON") + # Convert set of topologies to JSON + json_str = nwtop.to_json(model) + with open("/tmp/stacked_top.json", "w") as f: + f.write(json_str) + status = subprocess.call("python -m json.tool /tmp/stacked_top.json > /tmp/stacked_top2.json", shell=True) + json_formatted_file = "/tmp/stacked_top2.json" + status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py new file mode 100644 index 0000000..d7bf609 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py @@ -0,0 +1,99 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
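+# Unit test: loads the "rwsdn_mock" SDN plugin through libpeas and verifies that
+# get_network_list() succeeds for a mock SDN account.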
+ + +# +# + +import datetime +import logging +import unittest + +import rw_peas +import rwlogger + +from gi.repository import RwsdnYang +import gi +gi.require_version('RwTypes', '1.0') +gi.require_version('RwSdn', '1.0') +from gi.repository import RwcalYang +from gi.repository import IetfNetworkYang +from gi.repository.RwTypes import RwStatus + + +logger = logging.getLogger('mock') + +def get_sdn_account(): + """ + Creates an object for class RwsdnYang.SdnAccount() + """ + account = RwsdnYang.SDNAccount() + account.account_type = "mock" + account.mock.username = "rift" + account.mock.plugin_name = "rwsdn_mock" + return account + +def get_sdn_plugin(): + """ + Loads rw.sdn plugin via libpeas + """ + plugin = rw_peas.PeasPlugin('rwsdn_mock', 'RwSdn-1.0') + engine, info, extension = plugin() + + # Get the RwLogger context + rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log") + + sdn = plugin.get_interface("Topology") + try: + rc = sdn.init(rwloggerctx) + assert rc == RwStatus.SUCCESS + except: + logger.error("ERROR:SDN plugin instantiation failed. Aborting tests") + else: + logger.info("Mock SDN plugin successfully instantiated") + return sdn + + + +class SdnMockTest(unittest.TestCase): + def setUp(self): + """ + Initialize test plugins + """ + self._acct = get_sdn_account() + logger.info("Mock-SDN-Test: setUp") + self.sdn = get_sdn_plugin() + logger.info("Mock-SDN-Test: setUpEND") + + def tearDown(self): + logger.info("Mock-SDN-Test: Done with tests") + + def test_get_network_list(self): + """ + First test case + """ + rc, nwtop = self.sdn.get_network_list(self._acct) + self.assertEqual(rc, RwStatus.SUCCESS) + logger.debug("SDN-Mock-Test: Retrieved network attributes ") + for nw in nwtop.network: + logger.debug("...Network id %s", nw.network_id) + logger.debug("...Network name %s", nw.l2_network_attributes.name) + print(nw) + + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + unittest.main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py new file mode 100644 index 0000000..be58aae --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py @@ -0,0 +1,97 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
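+# Unit test: loads the "rwsdn_sim" SDN plugin through libpeas and verifies that
+# get_network_list() succeeds for an sdnsim SDN account.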
+ + +# +# + +import datetime +import logging +import unittest + +import rw_peas +import rwlogger + +import gi +gi.require_version('RwTypes', '1.0') +gi.require_version('RwSdn', '1.0') +from gi.repository import RwsdnYang +from gi.repository import IetfNetworkYang +from gi.repository.RwTypes import RwStatus +from gi.repository import RwSdn + + +logger = logging.getLogger('sdnsim') + +def get_sdn_account(): + """ + Creates an object for class RwsdnYang.SdnAccount() + """ + account = RwsdnYang.SDNAccount() + account.account_type = "sdnsim" + account.sdnsim.username = "rift" + account.sdnsim.plugin_name = "rwsdn_sim" + return account + +def get_sdn_plugin(): + """ + Loads rw.sdn plugin via libpeas + """ + plugin = rw_peas.PeasPlugin('rwsdn_sim', 'RwSdn-1.0') + engine, info, extension = plugin() + + # Get the RwLogger context + rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log") + + sdn = plugin.get_interface("Topology") + try: + rc = sdn.init(rwloggerctx) + assert rc == RwStatus.SUCCESS + except: + logger.error("ERROR:SDN sim plugin instantiation failed. Aborting tests") + else: + logger.info("SDN sim plugin successfully instantiated") + return sdn + + + +class SdnSimTest(unittest.TestCase): + def setUp(self): + """ + Initialize test plugins + """ + self._acct = get_sdn_account() + logger.info("SDN-Sim-Test: setUp") + self.sdn = get_sdn_plugin() + logger.info("SDN-Sim-Test: setUpEND") + + def tearDown(self): + logger.info("SDN-Sim-Test: Done with tests") + + def test_get_network_list(self): + """ + First test case + """ + rc, nwtop = self.sdn.get_network_list(self._acct) + self.assertEqual(rc, RwStatus.SUCCESS) + logger.debug("SDN-Sim-Test: Retrieved network attributes ") + for nw in nwtop.network: + logger.debug("...Network id %s", nw.network_id) + logger.debug("...Network name %s", nw.l2_network_attributes.name) + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + unittest.main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py new file mode 100644 index 0000000..f9529c4 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py @@ -0,0 +1,732 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
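+# Unit tests for the NwtopDataStore topology data store: create and update of networks,
+# nodes, termination points and links, exercised against the stacked L2 host topology helper.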
+ + +# +# + +import datetime +import logging +import unittest + +import rwlogger + +# from gi.repository import IetfNetworkYang +from gi.repository import IetfL2TopologyYang as l2Tl +from gi.repository import RwTopologyYang as RwTl +# from gi.repository.RwTypes import RwStatus + +from create_stackedl2topology import MyL2Topology + +from rift.topmgr import ( + NwtopDataStore, +) +logger = logging.getLogger('sdntop') + +NUM_NWS = 1 +NUM_NODES_L2_NW = 6 +NUM_TPS_L2_NW = 20 +NUM_LINKS = 20 + +class SdnTopStoreNetworkTest(unittest.TestCase): + def setUp(self): + """ + Initialize Top data store + """ + self._nwtopdata_store = NwtopDataStore(logger) + self.test_nwtop = RwTl.YangData_IetfNetwork() + + self.l2top = MyL2Topology(self.test_nwtop, logger) + self.l2top.setup_all() + + # Get initial test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + # Create initial nw + self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1) + + # Add test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + assert self.l2net1 is not None + self.new_l2net = RwTl.YangData_IetfNetwork_Network() + self.new_l2net.network_id = "L2HostNetwork-2" + logger.info("SdnTopStoreNetworkTest: setUp") + + def tearDown(self): + self.l2net1 = None + self.new_l2net = None + logger.info("SdnTopStoreNetworkTest: Done with tests") + + def test_create_network(self): + """ + Test: Create first l2 network + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreNetworkTest: Create network ") + # Get test data + # Created durign setup phase + assert self.l2net1 is not None + # Use data store APIs + # Network already stored + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + logger.debug("...Network id %s", nw.network_id) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + logger.debug("...Network name %s", nw.l2_network_attributes.name) + for node in nw.node: + logger.debug("...Node id %s", node.node_id) + num_nodes += 1 + for tp in node.termination_point: + logger.debug("...Tp id %s", tp.tp_id) + num_tps += 1 + self.assertEqual(num_nodes, NUM_NODES_L2_NW) + self.assertEqual(num_tps, NUM_TPS_L2_NW) + + + def test_add_network(self): + """ + Test: Add another network, Check network id + """ + logger.debug("SdnTopStoreNetworkTest: Add network ") + # Use data store APIs + self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-2") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-2") + self.assertEqual(len(self._nwtopdata_store._networks), 2) + + def test_add_networktype(self): + """ + Test: Add another network, Check network type + """ + logger.debug("SdnTopStoreTest: Add network type ") + # Use data store APIs + self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-2") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-2") + self.assertEqual(len(self._nwtopdata_store._networks), 2) + # Add new test data + self.new_l2net.network_types.l2_network = self.new_l2net.network_types.l2_network.new() + logger.debug("Adding update l2net..%s", self.new_l2net) + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-2", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-2") + self.assertIsNotNone(nw.network_types.l2_network) + + def test_add_networkl2name(self): + """ + Test: 
Add another network, Check L2 network name + """ + logger.debug("SdnTopStoreTest: Add L2 network name ") + # Use data store APIs + self.new_l2net.network_types.l2_network = self.new_l2net.network_types.l2_network.new() + self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-2") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-2") + self.assertEqual(len(self._nwtopdata_store._networks), 2) + # Add new test data + self.new_l2net.l2_network_attributes.name = "L2networkName" + logger.debug("Adding update l2net..%s", self.new_l2net) + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-2", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-2") + self.assertEqual(nw.l2_network_attributes.name, "L2networkName") + + +class SdnTopStoreNetworkNodeTest(unittest.TestCase): + def setUp(self): + """ + Initialize Top data store + """ + self._nwtopdata_store = NwtopDataStore(logger) + self.test_nwtop = RwTl.YangData_IetfNetwork() + + self.l2top = MyL2Topology(self.test_nwtop, logger) + self.l2top.setup_all() + + # Get initial test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + # Create initial nw + self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1) + # Get test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + assert self.l2net1 is not None + self.new_l2net = RwTl.YangData_IetfNetwork_Network() + self.new_l2net.network_id = "L2HostNetwork-1" + self.node2 = self.new_l2net.node.add() + self.node2.node_id = "TempNode2" + logger.info("SdnTopStoreTest: setUp NetworkNodetest") + + def tearDown(self): + logger.info("SdnTopStoreTest: Done with NetworkNodetest") + + + def test_add_network_node(self): + """ + Test: Add a node to existing network + Test all parameters + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Add network node") + # Add test data + self.node2.node_id = "TempNode2" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + + #@unittest.skip("Skipping") + def test_update_network_node(self): + """ + Test: Updat a node to existing network + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network node") + # Add test data + self.node2.node_id = "TempNode2" + self.node2.l2_node_attributes.description = "TempNode2 desc" + self.node2.l2_node_attributes.name = "Nice Name2" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name2") + + #@unittest.skip("Skipping") + def 
test_update_network_node_l2attr1(self): + """ + Test: Update a node to existing network + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network node") + # Add test data + self.node2.node_id = "TempNode2" + self.node2.l2_node_attributes.description = "TempNode2 desc" + self.node2.l2_node_attributes.name = "Nice Name3" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3") + + # Add test data + self.node2.l2_node_attributes.name = "Nice Name4" + logger.debug("Network %s", self.new_l2net) + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + logger.debug("Node %s", nw.node[NUM_NODES_L2_NW]) + self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name4") + + def test_update_network_node_l2attr2(self): + """ + Test: Updat a node to existing network + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network node") + # Add test data + self.node2.node_id = "TempNode2" + self.node2.l2_node_attributes.description = "TempNode2 desc" + self.node2.l2_node_attributes.name = "Nice Name3" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3") + + # Add test data + self.node2.l2_node_attributes.management_address.append("10.0.0.1") + logger.debug("Network %s", self.new_l2net) + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].l2_node_attributes.management_address), 1) + + # Add test data + self.node2.l2_node_attributes.management_address.append("10.0.0.2") + logger.debug("Network %s", self.new_l2net) + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].l2_node_attributes.management_address), 2) + + +class SdnTopStoreNetworkNodeTpTest(unittest.TestCase): + def setUp(self): + """ + Initialize Top data store + """ + self._nwtopdata_store = NwtopDataStore(logger) + self.test_nwtop = 
RwTl.YangData_IetfNetwork() + + self.l2top = MyL2Topology(self.test_nwtop, logger) + self.l2top.setup_all() + + # Get initial test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + # Create initial nw + self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1) + # Get test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + assert self.l2net1 is not None + self.new_l2net = RwTl.YangData_IetfNetwork_Network() + self.new_l2net.network_id = "L2HostNetwork-1" + self.node2 = self.new_l2net.node.add() + self.node2.node_id = "TempNode2" + self.tp1 = self.node2.termination_point.add() + self.tp1.tp_id = "TempTp1" + logger.info("SdnTopStoreTest: setUp NetworkNodeTptest") + + def tearDown(self): + logger.info("SdnTopStoreTest: Done with NetworkNodeTptest") + + self.new_l2net = None + self.node2 = None + self.tp1 = None + + def test_add_network_node_tp(self): + """ + Test: Add a node to existing network + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network ") + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + + def test_update_network_node_tp(self): + """ + Test: Update a tp to existing network, add all tp elements + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network ") + self.tp1.tp_id = "TempTp1" + self.tp1.l2_termination_point_attributes.description = "TempTp1 Desc" + self.tp1.l2_termination_point_attributes.maximum_frame_size = 1296 + self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:01" + self.tp1.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1296) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01") + + def test_update_network_node_tp2(self): + """ + Test: Update a tp to existing network, change tp elements + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network ") + self.tp1.tp_id = "TempTp1" + self.tp1.l2_termination_point_attributes.description = "TempTp1 Desc" + self.tp1.l2_termination_point_attributes.maximum_frame_size = 1296 + self.tp1.l2_termination_point_attributes.mac_address = 
"00:1e:67:98:28:01" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1296) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01") + + # Change frame size + self.tp1.l2_termination_point_attributes.maximum_frame_size = 1396 + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01") + + # Change MAC address + self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:02" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:02") + + # Add encapsulation type + self.tp1.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + 
self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:02") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.eth_encapsulation, "l2t:ethernet") + + def test_update_extra_network_node_tp2(self): + """ + Test: Update a tp to existing network, change tp elements + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network ") + self.tp2 = self.node2.termination_point.add() + self.tp2.tp_id = "TempTp2" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[1].tp_id, "TempTp2") + + + +class SdnTopStoreNetworkLinkTest(unittest.TestCase): + def setUp(self): + """ + Initialize Top data store + """ + self._nwtopdata_store = NwtopDataStore(logger) + self.test_nwtop = RwTl.YangData_IetfNetwork() + + self.l2top = MyL2Topology(self.test_nwtop, logger) + self.l2top.setup_all() + + # Get initial test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + # Create initial nw + self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1) + # Get test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + assert self.l2net1 is not None + self.new_l2net = RwTl.YangData_IetfNetwork_Network() + self.new_l2net.network_id = "L2HostNetwork-1" + + self.src_node = self.new_l2net.node.add() + self.src_node.node_id = "TempNode1" + self.tp1 = self.src_node.termination_point.add() + self.tp1.tp_id = "TempTp1" + + self.dest_node = self.new_l2net.node.add() + self.dest_node.node_id = "TempNode2" + self.tp2 = self.dest_node.termination_point.add() + self.tp2.tp_id = "TempTp2" + logger.info("SdnTopStoreTest: setUp NetworkLinkTest") + + def tearDown(self): + logger.info("SdnTopStoreTest: Done with NetworkLinkTest") + + self.new_l2net = None + self.src_node = None + self.tp1 = None + self.dest_node = None + self.tp2 = None + + def test_add_network_link(self): + """ + Test: Add a link to existing network + """ + logger.info("SdnTopStoreTest: Update network link") + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1") + 
self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2") + self.assertEqual(len(nw.link), NUM_LINKS ) + self.link1 = self.new_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.source.source_node = self.src_node.node_id + self.link1.source.source_tp = self.tp1.tp_id + self.link1.destination.dest_node = self.dest_node.node_id + self.link1.destination.dest_tp = self.tp2.tp_id + # Use data store APIs + logger.info("SdnTopStoreTest: Update network link - Part 2") + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + # Verify data created + self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1") + self.assertEqual(nw.link[NUM_LINKS].source.source_node, self.src_node.node_id) + self.assertEqual(nw.link[NUM_LINKS].source.source_tp, self.tp1.tp_id) + self.assertEqual(nw.link[NUM_LINKS].destination.dest_node, self.dest_node.node_id) + self.assertEqual(nw.link[NUM_LINKS].destination.dest_tp, self.tp2.tp_id) + self.assertEqual(len(nw.link), NUM_LINKS + 1) + + def test_add_extra_network_link(self): + """ + Test: Add a link to existing network + """ + logger.info("SdnTopStoreTest: Update extra network link") + # Create initial state + self.link1 = self.new_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.source.source_node = self.src_node.node_id + self.link1.source.source_tp = self.tp1.tp_id + self.link1.destination.dest_node = self.dest_node.node_id + self.link1.destination.dest_tp = self.tp2.tp_id + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify initial state + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2") + self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1") + self.assertEqual(len(nw.link), NUM_LINKS + 1) + + # Add extra link (reverse) + self.link2 = self.new_l2net.link.add() + self.link2.link_id = "Link2" + self.link2.source.source_node = self.dest_node.node_id + self.link2.source.source_tp = self.tp2.tp_id + self.link2.destination.dest_node = self.src_node.node_id + self.link2.destination.dest_tp = self.tp1.tp_id + # Use data store APIs + logger.info("SdnTopStoreTest: Update extra network link - Part 2") + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + # Verify data created + self.assertEqual(nw.link[NUM_LINKS+1].link_id, "Link2") + self.assertEqual(len(nw.link), NUM_LINKS + 2) + self.assertEqual(nw.link[NUM_LINKS+1].source.source_node, self.dest_node.node_id) + self.assertEqual(nw.link[NUM_LINKS+1].source.source_tp, self.tp2.tp_id) + self.assertEqual(nw.link[NUM_LINKS+1].destination.dest_node, self.src_node.node_id) + 
self.assertEqual(nw.link[NUM_LINKS+1].destination.dest_tp, self.tp1.tp_id) + + def test_add_network_link_l2attr(self): + """ + Test: Check L2 link attributes + """ + logger.info("SdnTopStoreTest: Add network link L2 attributes") + # Create test state + self.link1 = self.new_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.source.source_node = self.src_node.node_id + self.link1.source.source_tp = self.tp1.tp_id + self.link1.destination.dest_node = self.dest_node.node_id + self.link1.destination.dest_tp = self.tp2.tp_id + self.link1.l2_link_attributes.name = "Link L2 name" + self.link1.l2_link_attributes.rate = 10000 + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data state + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2") + self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1") + self.assertEqual(len(nw.link), NUM_LINKS + 1) + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name") + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000) + + def test_change_network_link_l2attr(self): + """ + Test: Change L2 link attributes + """ + logger.info("SdnTopStoreTest: Change network link L2 attributes") + # Create initial state + self.link1 = self.new_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.source.source_node = self.src_node.node_id + self.link1.source.source_tp = self.tp1.tp_id + self.link1.destination.dest_node = self.dest_node.node_id + self.link1.destination.dest_tp = self.tp2.tp_id + self.link1.l2_link_attributes.name = "Link L2 name" + self.link1.l2_link_attributes.rate = 10000 + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify initial state + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2") + self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1") + self.assertEqual(len(nw.link), NUM_LINKS + 1) + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name") + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000) + + # Create initial state + self.test_l2net = RwTl.YangData_IetfNetwork_Network() + self.test_l2net.network_id = "L2HostNetwork-1" + self.link1 = self.test_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.l2_link_attributes.name = "Link L2 updated name" + self._nwtopdata_store.update_network("L2HostNetwork-1", self.test_l2net) + # Verify test state + nw = 
self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 updated name") + + def test_change_network_link_dest_tp(self): + """ + Test: Change L2 link attributes + """ + logger.info("SdnTopStoreTest: Change network link dest-tp") + # Create initial state + self.link1 = self.new_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.source.source_node = self.src_node.node_id + self.link1.source.source_tp = self.tp1.tp_id + self.link1.destination.dest_node = self.dest_node.node_id + self.link1.destination.dest_tp = self.tp2.tp_id + self.link1.l2_link_attributes.name = "Link L2 name" + self.link1.l2_link_attributes.rate = 10000 + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify initial state + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2") + self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1") + self.assertEqual(len(nw.link), NUM_LINKS + 1) + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name") + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000) + + # Create test state + self.test_l2net = RwTl.YangData_IetfNetwork_Network() + self.test_l2net.network_id = "L2HostNetwork-1" + self.link1 = self.test_l2net.link.add() + self.link1.link_id = "Link1" + # Changing dest node params + self.link1.destination.dest_node = self.src_node.node_id + self.link1.destination.dest_tp = self.tp1.tp_id + self._nwtopdata_store.update_network("L2HostNetwork-1", self.test_l2net) + # Verify test state + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(nw.link[NUM_LINKS].destination.dest_node, self.src_node.node_id) + + + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + unittest.main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py new file mode 100755 index 0000000..8c19072 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
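
Reviewer note (not part of the patch): the topology manager module test below is driven by xmlrunner; its main() fills in VNS_MGR_DIR, MESSAGE_BROKER_DIR and ROUTER_DIR from the build tree when they are not already exported, and writes XML results to the directory named by RIFT_MODULE_TEST. A minimal standalone invocation might look like the sketch below; the results path is a made-up example and the direct script execution is an assumption about the build environment.

    # Sketch only: run the module test standalone after a full RIFT.ware build.
    # The results directory is a hypothetical example.
    import os
    import subprocess

    env = dict(os.environ, RIFT_MODULE_TEST="/tmp/topmgr_module_test")
    subprocess.check_call(["./topmgr_module_test.py"], env=env)
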
+ + +# +# + + +import asyncio +import logging +import os +import sys +import types +import unittest +import uuid +import random + +import xmlrunner + +import gi +gi.require_version('CF', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwMain', '1.0') +gi.require_version('RwManifestYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwTypes', '1.0') +import gi.repository.CF as cf +import gi.repository.RwDts as rwdts +import gi.repository.RwMain as rwmain +import gi.repository.RwManifestYang as rwmanifest +import gi.repository.IetfL2TopologyYang as l2Tl +import gi.repository.RwTopologyYang as RwTl +import gi.repository.RwLaunchpadYang as launchpadyang +from gi.repository import RwsdnYang +from gi.repository.RwTypes import RwStatus + +from create_stackedl2topology import MyL2Topology +from create_stackedProvNettopology import MyProvTopology +from create_stackedVMNettopology import MyVMTopology +from create_stackedSfctopology import MySfcTopology + +import rw_peas +import rift.tasklets +import rift.test.dts + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class TopMgrTestCase(rift.test.dts.AbstractDTSTest): + + @classmethod + def configure_suite(cls, rwmain): + vns_mgr_dir = os.environ.get('VNS_MGR_DIR') + + cls.rwmain.add_tasklet(vns_mgr_dir, 'rwvnstasklet') + + @classmethod + def configure_schema(cls): + return RwTl.get_schema() + + @asyncio.coroutine + def wait_tasklets(self): + yield from asyncio.sleep(1, loop=self.loop) + + @classmethod + def configure_timeout(cls): + return 360 + + + @asyncio.coroutine + def configure_l2_network(self, dts): + nwtop = RwTl.YangData_IetfNetwork() + l2top = MyL2Topology(nwtop, self.log) + l2top.setup_all() + nw_xpath = "C,/nd:network" + self.log.info("Configuring l2 network: %s",nwtop) + yield from dts.query_create(nw_xpath, + rwdts.Flag.ADVISE, + nwtop) + + @asyncio.coroutine + def configure_prov_network(self, dts): + nwtop = RwTl.YangData_IetfNetwork() + l2top = MyL2Topology(nwtop, self.log) + l2top.setup_all() + + provtop = MyProvTopology(nwtop, l2top, self.log) + provtop.setup_all() + nw_xpath = "C,/nd:network" + self.log.info("Configuring provider network: %s",nwtop) + yield from dts.query_create(nw_xpath, + rwdts.Flag.ADVISE, + nwtop) + + @asyncio.coroutine + def configure_vm_network(self, dts): + nwtop = RwTl.YangData_IetfNetwork() + l2top = MyL2Topology(nwtop, self.log) + l2top.setup_all() + + provtop = MyProvTopology(nwtop, l2top, self.log) + provtop.setup_all() + + vmtop = MyVMTopology(nwtop, l2top, provtop, self.log) + vmtop.setup_all() + nw_xpath = "C,/nd:network" + self.log.info("Configuring VM network: %s",nwtop) + yield from dts.query_create(nw_xpath, + rwdts.Flag.ADVISE, + nwtop) + + @asyncio.coroutine + def configure_sfc_network(self, dts): + nwtop = RwTl.YangData_IetfNetwork() + l2top = MyL2Topology(nwtop, self.log) + l2top.setup_all() + + provtop = MyProvTopology(nwtop, l2top, self.log) + provtop.setup_all() + + vmtop = MyVMTopology(nwtop, l2top, provtop, self.log) + vmtop.setup_all() + + sfctop = MySfcTopology(nwtop, l2top, provtop, vmtop, self.log) + sfctop.setup_all() + + nw_xpath = "C,/nd:network" + self.log.info("Configuring SFC network: %s",nwtop) + yield from dts.query_create(nw_xpath, + rwdts.Flag.ADVISE, + nwtop) + + + #@unittest.skip("Skipping test_network_config") + def test_network_config(self): + self.log.debug("STARTING - test_network_config") + tinfo = self.new_tinfo('static_network') + dts = 
rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + + yield from asyncio.sleep(120, loop=self.loop) + yield from self.configure_l2_network(dts) + yield from self.configure_prov_network(dts) + yield from self.configure_vm_network(dts) + yield from self.configure_sfc_network(dts) + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_network_config") + +def main(): + top_dir = __file__[:__file__.find('/modules/core/')] + build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build') + mc_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build') + launchpad_build_dir = os.path.join(mc_build_dir, 'rwlaunchpad') + + if 'VNS_MGR_DIR' not in os.environ: + os.environ['VNS_MGR_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwvns') + + if 'MESSAGE_BROKER_DIR' not in os.environ: + os.environ['MESSAGE_BROKER_DIR'] = os.path.join(build_dir, 'rwmsg/plugins/rwmsgbroker-c') + + if 'ROUTER_DIR' not in os.environ: + os.environ['ROUTER_DIR'] = os.path.join(build_dir, 'rwdts/plugins/rwdtsrouter-c') + + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + unittest.main(testRunner=runner) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt new file mode 100644 index 0000000..3e17eb5 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt @@ -0,0 +1,59 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Creation Date: 10/28/2015 +# + +## +# Allow specific compiler warnings +## +rift_allow_compiler_warning(unused-but-set-variable) + +set(VALA_NAME rwsdn) +set(VALA_FILES ${VALA_NAME}.vala) +set(VALA_VERSION 1.0) +set(VALA_RELEASE 1) +set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION}) +set(VALA_TYPELIB_PREFIX RwSdn-${VALA_VERSION}) + +rift_add_vala( + ${VALA_LONG_NAME} + VALA_FILES ${VALA_FILES} + VALA_PACKAGES + rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0 + rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rwsdn_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0 + ietf_network_yang-1.0 ietf_network_topology_yang-1.0 + ietf_l2_topology_yang-1.0 rw_topology_yang-1.0 + rw_log-1.0 + VAPI_DIRS + ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang + ${RIFT_SUBMODULE_BINARY_ROOT}/rwlaunchpad/plugins/rwvns/yang/ + GIR_PATHS + ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang + ${RIFT_SUBMODULE_BINARY_ROOT}/rwlaunchpad/plugins/rwvns/yang/ + GENERATE_HEADER_FILE ${VALA_NAME}.h + GENERATE_SO_FILE lib${VALA_LONG_NAME}.so + GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi + GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir + GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib + DEPENDS rwcal_yang rwsdn_yang mano_yang rwlog_gi rwschema_yang + ) + +rift_install_vala_artifacts( + HEADER_FILES ${VALA_NAME}.h + SO_FILES lib${VALA_LONG_NAME}.so + VAPI_FILES ${VALA_LONG_NAME}.vapi + GIR_FILES ${VALA_TYPELIB_PREFIX}.gir + TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib + COMPONENT ${PKG_LONG_NAME} + DEST_PREFIX . 
+ ) + + +set(subdirs + rwsdn_mock + rwsdn_sim + rwsdn_odl + rwsdn-python + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt new file mode 100644 index 0000000..261e82f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwsdn-plugin rwsdn-plugin.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py new file mode 100644 index 0000000..d984362 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py @@ -0,0 +1,96 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import logging + +import gi +gi.require_version('RwTypes', '1.0') +gi.require_version('RwSdn', '1.0') +from gi.repository import ( + GObject, + RwSdn, # Vala package + RwTypes) + +import rw_status +import rwlogger + +import rift.cal +import rift.sdn + +logger = logging.getLogger('rwsdn') + +rwstatus = rw_status.rwstatus_from_exc_map({ + IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + + }) + + +class TopologyPlugin(GObject.Object, RwSdn.Topology): + def __init__(self): + GObject.Object.__init__(self) + self._impl = None + + @rwstatus + def do_init(self, rwlog_ctx): + providers = { + "sdnsim": rift.sdn.SdnSim, + "mock": rift.sdn.Mock, + } + + logger.addHandler( + rwlogger.RwLogger( + category="rwsdn", + log_hdl=rwlog_ctx, + ) + ) + + self._impl = {} + for name, impl in providers.items(): + try: + self._impl[name] = impl() + + except Exception: + msg = "unable to load SDN implementation for {}" + logger.exception(msg.format(name)) + + @rwstatus + def do_get_network_list(self, account, network_top): + obj = self._impl[account.account_type] + return obj.get_network_list(account, network_top) + +def main(): + @rwstatus + def blah(): + raise IndexError() + + a = blah() + assert(a == RwTypes.RwStatus.NOTFOUND) + + @rwstatus({IndexError: RwTypes.RwStatus.NOTCONNECTED}) + def blah2(): + """Some function""" + raise IndexError() + + a = blah2() + assert(a == RwTypes.RwStatus.NOTCONNECTED) + assert(blah2.__doc__ == "Some function") + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala new file mode 100644 index 0000000..a79f5a7 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala @@ -0,0 +1,79 @@ +namespace RwSdn { + + public interface Topology: GLib.Object { + /* + * Init routine + */ + public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx); + + /* + * Configuring related APIs + */ + /* TODO */ + + /* + 
* Network related APIs + */ + public abstract RwTypes.RwStatus get_network_list( + Rwsdn.SDNAccount account, + out RwTopology.YangData_IetfNetwork network_topology); + + /* + * VNFFG Chain related APIs + */ + public abstract RwTypes.RwStatus create_vnffg_chain( + Rwsdn.SDNAccount account, + Rwsdn.VNFFGChain vnffg_chain, + out string vnffg_id); + + /* + * VNFFG Chain Terminate related APIs + */ + public abstract RwTypes.RwStatus terminate_vnffg_chain( + Rwsdn.SDNAccount account, + string vnffg_id); + + + /* + * Network related APIs + */ + public abstract RwTypes.RwStatus get_vnffg_rendered_paths( + Rwsdn.SDNAccount account, + out Rwsdn.VNFFGRenderedPaths rendered_paths); + + /* + * Classifier related APIs + */ + public abstract RwTypes.RwStatus create_vnffg_classifier( + Rwsdn.SDNAccount account, + Rwsdn.VNFFGClassifier vnffg_classifier, + out string vnffg_classifier_id); + + /* + * Classifier related APIs + */ + public abstract RwTypes.RwStatus terminate_vnffg_classifier( + Rwsdn.SDNAccount account, + string vnffg_classifier_id); + + + + /* + * Node Related APIs + */ + /* TODO */ + + /* + * Termination-point Related APIs + */ + /* TODO */ + + /* + * Link Related APIs + */ + /* TODO */ + + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt new file mode 100644 index 0000000..1588ddf --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwsdn_mock rwsdn_mock.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py new file mode 100644 index 0000000..833ccc4 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py @@ -0,0 +1,174 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
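
Reviewer note (not part of the patch): the mock SDN plugin below builds a fixed L2 host topology (two hosts and one switch) in its DataStore. A minimal sketch of exercising that DataStore directly is shown here; the direct import of the plugin module is hypothetical and only for illustration, while the class names and the expected contents come from rwsdn_mock.py itself.

    # Sketch only: drive the mock plugin's DataStore without loading the plugin.
    # The import path below is hypothetical.
    import logging
    logging.basicConfig(level=logging.DEBUG)

    from rwsdn_mock import DataStore   # hypothetical direct import

    store = DataStore()
    topology = store.create_default_topology()
    nw = topology.network[0]
    print(nw.network_id)                         # "L2HostTopology-Def1"
    print([node.node_id for node in nw.node])    # grunt118, grunt44, AristaSw1
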
+ + +# +# + +import collections +import logging + +import gi +gi.require_version('RwTypes', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwSdn', '1.0') +from gi.repository import ( + GObject, + RwSdn, # Vala package + RwTypes, + RwTopologyYang as RwTl, + RwsdnYang + ) + +import rw_status +import rwlogger + +logger = logging.getLogger('rwsdn.mock') + + +class UnknownAccountError(Exception): + pass + + +class MissingFileError(Exception): + pass + + +rwstatus = rw_status.rwstatus_from_exc_map({ + IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + UnknownAccountError: RwTypes.RwStatus.NOTFOUND, + MissingFileError: RwTypes.RwStatus.NOTFOUND, + }) + +GRUNT118 = {"name": "grunt118", "ip_addr": "10.66.4.118", "tps": ["eth0"]} +GRUNT44 = {"name": "grunt44", "ip_addr": "10.66.4.44", "tps": ["eth0"]} +AS1 = {"name":"AristaSw1", "ip_addr": "10.66.4.54", "tps": ["Ethernet8/7","Ethernet8/8"]} +NW_NODES = [GRUNT118, GRUNT44, AS1] +NW_BIDIR_LINKS = [{"src" : ("grunt118","eth0"), "dest" : ("AristaSw1","Ethernet8/7")}, + {"src" : ("grunt44","eth0"), "dest" : ("AristaSw1","Ethernet8/8")}] + + +class DataStore(object): + def __init__(self): + self.topology = None + self.nw = None + self.next_mac = 11 + + def create_link(self, cfg_src_node, cfg_src_tp, cfg_dest_node, cfg_dest_tp): + lnk= self.nw.link.add() + lnk.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(cfg_src_node, cfg_src_tp, cfg_dest_node, cfg_dest_tp) + lnk.source.source_node = cfg_src_node + lnk.source.source_tp = cfg_src_tp + lnk.destination.dest_node = cfg_dest_node + lnk.destination.dest_tp = cfg_dest_tp + # L2 link augmentation + lnk.l2_link_attributes.name = cfg_src_tp + cfg_dest_tp + lnk.l2_link_attributes.rate = 1000000000.00 + + def create_tp(self, node, cfg_tp): + tp = node.termination_point.add() + tp.tp_id = ("urn:Rift:Lab:{}:{}").format(node.node_id, cfg_tp) + # L2 TP augmentation + tp.l2_termination_point_attributes.description = cfg_tp + tp.l2_termination_point_attributes.maximum_frame_size = 1500 + tp.l2_termination_point_attributes.mac_address = "00:1e:67:d8:48:" + str(self.next_mac) + self.next_mac = self.next_mac + 1 + tp.l2_termination_point_attributes.tp_state = "in_use" + tp.l2_termination_point_attributes.eth_encapsulation = "ethernet" + + def create_node(self, cfg_node): + node = self.nw.node.add() + node.node_id = cfg_node['name'] + # L2 Node augmentation + node.l2_node_attributes.name = cfg_node['name'] + node.l2_node_attributes.description = "Host with OVS-DPDK" + node.l2_node_attributes.management_address.append(cfg_node['ip_addr']) + for cfg_tp in cfg_node['tps']: + self.create_tp(node, cfg_tp) + + def create_default_topology(self): + logger.debug('Creating default topology: ') + + self.topology = RwTl.YangData_IetfNetwork() + self.nw = self.topology.network.add() + self.nw.network_id = "L2HostTopology-Def1" + self.nw.server_provided = 'true' + + # L2 Network type augmentation + self.nw.network_types.l2_network = self.nw.network_types.l2_network.new() + # L2 Network augmentation + self.nw.l2_network_attributes.name = "Rift LAB SFC-Demo Host Network" + + for cfg_node in NW_NODES: + self.create_node(cfg_node) + + for cfg_link in NW_BIDIR_LINKS: + self.create_link(cfg_link['src'][0], cfg_link['src'][1], cfg_link['dest'][0], cfg_link['dest'][1]) + self.create_link(cfg_link['src'][1], cfg_link['src'][0], cfg_link['dest'][1], cfg_link['dest'][0]) + + return self.topology + + +class Resources(object): + def __init__(self): + self.networks = dict() + + +class 
MockPlugin(GObject.Object, RwSdn.Topology): + """This class implements the abstract methods in the Topology class. + Mock is used for unit testing.""" + + def __init__(self): + GObject.Object.__init__(self) + self.resources = collections.defaultdict(Resources) + self.datastore = None + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler( + rwlogger.RwLogger( + category="rwsdn.mock", + log_hdl=rwlog_ctx, + ) + ) + + account = RwsdnYang.SDNAccount() + account.name = 'mock' + account.account_type = 'mock' + account.mock.username = 'rift' + + self.datastore = DataStore() + self.topology = self.datastore.create_default_topology() + + @rwstatus(ret_on_failure=[None]) + def do_get_network_list(self, account): + """ + Returns the list of discovered network + + @param account - a SDN account + + """ + logger.debug('Get network list: ') + + if (self.topology): + logger.debug('Returning network list: ') + return self.topology + + logger.debug('Returning empty network list: ') + return None + + \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt new file mode 100644 index 0000000..ffa8dec --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwsdn_odl rwsdn_odl.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py new file mode 100644 index 0000000..31e1402 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py @@ -0,0 +1,943 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
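
Reviewer note (not part of the patch): the ODL plugin below reads the controller endpoint and credentials from the rwsdn SDN account and issues plain RESTCONF requests against it. A small sketch of the account it expects and the network-topology URL it ends up querying is given here; all account values are made-up examples, and the availability of the RwsdnYang GI binding is assumed from the imports used elsewhere in this patch.

    # Sketch only: how SdnOdl composes its RESTCONF URL from an SDN account.
    # All account values are made-up examples.
    import gi
    gi.require_version('RwsdnYang', '1.0')
    from gi.repository import RwsdnYang

    account = RwsdnYang.SDNAccount()
    account.name = 'odl-lab'                     # example account name
    account.account_type = 'odl'
    account.odl.username = 'admin'               # example credentials
    account.odl.password = 'admin'
    account.odl.url = 'http://10.66.4.27:8181'   # example controller URL

    path = 'restconf/operational/network-topology:network-topology'
    url = '{}/{}'.format(account.odl.url, path)
    # -> http://10.66.4.27:8181/restconf/operational/network-topology:network-topology
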
+ + +# +# + +import logging + +import requests + +import json +import re + +import gi +gi.require_version('RwTypes', '1.0') +gi.require_version('RwsdnYang', '1.0') +gi.require_version('RwSdn', '1.0') +gi.require_version('RwTopologyYang','1.0') + +from gi.repository import ( + GObject, + RwSdn, # Vala package + RwTypes, + RwsdnYang, + RwTopologyYang as RwTl, + ) + +import rw_status +import rwlogger + + +logger = logging.getLogger('rwsdn.sdnodl') +logger.setLevel(logging.DEBUG) + + +sff_rest_based = True + +class UnknownAccountError(Exception): + pass + + +class MissingFileError(Exception): + pass + + +rwstatus = rw_status.rwstatus_from_exc_map({ + IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + UnknownAccountError: RwTypes.RwStatus.NOTFOUND, + MissingFileError: RwTypes.RwStatus.NOTFOUND, + }) + + +class SdnOdlPlugin(GObject.Object, RwSdn.Topology): + + def __init__(self): + GObject.Object.__init__(self) + self.sdnodl = SdnOdl() + + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler( + rwlogger.RwLogger( + category="sdnodl", + log_hdl=rwlog_ctx, + ) + ) + + @rwstatus(ret_on_failure=[None]) + def do_get_network_list(self, account): + """ + Returns the list of discovered networks + + @param account - a SDN account + + """ + logger.debug('Received Get network list: ') + nwtop = self.sdnodl.get_network_list( account) + logger.debug('Done with get network list: %s', type(nwtop)) + return nwtop + + @rwstatus(ret_on_failure=[""]) + def do_create_vnffg_chain(self, account,vnffg_chain): + """ + Creates Service Function chain in ODL + + @param account - a SDN account + + """ + logger.debug('Received Create VNFFG chain ') + vnffg_id = self.sdnodl.create_sfc( account,vnffg_chain) + logger.debug('Done with create VNFFG chain with name : %s', vnffg_id) + return vnffg_id + + @rwstatus + def do_terminate_vnffg_chain(self, account,vnffg_id): + """ + Terminate Service Function chain in ODL + + @param account - a SDN account + + """ + logger.debug('Received terminate VNFFG chain for id %s ', vnffg_id) + # TODO: Currently all the RSP, SFPs , SFFs and SFs are deleted + # Need to handle deletion of specific RSP, SFFs, SFs etc + self.sdnodl.terminate_all_sfc(account) + logger.debug('Done with terminate VNFFG chain with name : %s', vnffg_id) + + @rwstatus(ret_on_failure=[None]) + def do_get_vnffg_rendered_paths(self, account): + """ + Get ODL Rendered Service Path List (SFC) + + @param account - a SDN account + """ + vnffg_list = self.sdnodl.get_rsp_list(account) + return vnffg_list + + @rwstatus(ret_on_failure=[None]) + def do_create_vnffg_classifier(self, account, vnffg_classifier): + """ + Add VNFFG Classifier + + @param account - a SDN account + """ + classifier_name = self.sdnodl.create_sfc_classifier(account,vnffg_classifier) + return classifier_name + + @rwstatus(ret_on_failure=[None]) + def do_terminate_vnffg_classifier(self, account, vnffg_classifier_name): + """ + Add VNFFG Classifier + + @param account - a SDN account + """ + self.sdnodl.terminate_sfc_classifier(account,vnffg_classifier_name) + + +class Sff(object): + """ + Create SFF object to hold SFF related details + """ + + def __init__(self,sff_br_uid, sff_br_name , sff_ip, sff_br_ip): + import socket + self.name = socket.getfqdn(sff_ip) + self.br_uid = sff_br_uid + self.ip = sff_ip + self.br_ip = sff_br_ip + self.br_name = sff_br_name + self.sff_port = 6633 + self.sff_rest_port = 6000 + self.sf_dp_list = list() + + def 
add_sf_dp_to_sff(self,sf_dp): + self.sf_dp_list.append(sf_dp) + + def __repr__(self): + return 'Name:{},Bridge Name:{}, IP: {}, SF List: {}'.format(self.br_uid,self.br_name, self.ip, self.sf_dp_list) + +class SfDpLocator(object): + """ + Create Service Function Data Plane Locator related Object to hold details related to each DP Locator endpoint + """ + def __init__(self,sfdp_id,vnfr_name,vm_id): + self.name = sfdp_id + self.port_id = sfdp_id + self.vnfr_name = vnfr_name + self.vm_id = vm_id + self.sff_name = None + + def _update_sff_name(self,sff_name): + self.sff_name = sff_name + + def _update_vnf_params(self,service_function_type,address, port,transport_type): + self.service_function_type = 'service-function-type:{}'.format(service_function_type) + self.address = address + self.port = port + self.transport_type = "service-locator:{}".format(transport_type) + + def __repr__(self): + return 'Name:{},Port id:{}, VNFR ID: {}, VM ID: {}, SFF Name: {}'.format(self.name,self.port_id, self.vnfr_name, self.vm_id,self.sff_name) + +class SdnOdl(object): + """ + SDN ODL Class to support REST based API calls + """ + + @property + def _network_topology_path(self): + return 'restconf/operational/network-topology:network-topology' + + @property + def _node_inventory_path(self): + return 'restconf/operational/opendaylight-inventory:nodes' + + def _network_topology_rest_url(self,account): + return '{}/{}'.format(account.odl.url,self._network_topology_path) + + def _node_inventory_rest_url(self,account): + return '{}/{}'.format(account.odl.url,self._node_inventory_path) + + def _get_rest_url(self,account, rest_path): + return '{}/{}'.format(account.odl.url,rest_path) + + + def _get_peer_termination_point(self,node_inv,tp_id): + for node in node_inv['nodes']['node']: + if "node-connector" in node and len(node['node-connector']) > 0: + for nodec in node['node-connector']: + if ("flow-node-inventory:name" in nodec and nodec["flow-node-inventory:name"] == tp_id): + return(node['id'], nodec['id']) + return (None,None) + + def _get_termination_point_mac_address(self,node_inv,tp_id): + for node in node_inv['nodes']['node']: + if "node-connector" in node and len(node['node-connector']) > 0: + for nodec in node['node-connector']: + if ("flow-node-inventory:name" in nodec and nodec["flow-node-inventory:name"] == tp_id): + return nodec.get("flow-node-inventory:hardware-address") + + def _add_host(self,ntwk,node,term_point,vmid,node_inv): + for ntwk_node in ntwk.node: + if ntwk_node.node_id == vmid: + break + else: + ntwk_node = ntwk.node.add() + if "ovsdb:bridge-name" in node: + ntwk_node.rw_node_attributes.ovs_bridge_name = node["ovsdb:bridge-name"] + ntwk_node.node_id = vmid + intf_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'iface-id'] + if intf_id: + ntwk_node_tp = ntwk_node.termination_point.add() + ntwk_node_tp.tp_id = intf_id[0]['external-id-value'] + att_mac = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'attached-mac'] + if att_mac: + ntwk_node_tp.l2_termination_point_attributes.mac_address = att_mac[0]['external-id-value'] + peer_node,peer_node_tp = self._get_peer_termination_point(node_inv,term_point['tp-id']) + if peer_node and peer_node_tp: + nw_lnk = ntwk.link.add() + nw_lnk.source.source_tp = ntwk_node_tp.tp_id + nw_lnk.source.source_node = ntwk_node.node_id + nw_lnk.destination.dest_tp = term_point['tp-id'] + nw_lnk.destination.dest_node = node['node-id'] + nw_lnk.link_id = peer_node_tp + '-' + 'source' + + 
nw_lnk = ntwk.link.add() + nw_lnk.source.source_tp = term_point['tp-id'] + nw_lnk.source.source_node = node['node-id'] + nw_lnk.destination.dest_tp = ntwk_node_tp.tp_id + nw_lnk.destination.dest_node = ntwk_node.node_id + nw_lnk.link_id = peer_node_tp + '-' + 'dest' + + def _get_address_from_node_inventory(self,node_inv,node_id): + for node in node_inv['nodes']['node']: + if node['id'] == node_id: + return node["flow-node-inventory:ip-address"] + return None + + def _fill_network_list(self,nw_topo,node_inventory): + """ + Fill Topology related information + """ + nwtop = RwTl.YangData_IetfNetwork() + + for topo in nw_topo['network-topology']['topology']: + if ('node' in topo and len(topo['node']) > 0): + ntwk = nwtop.network.add() + ntwk.network_id = topo['topology-id'] + ntwk.server_provided = True + for node in topo['node']: + if ('termination-point' in node and len(node['termination-point']) > 0): + ntwk_node = ntwk.node.add() + ntwk_node.node_id = node['node-id'] + addr = self._get_address_from_node_inventory(node_inventory,ntwk_node.node_id) + if addr: + ntwk_node.l2_node_attributes.management_address.append(addr) + for term_point in node['termination-point']: + ntwk_node_tp = ntwk_node.termination_point.add() + ntwk_node_tp.tp_id = term_point['tp-id'] + mac_address = self._get_termination_point_mac_address(node_inventory,term_point['tp-id']) + if mac_address: + ntwk_node_tp.l2_termination_point_attributes.mac_address = mac_address + if 'ovsdb:interface-external-ids' in term_point: + vm_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'vm-id'] + if vm_id: + vmid = vm_id[0]['external-id-value'] + self._add_host(ntwk,node,term_point,vmid,node_inventory) + if ('link' in topo and len(topo['link']) > 0): + for link in topo['link']: + nw_link = ntwk.link.add() + if 'destination' in link: + nw_link.destination.dest_tp = link['destination'].get('dest-tp') + nw_link.destination.dest_node = link['destination'].get('dest-node') + if 'source' in link: + nw_link.source.source_node = link['source'].get('source-node') + nw_link.source.source_tp = link['source'].get('source-tp') + nw_link.link_id = link.get('link-id') + return nwtop + + + def get_network_list(self, account): + """ + Get the networks details from ODL + """ + url = self._network_topology_rest_url(account) + r=requests.get(url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + nw_topo = r.json() + + url = self._node_inventory_rest_url(account) + r = requests.get(url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + node_inventory = r.json() + return self._fill_network_list(nw_topo,node_inventory) + + @property + def _service_functions_path(self): + return 'restconf/config/service-function:service-functions' + + @property + def _service_function_path(self): + return 'restconf/config/service-function:service-functions/service-function/{}' + + @property + def _service_function_forwarders_path(self): + return 'restconf/config/service-function-forwarder:service-function-forwarders' + + @property + def _service_function_forwarder_path(self): + return 'restconf/config/service-function-forwarder:service-function-forwarders/service-function-forwarder/{}' + + @property + def _service_function_chains_path(self): + return 'restconf/config/service-function-chain:service-function-chains' + + @property + def _service_function_chain_path(self): + return 'restconf/config/service-function-chain:service-function-chains/service-function-chain/{}' + + + @property + 
def _sfps_path(self): + return 'restconf/config/service-function-path:service-function-paths' + + @property + def _sfp_path(self): + return 'restconf/config/service-function-path:service-function-paths/service-function-path/{}' + + + @property + def _create_rsp_path(self): + return 'restconf/operations/rendered-service-path:create-rendered-path' + + @property + def _delete_rsp_path(self): + return 'restconf/operations/rendered-service-path:delete-rendered-path' + + + @property + def _get_rsp_paths(self): + return 'restconf/operational/rendered-service-path:rendered-service-paths' + + @property + def _get_rsp_path(self): + return 'restconf/operational/rendered-service-path:rendered-service-paths/rendered-service-path/{}' + + @property + def _access_list_path(self): + return 'restconf/config/ietf-access-control-list:access-lists/acl/{}' + + @property + def _service_function_classifier_path(self): + return 'restconf/config/service-function-classifier:service-function-classifiers/service-function-classifier/{}' + + @property + def _access_lists_path(self): + return 'restconf/config/ietf-access-control-list:access-lists' + + @property + def _service_function_classifiers_path(self): + return 'restconf/config/service-function-classifier:service-function-classifiers' + + + def _create_sf(self,account,vnffg_chain,sf_dp_list): + "Create SF" + sf_json = {} + + for vnf in vnffg_chain.vnf_chain_path: + for vnfr in vnf.vnfr_ids: + sf_url = self._get_rest_url(account,self._service_function_path.format(vnfr.vnfr_name)) + print(sf_url) + r=requests.get(sf_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}) + # If the SF is not found; create new SF + if r.status_code == 200: + logger.info("SF with name %s is already present in ODL. 
Skipping update", vnfr.vnfr_name) + continue + elif r.status_code != 404: + r.raise_for_status() + + sf_dict = {} + sf_dict['name'] = vnfr.vnfr_name + sf_dict['nsh-aware'] = vnf.nsh_aware + sf_dict['type'] = 'service-function-type:{}'.format(vnf.service_function_type) + sf_dict['ip-mgmt-address'] = vnfr.mgmt_address + sf_dict['rest-uri'] = 'http://{}:{}'.format(vnfr.mgmt_address, vnfr.mgmt_port) + + sf_dict['sf-data-plane-locator'] = list() + for vdu in vnfr.vdu_list: + sf_dp = {} + if vdu.port_id in sf_dp_list.keys(): + sf_dp['name'] = vdu.name + sf_dp['ip'] = vdu.address + sf_dp['port'] = vdu.port + sf_dp['transport'] = "service-locator:{}".format(vnf.transport_type) + sff_name = sf_dp_list[vdu.port_id].sff_name + if sff_name is None: + logger.error("SFF not found for port %s in SF %s", vdu.port_id, vnfr.vnfr_name) + sf_dp['service-function-forwarder'] = sff_name + sf_dict['sf-data-plane-locator'].append(sf_dp) + else: + logger.error("Port %s not found in SF DP list",vdu.port_id) + + sf_json['service-function'] = sf_dict + sf_data = json.dumps(sf_json) + sf_url = self._get_rest_url(account,self._service_function_path.format(vnfr.vnfr_name)) + print(sf_url) + print(sf_data) + r=requests.put(sf_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sf_data) + r.raise_for_status() + + + def _create_sff(self,account,vnffg_chain,sff): + "Create SFF" + sff_json = {} + sff_dict = {} + + sff_url = self._get_rest_url(account,self._service_function_forwarder_path.format(sff.name)) + print(sff_url) + r=requests.get(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}) + # If the SFF is not found; create new SF + if r.status_code == 200: + logger.info("SFF with name %s is already present in ODL. Skipping full update", sff.name) + sff_dict = r.json() + sff_updated = False + for sf_dp in sff.sf_dp_list: + for sff_sf in sff_dict['service-function-forwarder'][0]['service-function-dictionary']: + if sf_dp.vnfr_name == sff_sf['name']: + logger.info("SF with name %s is already found in SFF %s SF Dictionay. 
Skipping update",sf_dp.vnfr_name,sff.name) + break + else: + logger.info("SF with name %s is not found in SFF %s SF Dictionay",sf_dp.vnfr_name, sff.name) + sff_updated = True + sff_sf_dict = {} + sff_sf_dp_loc = {} + sff_sf_dict['name'] = sf_dp.vnfr_name + + # Below two lines are enabled only for ODL Beryillium + sff_sf_dp_loc['sff-dpl-name'] = sff.name + sff_sf_dp_loc['sf-dpl-name'] = sf_dp.name + + sff_sf_dict['sff-sf-data-plane-locator'] = sff_sf_dp_loc + sff_dict['service-function-forwarder'][0]['service-function-dictionary'].append(sff_sf_dict) + if sff_updated is True: + sff_data = json.dumps(sff_dict) + print(sff_data) + r=requests.put(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sff_data) + r.raise_for_status() + return + elif r.status_code != 404: + r.raise_for_status() + + sff_name = sff.name + sff_ip = sff.ip + sff_br_ip = sff.br_ip + sff_port = sff.sff_port + sff_bridge_name = '' + sff_rest_port = sff.sff_rest_port + sff_ovs_op = {} + if sff_rest_based is False: + sff_bridge_name = sff.br_name + sff_ovs_op = {"key": "flow", + "nshc1": "flow", + "nsp": "flow", + "remote-ip": "flow", + "dst-port": sff_port, + "nshc3": "flow", + "nshc2": "flow", + "nshc4": "flow", + "nsi": "flow"} + + + sff_dict['name'] = sff_name + sff_dict['service-node'] = '' + sff_dict['ip-mgmt-address'] = sff_ip + if sff_rest_based: + sff_dict['rest-uri'] = 'http://{}:{}'.format(sff_ip, sff_rest_port) + else: + sff_dict['service-function-forwarder-ovs:ovs-bridge'] = {"bridge-name": sff_bridge_name} + sff_dict['service-function-dictionary'] = list() + for sf_dp in sff.sf_dp_list: + sff_sf_dict = {} + sff_sf_dp_loc = {} + sff_sf_dict['name'] = sf_dp.vnfr_name + + # Below set of lines are reqd for Lithium + #sff_sf_dict['type'] = sf_dp.service_function_type + #sff_sf_dp_loc['ip'] = sf_dp.address + #sff_sf_dp_loc['port'] = sf_dp.port + #sff_sf_dp_loc['transport'] = sf_dp.transport_type + #sff_sf_dp_loc['service-function-forwarder-ovs:ovs-bridge'] = {} + + # Below two lines are enabled only for ODL Beryillium + sff_sf_dp_loc['sff-dpl-name'] = sff_name + sff_sf_dp_loc['sf-dpl-name'] = sf_dp.name + + sff_sf_dict['sff-sf-data-plane-locator'] = sff_sf_dp_loc + sff_dict['service-function-dictionary'].append(sff_sf_dict) + + sff_dict['sff-data-plane-locator'] = list() + sff_dp = {} + dp_loc = {} + sff_dp['name'] = sff_name + dp_loc['ip'] = sff_br_ip + dp_loc['port'] = sff_port + dp_loc['transport'] = 'service-locator:vxlan-gpe' + sff_dp['data-plane-locator'] = dp_loc + if sff_rest_based is False: + sff_dp['service-function-forwarder-ovs:ovs-options'] = sff_ovs_op + sff_dp["service-function-forwarder-ovs:ovs-bridge"] = {'bridge-name':sff_bridge_name} + sff_dict['sff-data-plane-locator'].append(sff_dp) + + sff_json['service-function-forwarder'] = sff_dict + sff_data = json.dumps(sff_json) + print(sff_data) + r=requests.put(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sff_data) + r.raise_for_status() + + def _create_sfc(self,account,vnffg_chain): + "Create SFC" + sfc_json = {} + sfc_dict = {} + sfc_dict['name'] = vnffg_chain.name + sfc_dict['sfc-service-function'] = list() + vnf_chain_list = sorted(vnffg_chain.vnf_chain_path, key = lambda x: x.order) + for vnf in vnf_chain_list: + sfc_sf_dict = {} + sfc_sf_dict['name'] = vnf.service_function_type + sfc_sf_dict['type'] = 'service-function-type:{}'.format(vnf.service_function_type) + sfc_sf_dict['order'] = vnf.order + 
sfc_dict['sfc-service-function'].append(sfc_sf_dict) + sfc_json['service-function-chain'] = sfc_dict + sfc_data = json.dumps(sfc_json) + sfc_url = self._get_rest_url(account,self._service_function_chain_path.format(vnffg_chain.name)) + print(sfc_url) + print(sfc_data) + r=requests.put(sfc_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfc_data) + r.raise_for_status() + + def _create_sfp(self,account,vnffg_chain, sym_chain=False): + "Create SFP" + sfp_json = {} + sfp_dict = {} + sfp_dict['name'] = vnffg_chain.name + sfp_dict['service-chain-name'] = vnffg_chain.name + sfp_dict['symmetric'] = sym_chain + + sfp_json['service-function-path'] = sfp_dict + sfp_data = json.dumps(sfp_json) + sfp_url = self._get_rest_url(account,self._sfp_path.format(vnffg_chain.name)) + print(sfp_url) + print(sfp_data) + r=requests.put(sfp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfp_data) + r.raise_for_status() + + def _create_rsp(self,account,vnffg_chain_name, sym_chain=True): + "Create RSP" + rsp_json = {} + rsp_input = {} + rsp_json['input'] = {} + rsp_input['name'] = vnffg_chain_name + rsp_input['parent-service-function-path'] = vnffg_chain_name + rsp_input['symmetric'] = sym_chain + + rsp_json['input'] = rsp_input + rsp_data = json.dumps(rsp_json) + self._rsp_data = rsp_json + rsp_url = self._get_rest_url(account,self._create_rsp_path) + print(rsp_url) + print(rsp_data) + r=requests.post(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=rsp_data) + r.raise_for_status() + print(r.json()) + output_json = r.json() + return output_json['output']['name'] + + def _get_sff_list_for_chain(self, account,sf_dp_list): + """ + Get List of all SFF that needs to be created based on VNFs included in VNFFG chain. 
+ """ + + sff_list = {} + if sf_dp_list is None: + logger.error("VM List for vnffg chain is empty while trying to get SFF list") + url = self._network_topology_rest_url(account) + r=requests.get(url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + nw_topo = r.json() + + for topo in nw_topo['network-topology']['topology']: + if ('node' in topo and len(topo['node']) > 0): + for node in topo['node']: + if ('termination-point' in node and len(node['termination-point']) > 0): + for term_point in node['termination-point']: + if 'ovsdb:interface-external-ids' in term_point: + vm_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'vm-id'] + if len(vm_id) == 0: + continue + vmid = vm_id[0]['external-id-value'] + intf_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'iface-id'] + if len(intf_id) == 0: + continue + intfid = intf_id[0]['external-id-value'] + if intfid not in sf_dp_list.keys(): + continue + if sf_dp_list[intfid].vm_id != vmid: + logger.error("Intf ID %s is not present in VM %s", intfid, vmid) + continue + + if 'ovsdb:managed-by' in node: + rr=re.search('network-topology:node-id=\'([-\w\:\/]*)\'',node['ovsdb:managed-by']) + node_id = rr.group(1) + ovsdb_node = [node for node in topo['node'] if node['node-id'] == node_id] + if ovsdb_node: + if 'ovsdb:connection-info' in ovsdb_node[0]: + sff_ip = ovsdb_node[0]['ovsdb:connection-info']['local-ip'] + sff_br_name = node['ovsdb:bridge-name'] + sff_br_uuid = node['ovsdb:bridge-uuid'] + sff_br_ip = sff_ip + + if 'ovsdb:openvswitch-other-configs' in ovsdb_node[0]: + for other_key in ovsdb_node[0]['ovsdb:openvswitch-other-configs']: + if other_key['other-config-key'] == 'local_ip': + local_ip_str = other_key['other-config-value'] + sff_br_ip = local_ip_str.split(',')[0] + break + + if sff_br_uuid in sff_list: + sff_list[sff_br_uuid].add_sf_dp_to_sff(sf_dp_list[intfid]) + sf_dp_list[intfid]._update_sff_name(sff_list[sff_br_uuid].name) + else: + sff_list[sff_br_uuid] = Sff(sff_br_uuid,sff_br_name, sff_ip,sff_br_ip) + sff_list[sff_br_uuid].add_sf_dp_to_sff(sf_dp_list[intfid]) + sf_dp_list[intfid]._update_sff_name(sff_list[sff_br_uuid].name) + return sff_list + + + def _get_sf_dp_list_for_chain(self,account,vnffg_chain): + """ + Get list of all Service Function Data Plane Locators present in VNFFG + useful for easy reference while creating SF and SFF + """ + sfdp_list = {} + for vnf in vnffg_chain.vnf_chain_path: + for vnfr in vnf.vnfr_ids: + for vdu in vnfr.vdu_list: + sfdp = SfDpLocator(vdu.port_id,vnfr.vnfr_name, vdu.vm_id) + sfdp._update_vnf_params(vnf.service_function_type, vdu.address, vdu.port, vnf.transport_type) + sfdp_list[vdu.port_id] = sfdp + return sfdp_list + + def create_sfc(self, account, vnffg_chain): + "Create SFC chain" + + sff_list = {} + sf_dp_list = {} + sf_dp_list = self._get_sf_dp_list_for_chain(account,vnffg_chain) + + # Get the list of all SFFs required for vnffg chain + sff_list = self._get_sff_list_for_chain(account,sf_dp_list) + + #for name,sff in sff_list.items(): + # print(name, sff) + + #Create all the SF in VNFFG chain + self._create_sf(account,vnffg_chain,sf_dp_list) + + for _,sff in sff_list.items(): + self._create_sff(account,vnffg_chain,sff) + + self._create_sfc(account,vnffg_chain) + + self._create_sfp(account,vnffg_chain) + + ## Update to SFF could have deleted some RSP; so get list of SFP and + ## check RSP exists for same and create any as necessary + #rsp_name = self._create_rsp(account,vnffg_chain) 
+ #return rsp_name + self._create_all_rsps(account) + return vnffg_chain.name + + def _create_all_rsps(self,account): + """ + Create all the RSPs for SFP found + """ + sfps_url = self._get_rest_url(account,self._sfps_path) + r=requests.get(sfps_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}) + r.raise_for_status() + sfps_json = r.json() + if 'service-function-path' in sfps_json['service-function-paths']: + for sfp in sfps_json['service-function-paths']['service-function-path']: + rsp_url = self._get_rest_url(account,self._get_rsp_path.format(sfp['name'])) + r = requests.get(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}) + if r.status_code == 404: + # Create the RSP + logger.info("Creating RSP for Service Path with name %s",sfp['name']) + self._create_rsp(account,sfp['name']) + + def delete_all_sf(self, account): + "Delete all the SFs" + sf_url = self._get_rest_url(account,self._service_functions_path) + print(sf_url) + r=requests.delete(sf_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + + def delete_all_sff(self, account): + "Delete all the SFFs" + sff_url = self._get_rest_url(account,self._service_function_forwarders_path) + print(sff_url) + r=requests.delete(sff_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + def delete_all_sfc(self, account): + "Delete all the SFCs" + sfc_url = self._get_rest_url(account,self._service_function_chains_path) + print(sfc_url) + r=requests.delete(sfc_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + def delete_all_sfp(self, account): + "Delete all the SFPs" + sfp_url = self._get_rest_url(account,self._sfps_path) + print(sfp_url) + r=requests.delete(sfp_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + def delete_all_rsp(self, account): + "Delete all the RSP" + #rsp_list = self.get_rsp_list(account) + url = self._get_rest_url(account,self._get_rsp_paths) + print(url) + r = requests.get(url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + print(r.json()) + rsp_list = r.json() + + #for vnffg in rsp_list.vnffg_rendered_path: + for sfc_rsp in rsp_list['rendered-service-paths']['rendered-service-path']: + rsp_json = {} + rsp_input = {} + rsp_json['input'] = {} + rsp_input['name'] = sfc_rsp['name'] + + rsp_json['input'] = rsp_input + rsp_data = json.dumps(rsp_json) + self._rsp_data = rsp_json + rsp_url = self._get_rest_url(account,self._delete_rsp_path) + print(rsp_url) + print(rsp_data) + + r=requests.post(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=rsp_data) + r.raise_for_status() + print(r.json()) + #output_json = r.json() + #return output_json['output']['name'] + + def terminate_all_sfc(self, account): + "Terminate SFC chain" + self.delete_all_rsp(account) + self.delete_all_sfp(account) + self.delete_all_sfc(account) + self.delete_all_sff(account) + self.delete_all_sf(account) + + + def _fill_rsp_list(self,sfc_rsp_list,sff_list): + vnffg_rsps = RwsdnYang.VNFFGRenderedPaths() + for sfc_rsp in sfc_rsp_list['rendered-service-paths']['rendered-service-path']: + rsp = vnffg_rsps.vnffg_rendered_path.add() + rsp.name = sfc_rsp['name'] + rsp.path_id = sfc_rsp['path-id'] + for sfc_rsp_hop in sfc_rsp['rendered-service-path-hop']: + rsp_hop = rsp.rendered_path_hop.add() + rsp_hop.hop_number = sfc_rsp_hop['hop-number'] + rsp_hop.service_index = 
sfc_rsp_hop['service-index'] + rsp_hop.vnfr_name = sfc_rsp_hop['service-function-name'] + rsp_hop.service_function_forwarder.name = sfc_rsp_hop['service-function-forwarder'] + for sff in sff_list['service-function-forwarders']['service-function-forwarder']: + if sff['name'] == rsp_hop.service_function_forwarder.name: + rsp_hop.service_function_forwarder.ip_address = sff['sff-data-plane-locator'][0]['data-plane-locator']['ip'] + rsp_hop.service_function_forwarder.port = sff['sff-data-plane-locator'][0]['data-plane-locator']['port'] + break + return vnffg_rsps + + + def get_rsp_list(self,account): + "Get RSP list" + + sff_url = self._get_rest_url(account,self._service_function_forwarders_path) + print(sff_url) + r=requests.get(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}) + r.raise_for_status() + sff_list = r.json() + + url = self._get_rest_url(account,self._get_rsp_paths) + print(url) + r = requests.get(url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + print(r.json()) + return self._fill_rsp_list(r.json(),sff_list) + + def create_sfc_classifier(self, account, sfc_classifiers): + "Create SFC Classifiers" + self._add_acl_rules(account, sfc_classifiers) + self._create_sf_classifier(account, sfc_classifiers) + return sfc_classifiers.name + + def terminate_sfc_classifier(self, account, sfc_classifier_name): + "Create SFC Classifiers" + self._terminate_sf_classifier(account, sfc_classifier_name) + self._del_acl_rules(account, sfc_classifier_name) + + def _del_acl_rules(self,account,sfc_classifier_name): + " Terminate SF classifiers" + acl_url = self._get_rest_url(account,self._access_lists_path) + print(acl_url) + r=requests.delete(acl_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + def _terminate_sf_classifier(self,account,sfc_classifier_name): + " Terminate SF classifiers" + sfcl_url = self._get_rest_url(account,self._service_function_classifiers_path) + print(sfcl_url) + r=requests.delete(sfcl_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + def _create_sf_classifier(self,account,sfc_classifiers): + " Create SF classifiers" + sf_classifier_json = {} + sf_classifier_dict = {} + sf_classifier_dict['name'] = sfc_classifiers.name + sf_classifier_dict['access-list'] = sfc_classifiers.name + sf_classifier_dict['scl-service-function-forwarder'] = list() + scl_sff = {} + scl_sff_name = '' + + if sfc_classifiers.has_field('port_id') and sfc_classifiers.has_field('vm_id'): + sf_dp = SfDpLocator(sfc_classifiers.port_id,'', sfc_classifiers.vm_id) + sf_dp_list= {} + sf_dp_list[sfc_classifiers.port_id] = sf_dp + self._get_sff_list_for_chain(account,sf_dp_list) + + if sf_dp.sff_name is None: + logger.error("SFF not found for port %s, VM: %s",sfc_classifiers.port.port_id,sfc_classifiers.vm_id) + else: + logger.error("SFF with name %s found for port %s, VM: %s",sf_dp.sff_name, sfc_classifiers.port_id,sfc_classifiers.vm_id) + scl_sff_name = sf_dp.sff_name + else: + rsp_url = self._get_rest_url(account,self._get_rsp_path.format(sfc_classifiers.rsp_name)) + r = requests.get(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}) + if r.status_code == 200: + rsp_data = r.json() + if 'rendered-service-path' in rsp_data and len(rsp_data['rendered-service-path'][0]['rendered-service-path-hop']) > 0: + scl_sff_name = rsp_data['rendered-service-path'][0]['rendered-service-path-hop'][0]['service-function-forwarder'] + + 
logger.debug("SFF for classifer %s found is %s",sfc_classifiers.name, scl_sff_name) + scl_sff['name'] = scl_sff_name + #scl_sff['interface'] = sff_intf_name + sf_classifier_dict['scl-service-function-forwarder'].append(scl_sff) + + sf_classifier_json['service-function-classifier'] = sf_classifier_dict + + sfcl_data = json.dumps(sf_classifier_json) + sfcl_url = self._get_rest_url(account,self._service_function_classifier_path.format(sfc_classifiers.name)) + print(sfcl_url) + print(sfcl_data) + r=requests.put(sfcl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfcl_data) + r.raise_for_status() + + def _add_acl_rules(self, account,sfc_classifiers): + "Create ACL rules" + access_list_json = {} + access_list_dict = {} + acl_entry_list = list() + acl_list_dict = {} + for acl_rule in sfc_classifiers.match_attributes: + acl_entry = {} + acl_entry['rule-name'] = acl_rule.name + acl_entry['actions'] = {} + #acl_entry['actions']['netvirt-sfc-acl:rsp-name'] = sfc_classifiers.rsp_name + acl_entry['actions']['service-function-acl:rendered-service-path'] = sfc_classifiers.rsp_name + + matches = {} + for field, value in acl_rule.as_dict().items(): + if field == 'ip_proto': + matches['protocol'] = value + elif field == 'source_ip_address': + matches['source-ipv4-network'] = value + elif field == 'destination_ip_address': + matches['destination-ipv4-network'] = value + elif field == 'source_port': + matches['source-port-range'] = {'lower-port':value, 'upper-port':value} + elif field == 'destination_port': + matches['destination-port-range'] = {'lower-port':value, 'upper-port':value} + acl_entry['matches'] = matches + acl_entry_list.append(acl_entry) + acl_list_dict['ace'] = acl_entry_list + access_list_dict['acl-name'] = sfc_classifiers.name + access_list_dict['access-list-entries'] = acl_list_dict + access_list_json['acl'] = access_list_dict + + acl_data = json.dumps(access_list_json) + acl_url = self._get_rest_url(account,self._access_list_path.format(sfc_classifiers.name)) + print(acl_url) + print(acl_data) + r=requests.put(acl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=acl_data) + r.raise_for_status() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt new file mode 100644 index 0000000..726abde --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwsdn_sim rwsdn_sim.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py new file mode 100644 index 0000000..74ed66e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py @@ -0,0 +1,95 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import collections +import itertools +import logging +import os +import uuid +import time + +import ipaddress + +import gi +gi.require_version('RwTypes', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwSdn', '1.0') +from gi.repository import ( + GObject, + RwSdn, # Vala package + RwTypes, + RwsdnYang, + #IetfL2TopologyYang as l2Tl, + RwTopologyYang as RwTl, + ) + +import rw_status +import rwlogger + +from rift.topmgr.sdnsim import SdnSim + + +logger = logging.getLogger('rwsdn.sdnsim') + + +class UnknownAccountError(Exception): + pass + + +class MissingFileError(Exception): + pass + + +rwstatus = rw_status.rwstatus_from_exc_map({ + IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + UnknownAccountError: RwTypes.RwStatus.NOTFOUND, + MissingFileError: RwTypes.RwStatus.NOTFOUND, + }) + + +class SdnSimPlugin(GObject.Object, RwSdn.Topology): + + def __init__(self): + GObject.Object.__init__(self) + self.sdnsim = SdnSim() + + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler( + rwlogger.RwLogger( + category="sdnsim", + log_hdl=rwlog_ctx, + ) + ) + + @rwstatus(ret_on_failure=[None]) + def do_get_network_list(self, account): + """ + Returns the list of discovered networks + + @param account - a SDN account + + """ + logger.debug('Get network list: ') + nwtop = self.sdnsim.get_network_list( account) + logger.debug('Done with get network list: %s', type(nwtop)) + return nwtop \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt new file mode 100644 index 0000000..0183de2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt @@ -0,0 +1,25 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# + +## +# Parse the yang files +## + +include(rift_yang) +include(rift_cmdargs) + +set(source_yang_files rwsdn.yang) + +rift_add_yang_target( + TARGET rwsdn_yang + YANG_FILES ${source_yang_files} + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + rwschema_yang_gen + rwyang + rwlog + rwlog-mgmt_yang_gen +) + diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.tailf.yang new file mode 100644 index 0000000..9733eb6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.tailf.yang @@ -0,0 +1,17 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rwsdn-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rwsdn-annotation"; + prefix "rwsdn-ann"; + + import rwsdn { + prefix rwsdn; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang new file mode 100644 index 0000000..6994633 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang @@ -0,0 +1,303 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rwsdn +{ + namespace "http://riftio.com/ns/riftware-1.0/rwsdn"; + prefix "rwsdn"; + + import rw-base { + prefix rwbase; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-log { + prefix "rwlog"; + } + + import mano-types { + prefix "manotypes"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + + revision 2014-12-30 { + description + "Initial revision."; + reference + "RIFT RWSDN cloud data"; + } + + typedef sdn-account-type { + description "SDN account type"; + type enumeration { + enum odl; + enum mock; + enum sdnsim; + } + } + + grouping sdn-provider-auth { + leaf account-type { + type sdn-account-type; + } + + choice provider-specific-info { + container odl { + leaf username { + type string { + length "1..255"; + } + } + + leaf password { + type string { + length "1..32"; + } + } + + leaf url { + type string { + length "1..255"; + } + } + leaf plugin-name { + type string; + default "rwsdn_odl"; + } + } + container mock { + leaf username { + type string; + } + leaf plugin-name { + type string; + default "rwsdn_mock"; + } + } + + container sdnsim { + leaf username { + type string; + } + leaf plugin-name { + type string; + default "rwsdn_sim"; + } + } + } + } + + container sdn-accounts { + list sdn-account-list { + rwpb:msg-new SDNAccount; + key "name"; + + leaf name { + type string; + } + + uses sdn-provider-auth; + } + } + + container vnffgs { + list vnffg-chain { + key "name"; + rwpb:msg-new VNFFGChain; + + leaf name { + type string; + } + + list vnf-chain-path { + key "order"; + leaf order { + type uint32; + description " Order of the VNF in VNFFG chain"; + } + leaf service-function-type { + type string; + } + leaf nsh-aware { + type boolean; + } + leaf transport-type { + type string; + } + list vnfr-ids { + key "vnfr-id"; + leaf vnfr-id { + type yang:uuid; + } + leaf vnfr-name { + type string; + } + leaf mgmt-address { + type inet:ip-address; + } + leaf mgmt-port { + type inet:port-number; + } + list vdu-list { + key "vm-id port-id"; + leaf port-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf vm-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf name { + type string; + } + leaf address { + type inet:ip-address; + } + leaf port { + type inet:port-number; + } + } + } + } + } + } 
+ + container vnffg-rendered-paths { + rwpb:msg-new VNFFGRenderedPaths; + list vnffg-rendered-path { + key "name"; + rwpb:msg-new VNFFGRenderedPath; + config false; + leaf name { + type string; + } + leaf path-id { + description + "Unique Identifier for the service path"; + type uint32; + } + list rendered-path-hop { + key "hop-number"; + leaf hop-number { + type uint8; + } + leaf service-index { + description + "Location within the service path"; + type uint8; + } + leaf vnfr-name { + type string; + } + container service-function-forwarder { + leaf name { + description + "Service Function Forwarder name"; + type string; + } + leaf ip-address { + description + "Service Function Forwarder Data Plane IP address"; + type inet:ip-address; + } + leaf port { + description + "Service Function Forwarder Data Plane port"; + type inet:port-number; + } + } + } + } + } + + + container vnffg-classifiers { + list vnffg-classifier { + key "name"; + rwpb:msg-new VNFFGClassifier; + + leaf name { + type string; + } + leaf rsp-name { + type string; + } + leaf port-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf vm-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + list match-attributes { + description + "List of match attributes."; + key "name"; + leaf name { + description + "Name for the Access list"; + type string; + } + + leaf ip-proto { + description + "IP Protocol."; + type uint8; + } + + leaf source-ip-address { + description + "Source IP address."; + type inet:ip-prefix; + } + + leaf destination-ip-address { + description + "Destination IP address."; + type inet:ip-prefix; + } + + leaf source-port { + description + "Source port number."; + type inet:port-number; + } + + leaf destination-port { + description + "Destination port number."; + type inet:port-number; + } + } //match-attributes + } + } + +} + +/* vim: set ts=2:sw=2: */ diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/CMakeLists.txt new file mode 100644 index 0000000..06917d0 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/CMakeLists.txt @@ -0,0 +1,13 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 10/31/2015 +# + +set(subdirs + rwve_vnfm_em + rwve_vnfm_vnf + rwos_ma_nfvo + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/Makefile b/modules/core/mano/rwlaunchpad/plugins/vala/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt new file mode 100644 index 0000000..5355ead --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt @@ -0,0 +1,52 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 10/31/2015 +# + +## +# Allow specific compiler warnings +## +rift_allow_compiler_warning(unused-but-set-variable) + +set(VALA_NAME rwos_ma_nfvo) +set(VALA_FILES ${VALA_NAME}.vala) +set(VALA_VERSION 1.0) +set(VALA_RELEASE 1) +set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION}) +set(VALA_TYPELIB_PREFIX RwOsMaNfvo-${VALA_VERSION}) + +rift_add_vala( + ${VALA_LONG_NAME} + VALA_FILES ${VALA_FILES} + VALA_PACKAGES + rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0 + rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0 + + #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + GENERATE_HEADER_FILE ${VALA_NAME}.h + + GENERATE_SO_FILE lib${VALA_LONG_NAME}.so + GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi + GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir + GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib + #DEPENDS rwcal_yang rwlog_gi rwschema_yang + ) + +rift_install_vala_artifacts( + HEADER_FILES ${VALA_NAME}.h + SO_FILES lib${VALA_LONG_NAME}.so + VAPI_FILES ${VALA_LONG_NAME}.vapi + GIR_FILES ${VALA_TYPELIB_PREFIX}.gir + TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib + COMPONENT ${PKG_LONG_NAME} + DEST_PREFIX . 
+ ) + + +set(subdirs + rwos_ma_nfvo_rest + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala new file mode 100644 index 0000000..63e4601 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala @@ -0,0 +1,16 @@ +namespace RwOsMaNfvo { + + public interface Orchestrator: GLib.Object { + /* + * Init routine + */ + public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx); + + /* + * Notify the EM of lifecycle event + */ + public abstract RwTypes.RwStatus ns_lifecycle_event(); + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt new file mode 100644 index 0000000..bf9c897 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwos_ma_nfvo_rest rwos_ma_nfvo_rest.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py new file mode 100644 index 0000000..dd48b8b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py @@ -0,0 +1,53 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + +import re +import logging +import rw_status +import rwlogger +import subprocess, os + +import gi +gi.require_version('RwOsMaNfvo', '1.0') +gi.require_version('RwTypes', '1.0') +from gi.repository import ( + GObject, + RwOsMaNfvo, + RwTypes) + +logger = logging.getLogger('rwos-ma-nfvo-rest') + + +rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}) + +class RwOsMaNfvoRestPlugin(GObject.Object, RwOsMaNfvo.Orchestrator): + """This class implements the Os-Ma-Nfvo VALA methods.""" + + def __init__(self): + GObject.Object.__init__(self) + + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler(rwlogger.RwLogger(category="rwos-ma-nfvo-rest", + log_hdl=rwlog_ctx,)) + \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt new file mode 100644 index 0000000..517f480 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt @@ -0,0 +1,52 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 10/31/2015 +# + +## +# Allow specific compiler warnings +## +rift_allow_compiler_warning(unused-but-set-variable) + +set(VALA_NAME rwve_vnfm_em) +set(VALA_FILES ${VALA_NAME}.vala) +set(VALA_VERSION 1.0) +set(VALA_RELEASE 1) +set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION}) +set(VALA_TYPELIB_PREFIX RwVeVnfmEm-${VALA_VERSION}) + +rift_add_vala( + ${VALA_LONG_NAME} + VALA_FILES ${VALA_FILES} + VALA_PACKAGES + rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0 + rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0 + + #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + GENERATE_HEADER_FILE ${VALA_NAME}.h + + GENERATE_SO_FILE lib${VALA_LONG_NAME}.so + GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi + GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir + GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib + #DEPENDS rwcal_yang rwlog_gi rwschema_yang + ) + +rift_install_vala_artifacts( + HEADER_FILES ${VALA_NAME}.h + SO_FILES lib${VALA_LONG_NAME}.so + VAPI_FILES ${VALA_LONG_NAME}.vapi + GIR_FILES ${VALA_TYPELIB_PREFIX}.gir + TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib + COMPONENT ${PKG_LONG_NAME} + DEST_PREFIX .
+ ) + + +set(subdirs + rwve_vnfm_em_rest + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala new file mode 100644 index 0000000..3da25f9 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala @@ -0,0 +1,16 @@ +namespace RwVeVnfmEm { + + public interface ElementManager: GLib.Object { + /* + * Init routine + */ + public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx); + + /* + * Notify the EM of lifecycle event + */ + public abstract RwTypes.RwStatus vnf_lifecycle_event(); + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt new file mode 100644 index 0000000..58f5d7f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwve_vnfm_em_rest rwve_vnfm_em_rest.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py new file mode 100644 index 0000000..50704a6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py @@ -0,0 +1,56 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + +import re +import logging +import rw_status +import rwlogger +import subprocess, os + +import gi +gi.require_version('RwVeVnfmEm', '1.0') +gi.require_version('RwTypes', '1.0') +from gi.repository import ( + GObject, + RwVeVnfmEm, + RwTypes) + +logger = logging.getLogger('rw_ve_vnfm_em.rest') + + +rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}) + +class RwVeVnfmEmRestPlugin(GObject.Object, RwVeVnfmEm.ElementManager): + """This class implements the Ve-Vnfm VALA methods.""" + + def __init__(self): + GObject.Object.__init__(self) + + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler(rwlogger.RwLogger(category="rwve-vnfm-em-rest", + log_hdl=rwlog_ctx,)) + @rwstatus + def do_vnf_lifecycle_event(self): + pass + \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt new file mode 100644 index 0000000..0b0082b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt @@ -0,0 +1,52 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 10/31/2015 +# + +## +# Allow specific compiler warnings +## +rift_allow_compiler_warning(unused-but-set-variable) + +set(VALA_NAME rwve_vnfm_vnf) +set(VALA_FILES ${VALA_NAME}.vala) +set(VALA_VERSION 1.0) +set(VALA_RELEASE 1) +set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION}) +set(VALA_TYPELIB_PREFIX RwVeVnfmVnf-${VALA_VERSION}) + +rift_add_vala( + ${VALA_LONG_NAME} + VALA_FILES ${VALA_FILES} + VALA_PACKAGES + rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0 + rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0 + + #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + GENERATE_HEADER_FILE ${VALA_NAME}.h + + GENERATE_SO_FILE lib${VALA_LONG_NAME}.so + GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi + GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir + GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib + #DEPENDS rwcal_yang rwlog_gi rwschema_yang + ) + +rift_install_vala_artifacts( + HEADER_FILES ${VALA_NAME}.h + SO_FILES lib${VALA_LONG_NAME}.so + VAPI_FILES ${VALA_LONG_NAME}.vapi + GIR_FILES ${VALA_TYPELIB_PREFIX}.gir + TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib + COMPONENT ${PKG_LONG_NAME} + DEST_PREFIX .
+ ) + + +set(subdirs + rwve_vnfm_vnf_rest + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala new file mode 100644 index 0000000..6b5e84e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala @@ -0,0 +1,16 @@ +namespace RwVeVnfmVnf { + + public interface Vnf: GLib.Object { + /* + * Init routine + */ + public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx); + + /* + * Notify the EM of lifecycle event + */ + public abstract RwTypes.RwStatus get_monitoring_param(); + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt new file mode 100644 index 0000000..2d1ca9e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwve_vnfm_vnf_rest rwve_vnfm_vnf_rest.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py new file mode 100644 index 0000000..ea56ad7 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py @@ -0,0 +1,56 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + +import re +import logging +import rw_status +import rwlogger +import subprocess, os + +import gi +gi.require_version('RwVeVnfmVnf', '1.0') +gi.require_version('RwTypes', '1.0') +from gi.repository import ( + GObject, + RwVeVnfmVnf, + RwTypes) + +logger = logging.getLogger('rwve-vnfm-vnf-rest') + + +rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}) + +class RwVeVnfmVnfRestPlugin(GObject.Object, RwVeVnfmVnf.Vnf): + """This class implements the Ve-Vnfm VALA methods.""" + + def __init__(self): + GObject.Object.__init__(self) + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler(rwlogger.RwLogger(category="rwve-vnfm-vnf-rest", + log_hdl=rwlog_ctx,)) + + @rwstatus + def do_get_monitoring_param(self): + pass + \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..ba7166b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/CMakeLists.txt @@ -0,0 +1,36 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tom Seidenberg +# Creation Date: 2014/04/08 +# + +set(source_yang_files + rw-iwp.yang + rw-launchpad-log.yang + rw-launchpad.yang + rw-monitor.yang + rw-nsm.yang + rw-resource-mgr.yang + rw-vnfm.yang + rw-vns.yang + ) +## +# Yang targets +## +rift_add_yang_target( + TARGET rwlaunchpad_yang + YANG_FILES ${source_yang_files} + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + mano_yang_gen + rwcloud_yang_gen + rw_conman_yang_gen + rwconfig_agent_yang_gen + DEPENDS + mano_yang + rwcloud_yang + rw_conman_yang + rwconfig_agent_yang +) + diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/Makefile b/modules/core/mano/rwlaunchpad/plugins/yang/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.tailf.yang new file mode 100644 index 0000000..df8df03 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.tailf.yang @@ -0,0 +1,30 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-iwp-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-iwp-annotation"; + prefix "rw-iwp-ann"; + + import rw-iwp + { + prefix rw-iwp; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-iwp:resource-mgr/rw-iwp:pools/rw-iwp:vm-pool/rw-iwp:resources/rw-iwp:is_reserved" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-iwp:resource-mgr/rw-iwp:pools/rw-iwp:network-pool/rw-iwp:resources/rw-iwp:is_reserved" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.yang new file mode 100755 index 0000000..2ed608d --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.yang @@ -0,0 +1,184 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-iwp.yang + * @author Austin Cormier + * @date 2015/09/21 + * @brief Intelligent Workload Placement Yang + */ + +module rw-iwp +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-iwp"; + prefix "rw-iwp"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-21 { + description + "Initial revision."; + } + + container resource-mgr { + rwpb:msg-new ResourceManagerConfig; + container mission-control { + leaf mgmt-ip { + type string; + } + } + + container mgmt-domain { + leaf name { + description "The mgmt domain name this launchpad is associated with."; + type string; + } + } + + container pools { + rwpb:msg-new ResourcePools; + description "Pools configured for this mgmt domain."; + + list vm-pool { + rwpb:msg-new VmResourcePool; + key "name"; + leaf name { + type string; + } + list resources { + rwpb:msg-new VmPoolResource; + key "vm-id"; + + leaf vm-id { + rwpb:field-string-max 64; + type string; + } + + leaf is_reserved { + description "Flag indicating whether resource is reserved"; + type boolean; + default false; + config false; + } + } + } + + list network-pool { + rwpb:msg-new NetworkResourcePool; + key "name"; + leaf name { + type string; + } + list resources { + rwpb:msg-new NetworkPoolResource; + key "network-id"; + + leaf network-id { + rwpb:field-string-max 64; + type string; + } + + leaf is_reserved { + description "Flag indicating whether resource is reserved"; + type boolean; + default false; + config false; + } + } + } + } + + container network-request { + config false; + + list requests { + rwpb:msg-new NetworkRequest; + key "request-id"; + leaf request-id { + description "Identifier for the Network Request"; + type yang:uuid; + } + container network-response { + rwpb:msg-new NetworkResponse; + leaf network-id { + description "Allocated network id"; + type string; + } + leaf network-pool { + 
description "Pool that network resource was allocated from"; + type string; + } + } + } + } + + container vm-request { + config false; + + list requests { + rwpb:msg-new VMRequest; + key "request-id"; + leaf request-id { + description "Identifier for the VM Request"; + type yang:uuid; + } + + uses manotypes:vm-flavor; + uses manotypes:guest-epa; + uses manotypes:vswitch-epa; + uses manotypes:hypervisor-epa; + uses manotypes:host-epa; + + leaf image { + description "File/URL path to the software image"; + type string; + } + + container vm-response { + rwpb:msg-new VMResponse; + leaf vm-id { + description "Allocated VM id"; + type string; + } + leaf vm-ip { + description "Management IP Address of the VM"; + type string; + } + leaf vm-pool { + description "Pool that vm resource was allocated from"; + type string; + } + } + } + } + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad-log.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad-log.yang new file mode 100755 index 0000000..2fd7197 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad-log.yang @@ -0,0 +1,47 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + +/**0 + * @file rw-launchpad-log.yang + * @author Rift.IO + * @date 03/02/2015 + * @brief RiftWare Log Event Definitions for rw-launchpad logging + */ + +module rw-launchpad-log +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad-log"; + prefix "rwlaunchpadlog"; + + import rw-base { + prefix rwbase; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-notify-ext { + prefix "rwnotify"; + } + + import rw-log { + prefix "rwlog"; + } + + revision 2014-12-30 { + description + "Initial revision."; + reference + "RIFT Launchpad Logging"; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang new file mode 100644 index 0000000..74350e6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-launchpad-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad-annotation"; + prefix "rw-launchpad-ann"; + + import rw-launchpad { + prefix rw-launchpad; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-launchpad:datacenters" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.yang new file mode 100755 index 0000000..efc48db --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.yang @@ -0,0 +1,131 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + + + +/** + * @file rw-launchpad.yang + * @author Joshua Downer + * @date 2015/09/14 + * @brief Launchpad Yang + */ + +module rw-launchpad +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad"; + prefix "rw-launchpad"; + + import ietf-yang-types { + prefix "yang"; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import vnfd { + prefix "vnfd"; + } + + import vld { + prefix "vld"; + } + + import nsd { + prefix "nsd"; + } + + import rw-cloud { + prefix "rw-cloud"; + } + + import rw-nsr { + prefix "rw-nsr"; + } + + 
import rw-conman { + prefix "rw-conman"; + } + + import rw-config-agent { + prefix "rw-config-agent"; + } + + + revision 2015-09-14 { + description + "Initial revision."; + } + + container datacenters { + description "OpenMano data centers"; + + rwpb:msg-new DataCenters; + config false; + + list cloud-accounts { + description + "A list of OpenMano cloud accounts that have data centers associated + with them"; + + rwpb:msg-new CloudAccount; + key "name"; + + leaf name { + description "The name of the cloud account"; + type leafref { + path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name"; + } + } + + list datacenters { + rwpb:msg-new DataCenter; + leaf uuid { + description "The UUID of the data center"; + type yang:uuid; + } + + leaf name { + description "The name of the data center"; + type string; + } + } + } + } + + container launchpad-config { + leaf operational-mode { + description + "The mode in which this launchpad is running + STANDALONE : This launchpad was started in the standalone mode. + MC_MANAGED : This lauchpad is managed by mission control. + "; + type enumeration { + enum STANDALONE { + value 1; + } + enum MC_MANAGED { + value 2; + } + } + default STANDALONE; + } + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.tailf.yang new file mode 100644 index 0000000..7c68c50 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.tailf.yang @@ -0,0 +1,21 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-monitor-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-monitor-annotation"; + prefix "rw-monitor-ann"; + + import rw-monitor { + prefix rw-monitor; + } + + import tailf-common { + prefix tailf; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.yang new file mode 100755 index 0000000..bea73a6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.yang @@ -0,0 +1,62 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-monitor.yang + * @author Joshua Downer + * @date 2015/10/30 + * @brief NFVI Monitor + */ + +module rw-monitor +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-monitor"; + prefix "rw-monitor"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-cloud { + prefix "rw-cloud"; + } + + import rw-nsr { + prefix "rw-nsr"; + } + + import rwcal { + prefix "rwcal"; + } + + import vnfr { + prefix "vnfr"; + } + + import nsr { + prefix "nsr"; + } + + import ietf-yang-types { + prefix "yang"; + } + + revision 2015-10-30 { + description + "Initial revision."; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-nsm.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-nsm.yang new file mode 100755 index 0000000..fe06c3e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-nsm.yang @@ -0,0 +1,121 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-nsm.yang + * @author Rajesh Velandy + * @date 2015/10/07 + * @brief NSM yang + */ + +module rw-nsm +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsm"; + prefix "rw-nsm"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import rw-nsd { + prefix 
"rw-nsd"; + } + import nsd { + prefix "nsd"; + } + import rw-nsr { + prefix "rw-nsr"; + } + import vld { + prefix "vld"; + } + import rw-vlr { + prefix "rw-vlr"; + } + import rw-vns { + prefix "rw-vns"; + } + import rw-vnfd { + prefix "rw-vnfd"; + } + import vnfd { + prefix "vnfd"; + } + import rw-vnfr { + prefix "rw-vnfr"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-launchpad { + prefix "rw-launchpad"; + } + + import rw-cloud { + prefix "rw-cloud"; + } + + import rw-sdn { + prefix "rw-sdn"; + } + + import rw-config-agent { + prefix "rw-config-agent"; + } + + revision 2015-10-07 { + description + "Initial revision."; + } + + grouping cm-endpoint { + leaf cm-ip-address { + type inet:ip-address; + description "IP Address"; + default "127.0.0.1"; + } + leaf cm-port { + type inet:port-number; + description "Port Number"; + default 2022; + } + leaf cm-username { + description "RO endpoint username"; + type string; + default "admin"; + } + leaf cm-password { + description "RO endpoint password"; + type string; + default "admin"; + } + } + + container ro-config { + description "Resource Orchestrator endpoint ip address"; + rwpb:msg-new "roConfig"; + rwcli:new-mode "ro-config"; + + container cm-endpoint { + description "Service Orchestrator endpoint ip address"; + rwpb:msg-new "SoEndpoint"; + uses cm-endpoint; + } + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang new file mode 100644 index 0000000..339bed4 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang @@ -0,0 +1,30 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-resource-mgr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-resource-mgr-annotation"; + prefix "rw-resource-mgr-ann"; + + import rw-resource-mgr + { + prefix rw-resource-mgr; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-resource-mgr:resource-pool-records" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-resource-mgr:resource-mgmt" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.yang new file mode 100755 index 0000000..7bc65b8 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.yang @@ -0,0 +1,293 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +module rw-resource-mgr +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-resource-mgr"; + prefix "rw-resource-mgr"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-cloud { + prefix "rwcloud"; + } + + import rwcal { + prefix "rwcal"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-10-16 { + description + "Initial revision."; + } + + grouping resource-pool-info { + leaf name { + description "Name of the resource pool"; + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + //mandatory true; + } + + leaf resource-type { + description "Type of resource"; + type enumeration { + enum compute; + enum network; + } + } + + leaf pool-type { + description "Type of pool"; + type enumeration { + enum static; + enum dynamic; + } + default "static"; + } + + leaf max-size { + description "Maximum size to 
which a dynamic resource pool can grow"; + type uint32; + } + + } + + container resource-mgr-config { + description "Data model for configuration of resource-mgr"; + rwpb:msg-new ResourceManagerConfig; + config true; + + container management-domain { + leaf name { + description "The management domain name this launchpad is associated with."; + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + //mandatory true; + } + } + + container resource-pools { + description "Resource Pool configuration"; + rwpb:msg-new ResourcePools; + list cloud-account { + key "name"; + leaf name { + description + "Resource pool for the configured cloud account"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + } + } + } + + grouping resource-state { + leaf resource-state { + type enumeration { + enum inactive; + enum active; + enum pending; + enum failed; + } + } + } + + container resource-mgmt { + description "Resource management "; + config false; + + container vdu-event { + description "Events for VDU Management"; + rwpb:msg-new VDUEvent; + + list vdu-event-data { + rwpb:msg-new VDUEventData; + key "event-id"; + + leaf event-id { + description "Identifier associated with the VDU transaction"; + type yang:uuid; + } + + leaf cloud-account { + description "The cloud account to use for this resource request"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + container request-info { + description "Information about required resource"; + + uses rwcal:vdu-create-params; + } + + container resource-info { + description "Information about allocated resource"; + leaf pool-name { + type string; + } + uses resource-state; + uses rwcal:vdu-info-params; + } + } + } + + container vlink-event { + description "Events for Virtual Link management"; + rwpb:msg-new VirtualLinkEvent; + + list vlink-event-data { + rwpb:msg-new VirtualLinkEventData; + + key "event-id"; + + leaf event-id { + description "Identifier associated with the Virtual Link transaction"; + type yang:uuid; + } + + leaf cloud-account { + description "The cloud account to use for this resource request"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + container request-info { + description "Information about required resource"; + + uses rwcal:virtual-link-create-params; + } + + container resource-info { + leaf pool-name { + type string; + } + uses resource-state; + uses rwcal:virtual-link-info-params; + } + } + } + } + + + container resource-pool-records { + description "Resource Pool Records"; + rwpb:msg-new ResourcePoolRecords; + config false; + + list cloud-account { + key "name"; + leaf name { + description + "The configured cloud account's pool records."; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + list records { + rwpb:msg-new ResourceRecordInfo; + key "name"; + uses resource-pool-info; + + leaf pool-status { + type enumeration { + enum unknown; + enum locked; + enum unlocked; + } + } + + leaf total-resources { + type uint32; + } + + leaf free-resources { + type uint32; + } + + leaf allocated-resources { + type uint32; + } + } + } + } + + + container resource-mgr-data{ + description "Resource Manager operational data"; + config false; + + container pool-record { + description "Resource Pool record"; + + list cloud { + key "name"; + max-elements 16; + rwpb:msg-new "ResmgrCloudPoolRecords"; + leaf name { + description + "The configured cloud account's pool records."; + type leafref { + path 
"/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + list records { + key "name"; + uses resource-pool-info; + + list free-vdu-list { + key vdu-id; + uses rwcal:vdu-info-params; + } + + list in-use-vdu-list { + key vdu-id; + uses rwcal:vdu-info-params; + } + + list free-vlink-list { + key virtual-link-id; + uses rwcal:virtual-link-info-params; + } + + list in-use-vlink-list { + key virtual-link-id; + uses rwcal:virtual-link-info-params; + } + } + } + } + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-vnfm.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vnfm.yang new file mode 100755 index 0000000..e254b26 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vnfm.yang @@ -0,0 +1,66 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-vnfm.yang + * @author Rajesh Velandy + * @date 2015/10/07 + * @brief VNFM yang + */ + +module rw-vnfm +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfm"; + prefix "rw-vnfm"; + + import vld { + prefix "vld"; + } + + import vlr { + prefix "vlr"; + } + + import rw-vlr { + prefix "rw-vlr"; + } + + import rw-vns { + prefix "rw-vns"; + } + + import rw-vnfd { + prefix "rw-vnfd"; + } + + import rw-vnfr { + prefix "rw-vnfr"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-manifest { + prefix "rw-manifest"; + } + + import rw-resource-mgr { + prefix "rw-resource-mgr"; + } + + import rw-launchpad { + prefix "rw-launchpad"; + } + + revision 2015-10-07 { + description + "Initial revision."; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.tailf.yang new file mode 100644 index 0000000..722a185 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.tailf.yang @@ -0,0 +1,51 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vns-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vns"; + prefix "rw-vns-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + import ietf-network-topology { + prefix nt; + } + + import ietf-l2-topology { + prefix lt; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nt:link" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nd:node" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nd:node/nt:termination-point" { + tailf:callpoint base_show; + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.yang new file mode 100755 index 0000000..b8ecef8 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.yang @@ -0,0 +1,88 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-vns.yang + * @author Austin Cormier + * @date 2015/10/06 + * @brief Virtual Network Service Yang + */ + +module rw-vns +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vns"; + prefix "rw-vns"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import rwsdn { + prefix "rwsdn"; + } + + + import ietf-yang-types { + prefix "yang"; + } + + import rw-iwp { + prefix "rwiwp"; + } + + import rw-vlr { + prefix "rwvlr"; + } + + import vld { + prefix "vld"; + } + + import ietf-network 
{ + prefix "nw"; + } + + import ietf-network-topology { + prefix "nt"; + } + + import ietf-l2-topology { + prefix "l2t"; + } + + import rw-topology { + prefix "rw-topology"; + } + + import rw-resource-mgr { + prefix "rw-resource-mgr"; + } + + import rw-sdn { + prefix "rw-sdn"; + } + + revision 2015-10-05 { + description + "Initial revision."; + } +} diff --git a/modules/core/mano/rwlaunchpad/ra/CMakeLists.txt b/modules/core/mano/rwlaunchpad/ra/CMakeLists.txt new file mode 100644 index 0000000..81f567d --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/CMakeLists.txt @@ -0,0 +1,51 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Paul Laidler +# Creation Date: 09/16/2015 +# + +cmake_minimum_required(VERSION 2.8) + +install( + PROGRAMS + pingpong_longevity_systest + pingpong_vnf_systest + pingpong_records_systest + pingpong_vnf_reload_systest + pingpong_lp_standalone_systest + DESTINATION usr/rift/systemtest/pingpong_vnf + COMPONENT ${PKG_LONG_NAME}) + +install( + PROGRAMS + launchpad_longevity_systest + DESTINATION usr/rift/systemtest/launchpad + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + pytest/test_launchpad_longevity.py + pytest/test_startstop.py + DESTINATION usr/rift/systemtest/pytest/mission_control/launchpad + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + pytest/conftest.py + pytest/test_pingpong_longevity.py + pytest/test_pingpong_vnf.py + pytest/test_records.py + DESTINATION usr/rift/systemtest/pytest/mission_control/pingpong_vnf + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + racfg/pingpong_vnf_systest_cloudsim.racfg + racfg/pingpong_vnf_systest_openstack.racfg + racfg/pingpong_records_systest_openstack.racfg + racfg/pingpong_vnf_reload_systest_openstack.racfg + racfg/pingpong_lp_standalone_systest_openstack.racfg + DESTINATION usr/rift/systemtest/pingpong_vnf + COMPONENT ${PKG_LONG_NAME}) + diff --git a/modules/core/mano/rwlaunchpad/ra/launchpad_longevity_systest b/modules/core/mano/rwlaunchpad/ra/launchpad_longevity_systest new file mode 100755 index 0000000..de00966 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/launchpad_longevity_systest @@ -0,0 +1,44 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2016/01/04 +# +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -x -v \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/launchpad/test_launchpad_longevity.py" + +test_prefix="launchpad_longevity_systest" +test_cmd="" +repeat=10 +repeat_keyword="longevity" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? 
+ echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_longevity_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_longevity_systest new file mode 100755 index 0000000..687b62c --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_longevity_systest @@ -0,0 +1,31 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2016/01/04 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +# Helper script for invoking the mission control system test using the systest_wrapper +SCRIPT_TEST="py.test -x -v \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_longevity.py" + +test_prefix="pingpong_longevity_systest" +test_cmd="" +repeat_keyword="longevity" +repeat=10 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_lp_standalone_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_lp_standalone_systest new file mode 100755 index 0000000..8ebf68b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_lp_standalone_systest @@ -0,0 +1,32 @@ +#!/bin/bash +# +# +# Author(s): Varun Prasad +# Creation Date: 19-Feb-2016 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +# Helper script for invoking the mission control system test using the systest_wrapper +SCRIPT_TEST="py.test -v -p no:cacheprovider \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py" + +test_prefix="pingpong_lp_standalone" +test_cmd="" + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Force standalone launchpad +lp_standalone=true + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} + diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_records_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_records_systest new file mode 100755 index 0000000..36aa4f9 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_records_systest @@ -0,0 +1,29 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/09/15 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh +restconf=true + +# Helper script for invoking the mission control system test using the systest_wrapper +SCRIPT_TEST="py.test -v -p no:cacheprovider \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py" + +test_prefix="pingpong_records_systest" +test_cmd="" + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up 
conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_reload_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_reload_systest new file mode 100755 index 0000000..954d387 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_reload_systest @@ -0,0 +1,33 @@ +#!/bin/bash +# +# +# Author(s): Varun Prasad +# Creation Date: 2016/01/04 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +# Helper script for invoking the mission control system test using the systest_wrapper +SCRIPT_TEST="py.test -v -p no:cacheprovider -k 'not Teardown or test_stop_launchpad' \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py" + +REBOOT_SCRIPT_TEST="py.test -v -p no:cacheprovider -k 'test_wait_for_launchpad_started or test_wait_for_pingpong_started or Teardown' \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py" + +test_prefix="pingpong_vnf_reload_systest" +test_cmd="" + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_systest new file mode 100755 index 0000000..91635a1 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_systest @@ -0,0 +1,28 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/09/15 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +# Helper script for invoking the mission control system test using the systest_wrapper + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py" + +test_prefix="pingpong_vnf_systest" +test_cmd="" + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/conftest.py b/modules/core/mano/rwlaunchpad/ra/pytest/conftest.py new file mode 100644 index 0000000..0d3b2bc --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/conftest.py @@ -0,0 +1,151 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + +import pytest +import os + +import gi +import rift.auto.session +import rift.mano.examples.ping_pong_nsd as ping_pong +import rift.vcs.vcs + +gi.require_version('RwMcYang', '1.0') +from gi.repository import RwMcYang + + +class PackageError(Exception): + pass + +@pytest.fixture(scope='session', autouse=True) +def cloud_account_name(request): + '''fixture which returns the name used to identify the cloud account''' + return 'cloud-0' + +@pytest.fixture(autouse=True) +def mc_only(request, standalone_launchpad): + """Fixture to skip any tests that needs to be run only when a MC is used, + and not in lp standalone mode. + + Arugments: + request - pytest request fixture + standalone_launchpad - indicates if the launchpad is running standalone + """ + if request.node.get_marker('mc_only'): + if standalone_launchpad: + pytest.skip('Test marked skip for launchpad standalone mode') + + +@pytest.fixture(scope='session') +def launchpad_session(mgmt_session, mgmt_domain_name, session_type, standalone_launchpad, use_https): + '''Fixture containing a rift.auto.session connected to the launchpad + + Arguments: + mgmt_session - session connected to the mission control instance + (or launchpad in the case of a standalone session) + mgmt_domain_name - name of the mgmt_domain being used + session_type - Restconf or Netconf + standalone_launchpad - indicates if the launchpad is running standalone + ''' + if standalone_launchpad: + return mgmt_session + + mc_proxy = mgmt_session.proxy(RwMcYang) + launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + + if session_type == 'netconf': + launchpad_session = rift.auto.session.NetconfSession(host=launchpad_host) + elif session_type == 'restconf': + launchpad_session = rift.auto.session.RestconfSession( + host=launchpad_host, + use_https=use_https) + + launchpad_session.connect() + rift.vcs.vcs.wait_until_system_started(launchpad_session) + + return launchpad_session + + +@pytest.fixture(scope='session') +def ping_pong_install_dir(): + '''Fixture containing the location of ping_pong installation + ''' + install_dir = os.path.join( + os.environ["RIFT_ROOT"], + "images" + ) + return install_dir + +@pytest.fixture(scope='session') +def ping_vnfd_package_file(ping_pong_install_dir): + '''Fixture containing the location of the ping vnfd package + + Arguments: + ping_pong_install_dir - location of ping_pong installation + ''' + ping_pkg_file = os.path.join( + ping_pong_install_dir, + "ping_vnfd_with_image.tar.gz", + ) + if not os.path.exists(ping_pkg_file): + raise_package_error() + + return ping_pkg_file + + +@pytest.fixture(scope='session') +def pong_vnfd_package_file(ping_pong_install_dir): + '''Fixture containing the location of the pong vnfd package + + Arguments: + ping_pong_install_dir - location of ping_pong installation + ''' + pong_pkg_file = os.path.join( + ping_pong_install_dir, + "pong_vnfd_with_image.tar.gz", + ) + if not os.path.exists(pong_pkg_file): + raise_package_error() + + return pong_pkg_file + + +@pytest.fixture(scope='session') +def ping_pong_nsd_package_file(ping_pong_install_dir): + '''Fixture containing the location of the ping_pong_nsd package + + Arguments: + ping_pong_install_dir - location of ping_pong installation + ''' + ping_pong_pkg_file = os.path.join( + ping_pong_install_dir, + "ping_pong_nsd.tar.gz", + ) + if not os.path.exists(ping_pong_pkg_file): + raise_package_error() + + return ping_pong_pkg_file + + +# Setting scope to be module, so that we get a different UUID when 
called +# by different files/modules. +@pytest.fixture(scope='module') +def ping_pong_records(): + '''Fixture containing a set of generated ping and pong descriptors + ''' + return ping_pong.generate_ping_pong_descriptors(pingcount=1) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_launchpad_longevity.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_launchpad_longevity.py new file mode 100644 index 0000000..99d5db6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_launchpad_longevity.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# +# Author(s): Paul Laidler +# Creation Date: 2016/01/04 +# + +import rift.vcs.vcs +import time +import gi +gi.require_version('RwMcYang', '1.0') +from gi.repository import RwMcYang + +def test_launchpad_longevity(mgmt_session, mgmt_domain_name): + time.sleep(60) + rift.vcs.vcs.wait_until_system_started(mgmt_session) + launchpad_state = mgmt_session.proxy(RwMcYang).get("/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name) + assert launchpad_state == 'started' \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_longevity.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_longevity.py new file mode 100644 index 0000000..4711281 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_longevity.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
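
[Illustration only, not part of the patch] test_pingpong_longevity.py below re-checks launchpad and NSR state on every pass of the longevity loop by reading operational leaves through a management proxy. A small polling helper in the same spirit (the proxy.get() call shape mirrors the tests; the helper name is hypothetical):

    import time

    def poll_until(read_state, expected, timeout=120, interval=5):
        """Re-read an operational leaf until it reaches `expected` or time out."""
        deadline = time.time() + timeout
        last = None
        while time.time() < deadline:
            last = read_state()
            if last == expected:
                return last
            time.sleep(interval)
        raise AssertionError("state is %r, expected %r after %ss" % (last, expected, timeout))

    # Example usage against a management session proxy, as in the tests below:
    #   poll_until(lambda: proxy.get(state_xpath), 'started')
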
+ +# +# +# Author(s): Paul Laidler +# Creation Date: 2016/01/04 +# + +import pytest +import rift.vcs.vcs +import time + +import gi +gi.require_version('RwMcYang', '1.0') +from gi.repository import RwMcYang + +@pytest.fixture(scope='module') +def launchpad_session(request, mgmt_session, mgmt_domain_name, session_type): + launchpad_host = mgmt_session.proxy(RwMcYang).get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + + if session_type == 'netconf': + launchpad_session = rift.auto.session.NetconfSession(host=launchpad_host) + elif session_type == 'restconf': + launchpad_session = rift.auto.session.RestconfSession(host=launchpad_host) + + launchpad_session.connect() + rift.vcs.vcs.wait_until_system_started(launchpad_session) + return launchpad_session + +@pytest.fixture(scope='module') +def rwnsr_proxy(launchpad_session): + return launchpad_session.proxy(RwNsrYang) + +def test_launchpad_longevity(launchpad_session, mgmt_session, mgmt_domain_name, rwnsr_proxy): + time.sleep(60) + + rift.vcs.vcs.wait_until_system_started(mgmt_session) + + launchpad_state = mgmt_session.proxy(RwMcYang).get("/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name) + assert launchpad_state == 'started' + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + for nsr in nsr_opdata.nsr: + xpath = ("/ns-instance-opdata" + "/nsr[ns-instance-config-ref='%s']" + "/operational-status") % (nsr.ns_instance_config_ref) + operational_status = rwnsr_proxy.get(xpath) + assert operational_status == 'running' \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf.py new file mode 100755 index 0000000..6f182ba --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
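
[Illustration only, not part of the patch] The helpers defined in test_pingpong_vnf.py below onboard descriptor packages by POSTing them to the launchpad's port-4567 upload API via curl and then polling the transaction state endpoint until it leaves 'pending'. The same flow expressed directly with the requests library, as a sketch (URL layout and status values are taken from the helpers below; the function name is hypothetical):

    import time
    import requests

    def onboard_package(host, package_file, scheme="https", cert=None, timeout=600):
        """Upload one descriptor package and wait for the onboard transaction."""
        base = "{}://{}:4567/api/upload".format(scheme, host)
        with open(package_file, "rb") as pkg:
            reply = requests.post(base, files={"descriptor": pkg}, cert=cert, verify=False)
        transaction_id = reply.json()["transaction_id"]

        state_uri = "{}/{}/state".format(base, transaction_id)
        deadline = time.time() + timeout
        while time.time() < deadline:
            state = requests.get(state_uri, cert=cert, verify=False).json()
            if state["status"] == "success":
                return transaction_id
            if state["status"] != "pending":
                raise RuntimeError("onboard failed: {}".format(state))
            time.sleep(1)
        raise RuntimeError("onboard of {} timed out".format(package_file))
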
+ +""" +# +# + +@file lp_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 11/03/2015 +@brief Launchpad System Test +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import shutil +import subprocess +import tempfile +import time +import uuid +import rift.auto.session + +import gi +gi.require_version('RwMcYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +from gi.repository import ( + RwMcYang, + NsdYang, + RwNsrYang, + RwVnfrYang, + NsrYang, + VnfrYang, + VldYang, + RwVnfdYang, + RwLaunchpadYang, + RwBaseYang +) + +logging.basicConfig(level=logging.DEBUG) + + +@pytest.fixture(scope='module') +def launchpad_proxy(request, launchpad_session): + return launchpad_session.proxy(RwLaunchpadYang) + +@pytest.fixture(scope='module') +def vnfd_proxy(request, launchpad_session): + return launchpad_session.proxy(RwVnfdYang) + +@pytest.fixture(scope='module') +def rwvnfr_proxy(request, launchpad_session): + return launchpad_session.proxy(RwVnfrYang) + +@pytest.fixture(scope='module') +def vld_proxy(request, launchpad_session): + return launchpad_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, launchpad_session): + return launchpad_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, launchpad_session): + return launchpad_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, launchpad_session): + return launchpad_session.proxy(RwBaseYang) + + +def create_nsr(nsd_id, input_param_list, cloud_account_name): + """ + Create the NSR record object + + Arguments: + nsd_id - NSD id + input_param_list - list of input-parameter objects + + Return: + NSR object + """ + nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + + nsr.id = str(uuid.uuid4()) + nsr.name = "nsr_name" + nsr.short_name = "nsr_short_name" + nsr.description = "This is a description" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + nsr.input_parameter.extend(input_param_list) + nsr.cloud_account = cloud_account_name + + return nsr + + +def upload_descriptor( + logger, + descriptor_file, + scheme, + cert, + host="127.0.0.1", + endpoint="upload"): + curl_cmd = ('curl --cert {cert} --key {key} -F "descriptor=@{file}" -k ' + '{scheme}://{host}:4567/api/{endpoint}'.format( + cert=cert[0], + key=cert[1], + scheme=scheme, + endpoint=endpoint, + file=descriptor_file, + host=host, + )) + + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_onboard_transaction_finished( + logger, + transaction_id, + scheme, + cert, + timeout=600, + host="127.0.0.1", + endpoint="upload"): + logger.info("Waiting for onboard trans_id %s to complete", transaction_id) + uri = '%s://%s:4567/api/%s/%s/state' % (scheme, host, endpoint, transaction_id) + elapsed = 0 + start = time.time() + while elapsed < timeout: + reply = requests.get(uri, cert=cert, verify=False) + state = reply.json() + if state["status"] == "success": + break + + if state["status"] != "pending": + raise DescriptorOnboardError(state) + + time.sleep(1) + elapsed = time.time() - start + + 
if state["status"] != "success": + raise DescriptorOnboardError(state) + + logger.info("Descriptor onboard was successful") + + +def terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger): + """ + Terminate the instance and check if the record is deleted. + + Asserts: + 1. NSR record is deleted from instance-config. + + """ + logger.debug("Terminating Ping Pong NSR") + + nsr_path = "/ns-instance-config" + nsr = rwnsr_proxy.get_config(nsr_path) + ping_pong = nsr.nsr[0] + + xpath = "/ns-instance-config/nsr[id='{}']".format(ping_pong.id) + rwnsr_proxy.delete_config(xpath) + + time.sleep(30) + nsr = rwnsr_proxy.get_config(xpath) + assert nsr is None + + # Termination tests + vnfr = "/vnfr-catalog/vnfr" + vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True) + assert vnfrs is None or len(vnfrs.vnfr) == 0 + + # nsr = "/ns-instance-opdata/nsr" + # nsrs = rwnsr_proxy.get(nsr, list_obj=True) + # assert len(nsrs.nsr) == 0 + + +@pytest.fixture(scope='module', params=["upload", "update"]) +def endpoint(request): + """A simple fixture, which in combination with the incremental marker, lets + the ENTIRE TestPingPongStart class to be run twice in order. + """ + return request.param + + +@pytest.mark.setup('pingpong') +@pytest.mark.depends('launchpad') +@pytest.mark.incremental +class TestPingPongStart(object): + """A brief overview of the steps performed. + 1. Generate & on-board new descriptors + 2. Start & stop the ping pong NSR + 3. Update the exiting descriptor files. + 4. Start the ping pong NSR. + """ + def generate_tar_files(self, tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd): + """Converts the descriptor to files and package them into zip files + that can be uploaded to LP instance. + + Args: + tmpdir (string): Full path where the zipped files should be + ping_vnfd (VirtualNetworkFunction): Ping VNFD data + pong_vnfd (VirtualNetworkFunction): Pong VNFD data + ping_pong_nsd (NetworkService): PingPong NSD data + + Returns: + Tuple: file path for ping vnfd, pong vnfd and ping_pong_nsd + """ + rift_build = os.environ['RIFT_BUILD'] + MANO_DIR = os.path.join( + rift_build, + "modules/core/mano/src/core_mano-build/examples/ping_pong_ns") + ping_img = os.path.join(MANO_DIR, "ping_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2") + pong_img = os.path.join(MANO_DIR, "pong_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2") + + """ grab cached copies of these files if not found. 
They may not exist + because our git submodule dependency mgmt + will not populate these because they live in .build, not .install + """ + if not os.path.exists(ping_img): + ping_img = os.path.join( + os.environ['RIFT_ROOT'], + 'images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2') + pong_img = os.path.join( + os.environ['RIFT_ROOT'], + 'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2') + + for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]: + descriptor.write_to_file(output_format='xml', outdir=tmpdir.name) + + ping_img_path = os.path.join(tmpdir.name, "{}/images/".format(ping_vnfd.name)) + pong_img_path = os.path.join(tmpdir.name, "{}/images/".format(pong_vnfd.name)) + os.makedirs(ping_img_path) + os.makedirs(pong_img_path) + + shutil.copy(ping_img, ping_img_path) + shutil.copy(pong_img, pong_img_path) + + for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]: + subprocess.call([ + "sh", + "{}/bin/generate_descriptor_pkg.sh".format(os.environ['RIFT_ROOT']), + tmpdir.name, + dir_name]) + + return (os.path.join(tmpdir.name, "{}.tar.gz".format(ping_vnfd.name)), + os.path.join(tmpdir.name, "{}.tar.gz".format(pong_vnfd.name)), + os.path.join(tmpdir.name, "{}.tar.gz".format(ping_pong_nsd.name))) + + def onboard_descriptor(self, host, file_name, logger, endpoint, scheme, cert): + """On-board/update the descriptor. + + Args: + host (str): Launchpad IP + file_name (str): Full file path. + logger: Logger instance + endpoint (str): endpoint to be used for the upload operation. + + """ + logger.info("Onboarding package: %s", file_name) + trans_id = upload_descriptor( + logger, + file_name, + scheme, + cert, + host=host, + endpoint=endpoint) + wait_onboard_transaction_finished( + logger, + trans_id, + scheme, + cert, + host=host, + endpoint=endpoint) + + def test_onboard_descriptors( + self, + logger, + vnfd_proxy, + nsd_proxy, + launchpad_session, + scheme, + cert, + ping_pong_records, + endpoint): + """Generates & On-boards the descriptors. 
+ """ + ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records + + # if update_mode: + # for vnfd_record in [ping_vnfd, pong_vnfd]: + # vnfd_record.descriptor.vnfd[0].description += "_update" + # ping_pong_nsd.descriptor.nsd[0].description += "_update" + + tmpdir = tempfile.TemporaryDirectory() + + ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file = \ + self.generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd) + + for file_name in [ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file]: + self.onboard_descriptor( + launchpad_session.host, + file_name, + logger, + endpoint, + scheme, + cert) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should two vnfds" + assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name] + assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name] + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + assert nsds[0].name == "ping_pong_nsd" + + def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account_name, endpoint): + + def verify_input_parameters(running_config, config_param): + """ + Verify the configured parameter set against the running configuration + """ + for run_input_param in running_config.input_parameter: + if (run_input_param.xpath == config_param.xpath and + run_input_param.value == config_param.value): + return True + + assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} " + "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath, + config_param.value, + running_config.input_parameter)) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + input_parameters = [] + descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id + descr_value = "New NSD Description" + in_param_id = str(uuid.uuid4()) + + input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath=descr_xpath, + value=descr_value) + + input_parameters.append(input_param_1) + + nsr = create_nsr(nsd.id, input_parameters, cloud_account_name) + + logger.info("Instantiating the Network Service") + rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id)) + assert nsr_opdata is not None + + # Verify the input parameter configuration + running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id) + for input_param in input_parameters: + verify_input_parameters(running_config, input_param) + + def test_wait_for_pingpong_started(self, rwnsr_proxy, endpoint): + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + + assert len(nsrs) == 1 + current_nsr = nsrs[0] + + xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(current_nsr.ns_instance_config_ref) + rwnsr_proxy.wait_for(xpath, "running", timeout=120) + + def test_stop_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger, endpoint): + """If the package is being on-boarded, not updated, then terminate the + current NSR instance, as we will be triggering another instance + after updating the descriptor files. 
+ """ + if endpoint == "upload": + terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger) + + +@pytest.mark.teardown('pingpong') +@pytest.mark.depends('launchpad') +@pytest.mark.incremental +class TestPingPongTeardown(object): + def test_terminate_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger): + """ + Terminate the instance and check if the record is deleted. + + Asserts: + 1. NSR record is deleted from instance-config. + + """ + logger.debug("Terminating Ping Pong NSR") + terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger) + + def test_delete_records(self, nsd_proxy, vnfd_proxy): + """Delete the NSD & VNFD records + + Asserts: + The records are deleted. + """ + nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True) + for nsd in nsds.nsd: + xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id) + nsd_proxy.delete_config(xpath) + + nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True) + assert nsds is None or len(nsds.nsd) == 0 + + vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True) + for vnfd_record in vnfds.vnfd: + xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id) + vnfd_proxy.delete_config(xpath) + + vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True) + assert vnfds is None or len(vnfds.vnfd) == 0 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf_static.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf_static.py new file mode 100644 index 0000000..012a500 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf_static.py @@ -0,0 +1,327 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +# +# + +@file lp_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 11/03/2015 +@brief Launchpad System Test +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid +import rift.auto.session + +import gi +gi.require_version('RwMcYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +from gi.repository import RwMcYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwLaunchpadYang, RwBaseYang + +logging.basicConfig(level=logging.DEBUG) + + +@pytest.fixture(scope='module') +def mc_proxy(request, mgmt_session): + return mgmt_session.proxy(RwMcYang) + + +@pytest.fixture(scope='module') +def launchpad_session(request, mc_proxy, mgmt_domain_name, session_type): + launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + + if session_type == 'netconf': + launchpad_session = rift.auto.session.NetconfSession(host=launchpad_host) + elif session_type == 'restconf': + launchpad_session = rift.auto.session.RestconfSession(host=launchpad_host) + + launchpad_session.connect() + rift.vcs.vcs.wait_until_system_started(launchpad_session) + return launchpad_session + + +@pytest.fixture(scope='module') +def launchpad_proxy(request, launchpad_session): + return launchpad_session.proxy(RwLaunchpadYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, launchpad_session): + return launchpad_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, launchpad_session): + return launchpad_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, launchpad_session): + return launchpad_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, launchpad_session): + return launchpad_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, launchpad_session): + return launchpad_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, launchpad_session): + return launchpad_session.proxy(RwBaseYang) + + +def create_nsr_from_nsd_id(nsd_id): + nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "nsr_name" + nsr.short_name = "nsr_short_name" + nsr.description = "This is a description" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + nsr.cloud_account = "cloud_account_name" + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_onboard_transaction_finished(logger, transaction_id, timeout=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", transaction_id) + uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id) + elapsed = 0 + start = time.time() + while elapsed < timeout: + reply = requests.get(uri) + state = reply.json() + if state["status"] == "success": + break + + 
if state["status"] != "pending": + raise DescriptorOnboardError(state) + + time.sleep(1) + elapsed = time.time() - start + + if state["status"] != "success": + raise DescriptorOnboardError(state) + + logger.info("Descriptor onboard was successful") + + + +@pytest.mark.setup('pingpong') +@pytest.mark.depends('launchpad') +@pytest.mark.incremental +class TestPingPongStart(object): + def test_configure_pools(self, mc_proxy, vm_pool_name, network_pool_name): + vm_pool = mc_proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + available_ids = [vm.id for vm in vm_pool.available] + + assert len(available_ids) >= 2 + + network_pool = mc_proxy.get("/network-pool/pool[name='%s']" % network_pool_name) + available_ids = [network.id for network in network_pool.available] + assert len(available_ids) >= 3 + + vm_pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool_name, + 'assigned':[ + {'id':available_ids[0]}, + {'id':available_ids[1]}, + ]}) + + mc_proxy.merge_config( + "/vm-pool/pool[name='%s']" % vm_pool_name, + vm_pool_config) + + network_pool_config = RwMcYang.NetworkPool.from_dict({ + 'name':network_pool_name, + 'assigned':[ + {'id':available_ids[0]}, + {'id':available_ids[1]}, + {'id':available_ids[2]}, + ]}) + mc_proxy.merge_config( + "/network-pool/pool[name='%s']" % network_pool_name, + network_pool_config) + + def test_restart_launchpad(self, mc_proxy, mgmt_domain_name, launchpad_session, launchpad_scraper): + mc_proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=10, + fail_on=['crashed']) + + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name) + stop_launchpad_output = mc_proxy.rpc(stop_launchpad_input) + + mc_proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=60, + fail_on=['crashed']) + + start_launchpad_input = RwMcYang.StartLaunchpadInput(mgmt_domain=mgmt_domain_name) + start_launchpad_output = mc_proxy.rpc(start_launchpad_input) + mc_proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=200, + fail_on=['crashed']) + + rift.vcs.vcs.wait_until_system_started(launchpad_session) + launchpad_scraper.reset() + + def test_onboard_ping_vnfd(self, logger, mc_proxy, mgmt_domain_name, vnfd_proxy, ping_vnfd_package_file): + launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + logger.info("Onboarding ping_vnfd package: %s", ping_vnfd_package_file) + trans_id = upload_descriptor(logger, ping_vnfd_package_file, launchpad_host) + wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should only be a single vnfd" + vnfd = vnfds[0] + assert vnfd.name == "rw_ping_vnfd" + + def test_onboard_pong_vnfd(self, logger, mc_proxy, mgmt_domain_name, vnfd_proxy, pong_vnfd_package_file): + launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + logger.info("Onboarding pong_vnfd package: %s", pong_vnfd_package_file) + trans_id = upload_descriptor(logger, pong_vnfd_package_file, launchpad_host) + wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "rw_pong_vnfd" in [vnfds[0].name, vnfds[1].name] + + def 
test_onboard_ping_pong_nsd(self, logger, mc_proxy, mgmt_domain_name, nsd_proxy, ping_pong_nsd_package_file): + launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + logger.info("Onboarding ping_pong_nsd package: %s", ping_pong_nsd_package_file) + trans_id = upload_descriptor(logger, ping_pong_nsd_package_file, launchpad_host) + wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "rw_ping_pong_nsd" + + def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + rwnsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id + + logger.info("Waiting up to 120 seconds for ping and pong components to show " + "up in show vcs info") + + start_time = time.time() + while (time.time() - start_time) < 120: + vcs_info = base_proxy.get('/vcs/info') + components = vcs_info.components.component_info + + def find_component_by_name(name): + for component in components: + if name in component.component_name: + return component + + logger.warning("Did not find %s component name in show vcs info", + name) + + return None + + ping_vm_component = find_component_by_name( + "rw_ping_vnfd:rwping_vm" + ) + if ping_vm_component is None: + continue + + pong_vm_component = find_component_by_name( + "rw_pong_vnfd:rwpong_vm" + ) + if pong_vm_component is None: + continue + + ping_proc_component = find_component_by_name( + "rw_ping_vnfd:rwping_proc" + ) + if ping_proc_component is None: + continue + + pong_proc_component = find_component_by_name( + "rw_pong_vnfd:rwpong_proc" + ) + if pong_proc_component is None: + continue + + ping_tasklet_component = find_component_by_name( + "rw_ping_vnfd:rwping_tasklet" + ) + if ping_tasklet_component is None: + continue + + pong_tasklet_component = find_component_by_name( + "rw_pong_vnfd:rwpong_tasklet" + ) + if pong_tasklet_component is None: + continue + + logger.info("TEST SUCCESSFUL: All ping and pong components were found in show vcs info") + break + + else: + assert False, "Did not find all ping and pong component in time" \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_records.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_records.py new file mode 100755 index 0000000..266b1dd --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_records.py @@ -0,0 +1,357 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
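
[Illustration only, not part of the patch] test_records.py below validates the launchpad records end to end, including an NFVI-metrics check that recomputes expected totals from the VNF descriptors (memory in MB, storage in GB, vCPU counts) and compares them to the metrics reported in the NSR. A hypothetical condensed form of that aggregation (descriptor attribute names follow the test code below):

    import collections

    def expected_nfvi_totals(vnfd_vnfr_pairs):
        """Re-derive NSR-level NFVI totals from the constituent VNF descriptors."""
        totals = collections.defaultdict(int)
        for vnfd, _vnfr in vnfd_vnfr_pairs:
            flavor = vnfd.vdu[0].vm_flavor
            totals['vm'] += 1
            totals['memory'] += flavor.memory_mb * (10 ** 6)    # MB -> bytes
            totals['storage'] += flavor.storage_gb * (10 ** 9)  # GB -> bytes
            totals['vcpu'] += flavor.vcpu_count
        return totals

    # Compared against nsr.nfvi_metrics.vm.active_vm and the per-resource
    # 'total' fields in the NFVI metrics test below.
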
+ +import collections +import socket +import subprocess +import time + +import pytest + +import gi +gi.require_version('RwMcYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +from gi.repository import ( + NsdYang, + RwConmanYang, + RwMcYang, + RwNsrYang, + VlrYang, + RwVlrYang, + RwVnfdYang, + RwVnfrYang, + VnfrYang + ) +import rift.auto.session +import rift.mano.examples.ping_pong_nsd as ping_pong + + +@pytest.fixture(scope='module') +def proxy(request, launchpad_session): + return launchpad_session.proxy + + +def yield_vnfd_vnfr_pairs(proxy, nsr=None): + """ + Yields tuples of vnfd & vnfr entries. + + Args: + proxy (callable): Launchpad proxy + nsr (optional): If specified, only the vnfr & vnfd records of the NSR + are returned + + Yields: + Tuple: VNFD and its corresponding VNFR entry + """ + def get_vnfd(vnfd_id): + xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id) + return proxy(RwVnfdYang).get(xpath) + + vnfr = "/vnfr-catalog/vnfr" + vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True) + for vnfr in vnfrs.vnfr: + + if nsr and vnfr.id not in nsr.constituent_vnfr_ref: + continue + + vnfd = get_vnfd(vnfr.vnfd_ref) + yield vnfd, vnfr + + +def yield_nsd_nsr_pairs(proxy): + """Yields tuples of NSD & NSR pairs + + Args: + proxy (callable): Launchpad proxy + + Yields: + Tuple: NSD and its corresponding NSR record + """ + nsr = "/ns-instance-opdata/nsr" + nsrs = proxy(RwNsrYang).get(nsr, list_obj=True) + for nsr in nsrs.nsr: + nsd_path = "/ns-instance-config/nsr[id='{}']".format( + nsr.ns_instance_config_ref) + nsd = proxy(RwNsrYang).get_config(nsd_path) + + yield nsd, nsr + + +def assert_records(proxy): + """Verifies if the NSR & VNFR records are created + """ + ns_tuple = list(yield_nsd_nsr_pairs(proxy)) + assert len(ns_tuple) == 1 + + vnf_tuple = list(yield_vnfd_vnfr_pairs(proxy)) + assert len(vnf_tuple) == 2 + + +@pytest.mark.depends('pingpong') +@pytest.mark.incremental +class TestRecords(object): + def is_valid_ip(self, address): + """Verifies if it is a valid IP and if its accessible + + Args: + address (str): IP address + + Returns: + boolean + """ + try: + socket.inet_aton(address) + except socket.error: + return False + else: + return True + + def test_records_present(self, proxy): + assert_records(proxy) + + def test_vdu_record_params(self, proxy): + """ + Asserts: + 1. If a valid floating IP has been assigned to the VM + 3. Check if the VM flavor has been copied over the VDUR + """ + for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy): + assert vnfd.mgmt_interface.port == vnfr.mgmt_interface.port + + for vdud, vdur in zip(vnfd.vdu, vnfr.vdur): + assert vdud.vm_flavor == vdur.vm_flavor + assert self.is_valid_ip(vdur.management_ip) is True + assert vdud.external_interface[0].vnfd_connection_point_ref == \ + vdur.external_interface[0].vnfd_connection_point_ref + + def test_external_vl(self, proxy): + """ + Asserts: + 1. Valid IP for external connection point + 2. A valid external network fabric + 3. Connection point names are copied over + """ + for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy): + cp_des, cp_rec = vnfd.connection_point, vnfr.connection_point + assert cp_des[0].name == cp_rec[0].name + assert self.is_valid_ip(cp_rec[0].ip_address) is True + + xpath = "/vlr-catalog/vlr[id='{}']/network-id".format(cp_rec[0].vlr_ref) + network_id = proxy(VlrYang).get(xpath) + assert len(network_id) > 0 + + def test_monitoring_params(self, proxy): + """ + Asserts: + 1. The value counter ticks? + 2. 
If the meta fields are copied over + """ + def mon_param_record(vnfr_id, mon_param_id): + return '/vnfr-catalog/vnfr[id="{}"]/monitoring-param[id="{}"]'.format( + vnfr_id, mon_param_id) + + for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy): + for mon_des in (vnfd.monitoring_param): + mon_rec = mon_param_record(vnfr.id, mon_des.id) + mon_rec = proxy(VnfrYang).get(mon_rec) + + # Meta data check + fields = mon_des.as_dict().keys() + for field in fields: + assert getattr(mon_des, field) == getattr(mon_rec, field) + # Tick check + #assert mon_rec.value_integer > 0 + + def test_nsr_record(self, proxy): + """ + Currently we only test for the components of NSR tests. Ignoring the + operational-events records + + Asserts: + 1. The constituent components. + 2. Admin status of the corresponding NSD record. + """ + for nsd, nsr in yield_nsd_nsr_pairs(proxy): + # 1 n/w and 2 connection points + assert len(nsr.vlr) == 1 + assert len(nsr.vlr[0].vnfr_connection_point_ref) == 2 + + assert len(nsr.constituent_vnfr_ref) == 2 + assert nsd.admin_status == 'ENABLED' + + def test_create_update_vnfd(self, proxy, ping_pong_records): + """ + Verify VNFD related operations + + Asserts: + If a VNFD record is created + """ + ping_vnfd, pong_vnfd, _ = ping_pong_records + vnfdproxy = proxy(RwVnfdYang) + + for vnfd_record in [ping_vnfd, pong_vnfd]: + xpath = "/vnfd-catalog/vnfd" + vnfdproxy.create_config(xpath, vnfd_record.vnfd) + + xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id) + vnfd = vnfdproxy.get(xpath) + assert vnfd.id == vnfd_record.id + + vnfdproxy.replace_config(xpath, vnfd_record.vnfd) + + def test_create_update_nsd(self, proxy, ping_pong_records): + """ + Verify NSD related operations + + Asserts: + If NSD record was created + """ + _, _, ping_pong_nsd = ping_pong_records + nsdproxy = proxy(NsdYang) + + xpath = "/nsd-catalog/nsd" + nsdproxy.create_config(xpath, ping_pong_nsd.descriptor) + + xpath = "/nsd-catalog/nsd[id='{}']".format(ping_pong_nsd.id) + nsd = nsdproxy.get(xpath) + assert nsd.id == ping_pong_nsd.id + + nsdproxy.replace_config(xpath, ping_pong_nsd.descriptor) + + def test_wait_for_pingpong_configured(self, proxy): + nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + + assert len(nsrs) == 1 + current_nsr = nsrs[0] + + xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref) + proxy(RwNsrYang).wait_for(xpath, "configured", timeout=240) + + def test_cm_nsr(self, proxy): + """ + Asserts: + 1. The ID of the NSR in cm-state + 2. Name of the cm-nsr + 3. The vnfr component's count + 4. State of the cm-nsr + """ + for nsd, _ in yield_nsd_nsr_pairs(proxy): + con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsd.id) + con_data = proxy(RwConmanYang).get(con_nsr_xpath) + + assert con_data is not None, \ + "No Config data obtained for the nsd {}: {}".format( + nsd.name, nsd.id) + assert con_data.name == "ping_pong_nsd" + assert len(con_data.cm_vnfr) == 2 + + state_path = con_nsr_xpath + "/state" + proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120) + + def test_cm_vnfr(self, proxy): + """ + Asserts: + 1. The ID of Vnfr in cm-state + 2. Name of the vnfr + 3. State of the VNFR + 4. Checks for a reachable IP in mgmt_interface + 5. Basic checks for connection point and cfg_location. 
+ """ + def is_reachable(ip): + rc = subprocess.call(["ping", "-c1", ip]) + if rc == 0: + return True + return False + + nsd, _ = list(yield_nsd_nsr_pairs(proxy))[0] + con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsd.id) + + for _, vnfr in yield_vnfd_vnfr_pairs(proxy): + con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id='{}']".format(vnfr.id) + con_data = proxy(RwConmanYang).get(con_vnfr_path) + + assert con_data is not None + + state_path = con_vnfr_path + "/state" + proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120) + + con_data = proxy(RwConmanYang).get(con_vnfr_path) + assert is_reachable(con_data.mgmt_interface.ip_address) is True + + assert len(con_data.connection_point) == 1 + connection_point = con_data.connection_point[0] + assert connection_point.name == vnfr.connection_point[0].name + assert connection_point.ip_address == vnfr.connection_point[0].ip_address + + assert con_data.cfg_location is not None + +@pytest.mark.depends('pingpong') +@pytest.mark.incremental +class TestNfviMetrics(object): + + def test_records_present(self, proxy): + assert_records(proxy) + + def test_nfvi_metrics(self, proxy): + """ + Verify the NFVI metrics + + Asserts: + 1. Computed metrics, such as memory, cpu, storage and ports, match + with the metrics in NSR record. The metrics are computed from the + descriptor records. + 2. Check if the 'utilization' field has a valid value (> 0) and matches + with the 'used' field, if available. + """ + for nsd, nsr in yield_nsd_nsr_pairs(proxy): + nfvi_metrics = nsr.nfvi_metrics + computed_metrics = collections.defaultdict(int) + + # Get the constituent VNF records. + for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy, nsr): + vdu = vnfd.vdu[0] + vm_spec = vdu.vm_flavor + computed_metrics['vm'] += 1 + computed_metrics['memory'] += vm_spec.memory_mb * (10**6) + computed_metrics['storage'] += vm_spec.storage_gb * (10**9) + computed_metrics['vcpu'] += vm_spec.vcpu_count + computed_metrics['external_ports'] += len(vnfd.connection_point) + computed_metrics['internal_ports'] += len(vdu.internal_connection_point) + + assert nfvi_metrics.vm.active_vm == computed_metrics['vm'] + + # Availability checks + for metric_name in computed_metrics: + metric_data = getattr(nfvi_metrics, metric_name) + total_available = getattr(metric_data, 'total', None) + + if total_available is not None: + assert computed_metrics[metric_name] == total_available + + # Utilization checks + for metric_name in ['memory', 'storage', 'vcpu']: + metric_data = getattr(nfvi_metrics, metric_name) + + utilization = metric_data.utilization + # assert utilization > 0 + + # If used field is available, check if it matches with utilization! + total = metric_data.total + used = getattr(metric_data, 'used', None) + if used is not None: + assert total > 0 + computed_utilization = round((used/total) * 100, 2) + assert abs(computed_utilization - utilization) <= 0.1 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_startstop.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_startstop.py new file mode 100644 index 0000000..550bd47 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_startstop.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file test_startstop.py +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 12/17/2015 +@brief System test of launchpad start and stop functionality +""" + +import pytest + +import gi +gi.require_version('RwMcYang', '1.0') +from gi.repository import RwMcYang + +@pytest.fixture(scope='module') +def proxy(request, mgmt_session): + '''fixture which returns a proxy to RwMcYang + + Arguments: + request - pytest fixture request + mgmt_session - mgmt_session fixture - instance of a rift.auto.session class + ''' + return mgmt_session.proxy(RwMcYang) + +@pytest.mark.depends('launchpad') +@pytest.mark.incremental +class TestLaunchpadStartStop: + + @pytest.mark.feature('mission-control') + def test_stop_launchpad(self, proxy, mgmt_domain_name): + '''Invoke stop launchpad RPC + + Asserts: + Launchpad begins test in state 'started' + Launchpad finishes test in state 'stopped' + ''' + + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=10, + fail_on=['crashed']) + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=120, + fail_on=['crashed']) + + @pytest.mark.feature('mission-control') + def test_start_launchpad(self, proxy, mgmt_domain_name, launchpad_scraper): + '''Invoke start launchpad RPC + + Asserts: + Launchpad begins test in state 'stopped' + Launchpad finishes test in state 'started' + ''' + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=10, + fail_on=['crashed']) + start_launchpad_input = RwMcYang.StartLaunchpadInput(mgmt_domain=mgmt_domain_name) + start_launchpad_output = proxy.rpc(start_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=400, + fail_on=['crashed']) + launchpad_scraper.reset() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_lp_standalone_systest_openstack.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_lp_standalone_systest_openstack.racfg new file mode 100644 index 0000000..79abeb8 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_lp_standalone_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_PINGPONG_LP_STANDALONE_OPENSTACK", + "commandline":"./pingpong_lp_standalone_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo --lp-standalone ", + "test_description":"System test for ping and pong vnf with standalone Launcpad (Openstack)", + "run_as_root": false, + "status":"working", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 2600, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg new file mode 100644 index 0000000..6ad25f6 --- 
/dev/null +++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_PINGPONG_RECORDS_OPENSTACK", + "commandline":"./pingpong_records_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo ", + "test_description":"System test for ping and pong vnf (Openstack)", + "run_as_root": false, + "status":"working", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 2600, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg new file mode 100644 index 0000000..ce97a13 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_PINGPONG_VNF_RELOAD_OPENSTACK", + "commandline":"./pingpong_vnf_reload_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo ", + "test_description":"System test for ping pong vnf reload(Openstack)", + "run_as_root": false, + "status":"working", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 2200, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg new file mode 100644 index 0000000..9dd3279 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg @@ -0,0 +1,19 @@ +{ + "test_name":"TC_PINGPONG_VNF_CLOUDSIM", + "commandline":"./pingpong_vnf_systest", + "target_vm":"VM", + "test_description":"System test for ping and pong vnf", + "run_as_root": true, + "status":"working", + "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"], + "timelimit": 1800, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg new file mode 100644 index 0000000..2371c9d --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_PINGPONG_VNF_OPENSTACK", + "commandline":"./pingpong_vnf_systest --cloud-type 'openstack' --cloud-host '10.66.4.115'", + "test_description":"System test for ping and pong vnf (Openstack)", + "run_as_root": false, + "status":"broken", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 2200, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/test/CMakeLists.txt b/modules/core/mano/rwlaunchpad/test/CMakeLists.txt new file mode 100644 index 0000000..64017c2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/CMakeLists.txt @@ -0,0 +1,59 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Creation Date: 10/01/2015 +# + +cmake_minimum_required(VERSION 2.8) + +install( + PROGRAMS + launchpad.py + DESTINATION demos + COMPONENT ${PKG_LONG_NAME} + ) + +install( + FILES + racfg/launchpad_module_test.racfg + DESTINATION + usr/rift/systemtest/launchpad + COMPONENT ${PKG_LONG_NAME} + ) + +install( + FILES + pytest/lp_test.py + DESTINATION + usr/rift/systemtest/pytest/launchpad + COMPONENT ${PKG_LONG_NAME} + 
) + +install( + PROGRAMS + launchpad_module_test + DESTINATION + usr/rift/systemtest/launchpad + COMPONENT ${PKG_LONG_NAME} + ) + +rift_py3test(rwmano_utest + TEST_ARGS + ${CMAKE_CURRENT_SOURCE_DIR}/mano_ut.py + ) + +#rift_py3test(rwmano_error_utest +# TEST_ARGS +# ${CMAKE_CURRENT_SOURCE_DIR}/mano_error_ut.py +# ) + +rift_py3test(utest_rwmonitor + TEST_ARGS + ${CMAKE_CURRENT_SOURCE_DIR}/utest_rwmonitor.py + ) + +rift_py3test(utest_rwnsm + TEST_ARGS + ${CMAKE_CURRENT_SOURCE_DIR}/utest_rwnsm.py + ) diff --git a/modules/core/mano/rwlaunchpad/test/juju_ut.py b/modules/core/mano/rwlaunchpad/test/juju_ut.py new file mode 100755 index 0000000..17bff54 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/juju_ut.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import argparse +import asyncio +import logging +import os +import sys +import time +import unittest +import uuid + +import xmlrunner + +from gi.repository import ( + RwDts as rwdts, + RwLaunchpadYang as launchpadyang, + RwNsmYang as rwnsmyang, + RwCloudYang as rwcloudyang, + RwResourceMgrYang, + RwConfigAgentYang, + NsrYang + ) +import rift.tasklets +import rift.test.dts + +import mano_ut + + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class OpenManoNsmTestCase(mano_ut.ManoTestCase): + """ + DTS GI interface unittests + + Note: Each tests uses a list of asyncio.Events for staging through the + test. These are required here because we are bring up each coroutine + ("tasklet") at the same time and are not implementing any re-try + mechanisms. For instance, this is used in numerous tests to make sure that + a publisher is up and ready before the subscriber sends queries. Such + event lists should not be used in production software. 
+ """ + + @classmethod + def configure_suite(cls, rwmain): + launchpad_build_dir = os.path.join( + cls.top_dir, + '.build/modules/core/mc/core_mc-build/rwlaunchpad' + ) + + rwmain.add_tasklet( + os.path.join(launchpad_build_dir, 'plugins/rwnsm'), + 'rwnsmtasklet' + ) + + cls.waited_for_tasklets = False + + @classmethod + def configure_schema(cls): + return rwnsmyang.get_schema() + + @classmethod + def configure_timeout(cls): + return 240 + + @asyncio.coroutine + def wait_tasklets(self): + if not OpenManoNsmTestCase.waited_for_tasklets: + OpenManoNsmTestCase.waited_for_tasklets = True + self._wait_event = asyncio.Event(loop=self.loop) + yield from asyncio.sleep(5, loop=self.loop) + self._wait_event.set() + + yield from self._wait_event.wait() + + @asyncio.coroutine + def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1): + yield from self.ping_pong.publish_desciptors( + num_external_vlrs, + num_internal_vlrs, + num_ping_vms + ) + + def unpublish_descriptors(self): + self.ping_pong.unpublish_descriptors() + + @asyncio.coroutine + def wait_until_nsr_active_or_failed(self, nsr_id, timeout_secs=20): + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + if len(nsrs) == 0: + continue + self.assertEqual(1, len(nsrs)) + if nsrs[0].operational_status in ['running', 'failed']: + return + + self.log.debug("Rcvd NSR with %s status", nsrs[0].operational_status) + yield from asyncio.sleep(2, loop=self.loop) + + self.assertIn(nsrs[0].operational_status, ['running', 'failed']) + + def configure_test(self, loop, test_id): + self.log.debug("STARTING - %s", self.id()) + self.tinfo = self.new_tinfo(self.id()) + self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop) + self.ping_pong = mano_ut.PingPongDescriptorPublisher(self.log, self.loop, self.dts) + self.querier = mano_ut.ManoQuerier(self.log, self.dts) + + # Add a task to wait for tasklets to come up + asyncio.ensure_future(self.wait_tasklets(), loop=self.loop) + + @asyncio.coroutine + def configure_cloud_account(self, cloud_name="cloud1"): + account = rwcloudyang.CloudAccount() + account.name = cloud_name + account.account_type = "mock" + account.mock.username = "mock_user" + + account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name) + self.log.info("Configuring cloud-account: %s", account) + yield from self.dts.query_create(account_xpath, + rwdts.Flag.ADVISE | rwdts.Flag.TRACE, + account) + + @asyncio.coroutine + def configure_config_agent(self): + account_xpath = "C,/rw-config-agent:config-agent/account[name='Juju1 config']" + + juju1 = RwConfigAgentYang.ConfigAgentAccount.from_dict({ + "name": "Juju1 config", + "account_type": "juju", + "juju": { + "ip_address": "1.1.1.1", + "port": 9000, + "user": "foo", + "secret": "1232" + } + }) + + cfg_agt = RwConfigAgentYang.ConfigAgent() + cfg_agt.account.append(juju1) + cfg_agt.as_dict() + + yield from self.dts.query_create( + account_xpath, + rwdts.Flag.ADVISE, + juju1, + ) + + + @asyncio.coroutine + def configure_config_primitive(self, nsr_id): + job_data = NsrYang.YangInput_Nsr_ExecNsConfigPrimitive.from_dict({ + "name": "Add Corporation", + "nsr_id_ref": nsr_id, + "vnf_list": [{ + "vnfr_id_ref": "10631555-757e-4924-96e6-41a0297a9406", + "member_vnf_index_ref": 1, + "vnf_primitive": [{ + "name": "create-update-user", + "parameter": [ + {"name" : "number", "value": "1234334"}, + {"name" : "password", "value": "1234334"}, + ] + }] + }] + + }) + yield 
from self.dts.query_rpc( + "/nsr:exec-ns-config-primitive", + 0, + job_data, + ) + + @rift.test.dts.async_test + def test_ping_pong_nsm_instantiate(self): + yield from self.wait_tasklets() + yield from self.configure_cloud_account("mock_account") + yield from self.configure_config_agent() + yield from self.publish_desciptors(num_internal_vlrs=0) + + nsr_id = yield from self.ping_pong.publish_nsr_config("mock_account") + yield from asyncio.sleep(10, loop=self.loop) + + res_iter = yield from self.dts.query_read("D,/nsr:ns-instance-opdata/nsr:nsr") + for i in res_iter: + result = yield from i + + print ("**", result) + # yield from self.configure_config_primitive(nsr_id) + yield from asyncio.sleep(10, loop=self.loop) + + # nsrs = yield from self.querier.get_nsr_opdatas() + # nsr = nsrs[0] + + +def main(): + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + parser.add_argument('-n', '--no-runner', action='store_true') + args, unittest_args = parser.parse_known_args() + if args.no_runner: + runner = None + + OpenManoNsmTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN + + unittest.main(testRunner=runner, argv=[sys.argv[0]]+unittest_args) + +if __name__ == '__main__': + main() + +# vim: sw \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/launchpad.py b/modules/core/mano/rwlaunchpad/test/launchpad.py new file mode 100755 index 0000000..5387e1f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/launchpad.py @@ -0,0 +1,406 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import logging +import os +import resource +import socket +import sys +import subprocess +import shlex +import netifaces + +from rift.rwlib.util import certs +import rift.rwcal.cloudsim +import rift.rwcal.cloudsim.net +import rift.vcs +import rift.vcs.core as core +import rift.vcs.demo +import rift.vcs.vms + +import rift.rwcal.cloudsim +import rift.rwcal.cloudsim.net + +from rift.vcs.ext import ClassProperty + +logger = logging.getLogger(__name__) + + +class NsmTasklet(rift.vcs.core.Tasklet): + """ + This class represents a network services manager tasklet. + """ + + def __init__(self, name='network-services-manager', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a NsmTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + """ + super(NsmTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwnsmtasklet') + plugin_name = ClassProperty('rwnsmtasklet') + + +class VnsTasklet(rift.vcs.core.Tasklet): + """ + This class represents a network services manager tasklet. 
+ """ + + def __init__(self, name='virtual-network-service', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a VnsTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + """ + super(VnsTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnstasklet') + plugin_name = ClassProperty('rwvnstasklet') + + +class VnfmTasklet(rift.vcs.core.Tasklet): + """ + This class represents a virtual network function manager tasklet. + """ + + def __init__(self, name='virtual-network-function-manager', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a VnfmTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + """ + super(VnfmTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnfmtasklet') + plugin_name = ClassProperty('rwvnfmtasklet') + + +class ResMgrTasklet(rift.vcs.core.Tasklet): + """ + This class represents a Resource Manager tasklet. + """ + + def __init__(self, name='Resource-Manager', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a ResMgrTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + """ + super(ResMgrTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwresmgrtasklet') + plugin_name = ClassProperty('rwresmgrtasklet') + + +class MonitorTasklet(rift.vcs.core.Tasklet): + """ + This class represents a tasklet that is used to monitor NFVI metrics. + """ + + def __init__(self, name='nfvi-metrics-monitor', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a MonitorTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + + """ + super(MonitorTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonitor') + plugin_name = ClassProperty('rwmonitor') + + +def get_ui_ssl_args(): + """Returns the SSL parameter string for launchpad UI processes""" + + try: + use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key() + except certs.BootstrapSslMissingException: + logger.error('No bootstrap certificates found. 
Disabling UI SSL') + use_ssl = False + + # If we're not using SSL, no SSL arguments are necessary + if not use_ssl: + return "" + + return "--enable-https --keyfile-path=%s --certfile-path=%s" % (keyfile_path, certfile_path) + + +class UIServer(rift.vcs.NativeProcess): + def __init__(self, name="RW.MC.UI", + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + super(UIServer, self).__init__( + name=name, + exe="./usr/share/rw.ui/webapp/scripts/launch_ui.sh", + config_ready=config_ready, + recovery_action=recovery_action, + ) + + @property + def args(self): + return get_ui_ssl_args() + + +class ComposerUI(rift.vcs.NativeProcess): + def __init__(self, name="RW.COMPOSER.UI", + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + super(ComposerUI, self).__init__( + name=name, + exe="./usr/share/composer/scripts/launch_composer.sh", + config_ready=config_ready, + recovery_action=recovery_action, + ) + + @property + def args(self): + return get_ui_ssl_args() + + +class ConfigManagerTasklet(rift.vcs.core.Tasklet): + """ + This class represents a Resource Manager tasklet. + """ + + def __init__(self, name='Configuration-Manager', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a ConfigManagerTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + """ + super(ConfigManagerTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet') + plugin_name = ClassProperty('rwconmantasklet') + + +class Demo(rift.vcs.demo.Demo): + def __init__(self, with_cntr_mgr=False): + + procs = [ + rift.vcs.RiftCli(), + rift.vcs.uAgentTasklet(), + rift.vcs.DtsRouterTasklet(), + rift.vcs.MsgBrokerTasklet(), + rift.vcs.RestconfTasklet(), + rift.vcs.Watchdog(), + rift.vcs.RestPortForwardTasklet(), + rift.vcs.CalProxy(), + MonitorTasklet(), + NsmTasklet(), + #VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value,), + VnsTasklet(), + MonitorTasklet(), + UIServer(), + ComposerUI(), + ConfigManagerTasklet(), + rift.vcs.Launchpad(), + ResMgrTasklet(), + ] + + restart_procs = [ + VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value,), + ] + if with_cntr_mgr: + procs.append(rift.vcs.ContainerManager()) + + super(Demo, self).__init__( + # Construct the system. This system consists of 1 cluster in 1 + # colony. The master cluster houses CLI and management VMs + sysinfo = rift.vcs.SystemInfo( + colonies=[ + rift.vcs.Colony( + name='master', + clusters=[ + rift.vcs.VirtualMachine( + name='vm-launchpad', + ip='127.0.0.1', + procs=procs, + restart_procs=restart_procs, + ), + ] + ) + ] + ), + + # Define the generic portmap. + port_map = {}, + + # Define a mapping from the placeholder logical names to the real + # port names for each of the different modes supported by this demo. + port_names = { + 'ethsim': { + }, + 'pci': { + } + }, + + # Define the connectivity between logical port names. 
+ port_groups = {}, + ) + + +def clear_salt_keys(): + # clear all the previously installed salt keys + logger.info("Removing all unconnected salt keys") + stdout = subprocess.check_output( + shlex.split('salt-run manage.down'), + universal_newlines=True, + ) + + down_minions = stdout.splitlines() + + for line in down_minions: + salt_id = line.strip().replace("- ", "") + logger.info("Removing old unconnected salt id: %s", salt_id) + minion_keys_stdout = subprocess.check_output( + shlex.split('salt-key -f {}'.format(salt_id)), + universal_newlines=True) + + minion_keys = minion_keys_stdout.splitlines() + for key_line in minion_keys: + if "Keys" in key_line: + continue + + key_split = key_line.split(":") + if len(key_split) < 2: + continue + + key = key_split[0] + + # Delete the minion key + logger.info("Deleting minion %s key: %s", salt_id, key) + subprocess.check_call(shlex.split('salt-key -d {} -y'.format(key))) + + +def main(argv=sys.argv[1:]): + logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s') + + # Create a parser which includes all generic demo arguments + parser = rift.vcs.demo.DemoArgParser() + + parser.add_argument( + '--with-cntr-mgr', + action='store_true', + help='Enable the container manager tasklet' + ) + + args = parser.parse_args(argv) + + # Disable loading any kernel modules for the launchpad VM + # since it doesn't need it and it will fail within containers + os.environ["NO_KERNEL_MODS"] = "1" + + if args.with_cntr_mgr: + # In order to reliably module test, the virbr0 bridge + # with IP 192.168.122.1 must exist before we start executing. + # This is because in expanded mode, we need to use a container + # accessible IP address for zookeeper clients. + rift.rwcal.cloudsim.net.virsh_initialize_default() + clear_salt_keys() + + # Remove the persistant DTS recovery files + for f in os.listdir(os.environ["INSTALLDIR"]): + if f.endswith(".db"): + os.remove(os.path.join(os.environ["INSTALLDIR"], f)) + + #load demo info and create Demo object + demo = Demo(args.with_cntr_mgr) + + # Create the prepared system from the demo + system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args, + northbound_listing="cli_rwmc_schema_listing.txt") + + confd_ip = socket.gethostbyname(socket.gethostname()) + intf = netifaces.ifaddresses('eth0') + if intf and netifaces.AF_INET in intf and len(intf[netifaces.AF_INET]): + confd_ip = intf[netifaces.AF_INET][0]['addr'] + rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip) + + # Start the prepared system + system.start() + + +if __name__ == "__main__": + resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) ) + try: + main() + except rift.vcs.demo.ReservationError: + print("ERROR: unable to retrieve a list of IP addresses from the reservation system") + sys.exit(1) + except rift.vcs.demo.MissingModeError: + print("ERROR: you need to provide a mode to run the script") + sys.exit(1) + finally: + os.system("stty sane") \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/launchpad_module_test b/modules/core/mano/rwlaunchpad/test/launchpad_module_test new file mode 100755 index 0000000..40b1e34 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/launchpad_module_test @@ -0,0 +1,43 @@ +#!/bin/bash +# +# +# Author(s): Austin Cormier +# Creation Date: 2015/10/15 +# +# Helper script for invoking the Launchpad module test using the systest_wrapper + +set -o nounset +set -u +set -e + +THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 
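+
+# Paths into the RIFT install tree that this module test relies on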
+PYTEST_DIR="${RIFT_INSTALL}/usr/rift/systemtest/pytest" +SYSTEM_TEST_UTIL_DIR="${RIFT_INSTALL}/usr/rift/systemtest/util" +DEMO_DIR=$RIFT_INSTALL/demos +DEMO_TEST_DIR=$DEMO_DIR/tests + +TEST_NAME="TC_LAUNCHPAD_MODTEST_0100" +SCRIPT_SYSTEM="${RIFT_INSTALL}/demos/launchpad.py" +SCRIPT_TEST="py.test -s -v ${PYTEST_DIR}/launchpad/lp_test.py" +RESULT_XML="launchpad_modtest.xml" +wait_system=1000 + +up_cmd="$SYSTEM_TEST_UTIL_DIR/wait_until_system_started.py \ + --max-wait $wait_system" + +system_args="\ + --mode ethsim \ + --ip-list=\"192.168.122.1\" \ + --with-cntr-mgr" + +test_args="\ + --junitprefix ${TEST_NAME} \ + --junitxml ${RIFT_MODULE_TEST}/${RESULT_XML}" + +echo "Executing Launchpad Module test" + +# We must be in the pytest install directory to correctly include +# conftest.py +cd ${PYTEST_DIR} + +${SYSTEM_TEST_UTIL_DIR}/systest_wrapper.sh --system_cmd "${SCRIPT_SYSTEM} ${system_args}" --up_cmd "${up_cmd}" --test_cmd "${SCRIPT_TEST} ${test_args}" diff --git a/modules/core/mano/rwlaunchpad/test/mano_error_ut.py b/modules/core/mano/rwlaunchpad/test/mano_error_ut.py new file mode 100755 index 0000000..616597f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/mano_error_ut.py @@ -0,0 +1,904 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
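+
+# Module test for NSM error handling: ResourceMgrMock stands in for the
+# resource manager so that individual virtual-link and VDU allocations can
+# be made to fail on demand, and ManoErrorTestCase checks how the NSM reacts.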
+ + +# +# + +import argparse +import asyncio +import logging +import os +import sys +import time +import unittest +import uuid + +import xmlrunner + +import gi.repository.RwDts as rwdts +import gi.repository.RwNsmYang as rwnsmyang +import gi.repository.RwResourceMgrYang as RwResourceMgrYang +import gi.repository.RwLaunchpadYang as launchpadyang +import rift.tasklets +import rift.test.dts + +import mano_ut + + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class OutOfResourceError(Exception): + pass + + +class ComputeResourceRequestMockEventHandler(object): + def __init__(self): + self._pool_name = "vm_pool" + self._vdu_id = str(uuid.uuid4()) + self._vdu_info = { + "vdu_id": self._vdu_id, + "state": "active", + "management_ip": "1.1.1.1", + "public_ip": "1.1.1.1", + "connection_points": [], + } + + self._resource_state = "active" + + self._event_id = None + self._request_info = None + + def allocate(self, event_id, request_info): + self._event_id = event_id + self._request_info = request_info + + self._vdu_info.update({ + "name": self._request_info.name, + "flavor_id": self._request_info.flavor_id, + "image_id": self._request_info.image_id, + }) + + for cp in request_info.connection_points: + info_cp = dict( + name=cp.name, + virtual_link_id=cp.virtual_link_id, + vdu_id=self._vdu_id, + state="active", + ip_address="1.2.3.4", + ) + info_cp = self._vdu_info["connection_points"].append(info_cp) + + @property + def event_id(self): + return self._event_id + + @property + def resource_state(self): + return self._resource_state + + def set_active(self): + self._resource_state = "active" + + def set_failed(self): + self._resource_state = "failed" + + def set_pending(self): + self._resource_state = "pending" + + @property + def response_msg(self): + resource_info = dict( + pool_name=self._pool_name, + resource_state=self.resource_state, + ) + resource_info.update(self._vdu_info) + + response = RwResourceMgrYang.VDUEventData.from_dict(dict( + event_id=self._event_id, + request_info=self._request_info.as_dict(), + resource_info=resource_info, + )) + + return response.resource_info + + +class NetworkResourceRequestMockEventHandler(object): + def __init__(self): + self._pool_name = "network_pool" + self._link_id = str(uuid.uuid4()) + self._link_info = { + "virtual_link_id": self._link_id, + "state": "active", + } + + self._resource_state = "active" + + self._event_id = None + self._request_info = None + + def allocate(self, event_id, request_info): + self._event_id = event_id + self._request_info = request_info + + self._link_info.update({ + "name": self._request_info.name, + "subnet": self._request_info.subnet, + }) + + @property + def event_id(self): + return self._event_id + + @property + def resource_state(self): + return self._resource_state + + def set_active(self): + self._resource_state = "active" + + def set_failed(self): + self._resource_state = "failed" + + def set_pending(self): + self._resource_state = "pending" + + @property + def response_msg(self): + resource_info = dict( + pool_name=self._pool_name, + resource_state=self.resource_state, + ) + resource_info.update(self._link_info) + + response = RwResourceMgrYang.VirtualLinkEventData.from_dict(dict( + event_id=self._event_id, + request_info=self._request_info.as_dict(), + resource_info=resource_info, + )) + + return response.resource_info + + +class ResourceMgrMock(object): + VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data" + VLINK_REQUEST_XPATH = 
"D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data" + + def __init__(self, dts, log, loop): + self._log = log + self._dts = dts + self._loop = loop + self._vdu_reg = None + self._link_reg = None + + self._vdu_reg_event = asyncio.Event(loop=self._loop) + self._link_reg_event = asyncio.Event(loop=self._loop) + + self._available_compute_handlers = [] + self._available_network_handlers = [] + + self._used_compute_handlers = {} + self._used_network_handlers = {} + + self._compute_allocate_requests = 0 + self._network_allocate_requests = 0 + + self._registered = False + + def _allocate_virtual_compute(self, event_id, request_info): + self._compute_allocate_requests += 1 + + if not self._available_compute_handlers: + raise OutOfResourceError("No more compute handlers") + + handler = self._available_compute_handlers.pop() + handler.allocate(event_id, request_info) + self._used_compute_handlers[event_id] = handler + + return handler.response_msg + + def _allocate_virtual_network(self, event_id, request_info): + self._network_allocate_requests += 1 + + if not self._available_network_handlers: + raise OutOfResourceError("No more network handlers") + + handler = self._available_network_handlers.pop() + handler.allocate(event_id, request_info) + self._used_network_handlers[event_id] = handler + + return handler.response_msg + + def _release_virtual_network(self, event_id): + del self._used_network_handlers[event_id] + + def _release_virtual_compute(self, event_id): + del self._used_compute_handlers[event_id] + + def _read_virtual_network(self, event_id): + return self._used_network_handlers[event_id].response_msg + + def _read_virtual_compute(self, event_id): + return self._used_compute_handlers[event_id].response_msg + + @asyncio.coroutine + def on_link_request_prepare(self, xact_info, action, ks_path, request_msg): + if not self._registered: + self._log.error("Got a prepare callback when not registered!") + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + self._log.debug("Received virtual-link on_prepare callback (self: %s, xact_info: %s, action: %s): %s", + self, xact_info, action, request_msg) + + response_info = None + response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info" + + schema = RwResourceMgrYang.VirtualLinkEventData().schema() + pathentry = schema.keyspec_to_entry(ks_path) + + if action == rwdts.QueryAction.CREATE: + response_info = self._allocate_virtual_network( + pathentry.key00.event_id, + request_msg.request_info, + ) + + elif action == rwdts.QueryAction.DELETE: + self._release_virtual_network(pathentry.key00.event_id) + + elif action == rwdts.QueryAction.READ: + response_info = self._read_virtual_network( + pathentry.key00.event_id + ) + else: + raise ValueError("Only read/create/delete actions available. 
Received action: %s" %(action)) + + self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.", + response_xpath, response_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info) + + @asyncio.coroutine + def on_vdu_request_prepare(self, xact_info, action, ks_path, request_msg): + if not self._registered: + self._log.error("Got a prepare callback when not registered!") + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + @asyncio.coroutine + def monitor_vdu_state(response_xpath, pathentry): + self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath) + loop_cnt = 120 + while loop_cnt > 0: + self._log.debug("VDU state monitoring: Sleeping for 1 second ") + yield from asyncio.sleep(1, loop = self._loop) + try: + response_info = self._read_virtual_compute( + pathentry.key00.event_id + ) + except Exception as e: + self._log.error( + "VDU state monitoring: Received exception %s " + "in VDU state monitoring for %s. Aborting monitoring", + str(e), response_xpath + ) + raise + + if response_info.resource_state == 'active' or response_info.resource_state == 'failed': + self._log.info( + "VDU state monitoring: VDU reached terminal state." + "Publishing VDU info: %s at path: %s", + response_info, response_xpath + ) + yield from self._dts.query_update(response_xpath, + rwdts.Flag.ADVISE, + response_info) + return + else: + loop_cnt -= 1 + + ### End of while loop. This is only possible if VDU did not reach active state + self._log.info("VDU state monitoring: VDU at xpath :%s did not reached active state in 120 seconds. Aborting monitoring", + response_xpath) + response_info = RwResourceMgrYang.VDUEventData_ResourceInfo() + response_info.resource_state = 'failed' + yield from self._dts.query_update(response_xpath, + rwdts.Flag.ADVISE, + response_info) + return + + self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg) + + response_info = None + response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info" + + schema = RwResourceMgrYang.VDUEventData().schema() + pathentry = schema.keyspec_to_entry(ks_path) + + if action == rwdts.QueryAction.CREATE: + response_info = self._allocate_virtual_compute( + pathentry.key00.event_id, + request_msg.request_info, + ) + if response_info.resource_state == 'pending': + asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry), + loop = self._loop) + + elif action == rwdts.QueryAction.DELETE: + self._release_virtual_compute( + pathentry.key00.event_id + ) + + elif action == rwdts.QueryAction.READ: + response_info = self._read_virtual_compute( + pathentry.key00.event_id + ) + else: + raise ValueError("Only create/delete actions available. 
Received action: %s" %(action)) + + self._log.debug("Responding with VDUInfo at xpath %s: %s", + response_xpath, response_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info) + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def on_request_ready(registration, status): + self._log.debug("Got request ready event (registration: %s) (status: %s)", + registration, status) + + if registration == self._link_reg: + self._link_reg_event.set() + elif registration == self._vdu_reg: + self._vdu_reg_event.set() + else: + self._log.error("Unknown registration ready event: %s", registration) + + + with self._dts.group_create() as group: + self._log.debug("Registering for Link Resource Request using xpath: %s", + ResourceMgrMock.VLINK_REQUEST_XPATH) + + self._link_reg = group.register( + xpath=ResourceMgrMock.VLINK_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready, + on_prepare=self.on_link_request_prepare), + flags=rwdts.Flag.PUBLISHER) + + self._log.debug("Registering for VDU Resource Request using xpath: %s", + ResourceMgrMock.VDU_REQUEST_XPATH) + + self._vdu_reg = group.register( + xpath=ResourceMgrMock.VDU_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready, + on_prepare=self.on_vdu_request_prepare), + flags=rwdts.Flag.PUBLISHER) + + self._registered = True + + def unregister(self): + self._link_reg.deregister() + self._vdu_reg.deregister() + self._registered = False + + @asyncio.coroutine + def wait_ready(self, timeout=5): + self._log.debug("Waiting for all request registrations to become ready.") + yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()], + timeout=timeout, loop=self._loop) + + def create_compute_mock_event_handler(self): + handler = ComputeResourceRequestMockEventHandler() + self._available_compute_handlers.append(handler) + + return handler + + def create_network_mock_event_handler(self): + handler = NetworkResourceRequestMockEventHandler() + self._available_network_handlers.append(handler) + + return handler + + @property + def num_compute_requests(self): + return self._compute_allocate_requests + + @property + def num_network_requests(self): + return self._network_allocate_requests + + @property + def num_allocated_compute_resources(self): + return len(self._used_compute_handlers) + + @property + def num_allocated_network_resources(self): + return len(self._used_network_handlers) + + +@unittest.skip('failing and needs rework') +class ManoErrorTestCase(rift.test.dts.AbstractDTSTest): + """ + DTS GI interface unittests + + Note: Each tests uses a list of asyncio.Events for staging through the + test. These are required here because we are bring up each coroutine + ("tasklet") at the same time and are not implementing any re-try + mechanisms. For instance, this is used in numerous tests to make sure that + a publisher is up and ready before the subscriber sends queries. Such + event lists should not be used in production software. 
+ """ + + @classmethod + def configure_suite(cls, rwmain): + launchpad_build_dir = os.path.join( + cls.top_dir, + '.build/modules/core/mc/core_mc-build/rwlaunchpad' + ) + + rwmain.add_tasklet( + os.path.join(launchpad_build_dir, 'plugins/rwvns'), + 'rwvnstasklet' + ) + + rwmain.add_tasklet( + os.path.join(launchpad_build_dir, 'plugins/rwvnfm'), + 'rwvnfmtasklet' + ) + + rwmain.add_tasklet( + os.path.join(launchpad_build_dir, 'plugins/rwnsm'), + 'rwnsmtasklet' + ) + + cls.waited_for_tasklets = False + + @asyncio.coroutine + def register_mock_res_mgr(self): + self.res_mgr = ResourceMgrMock( + self.dts, + self.log, + self.loop, + ) + yield from self.res_mgr.register() + + self.log.info("Waiting for resource manager to be ready") + yield from self.res_mgr.wait_ready() + + def unregister_mock_res_mgr(self): + self.res_mgr.unregister() + + @classmethod + def configure_schema(cls): + return rwnsmyang.get_schema() + + @classmethod + def configure_timeout(cls): + return 240 + + @asyncio.coroutine + def wait_tasklets(self): + if not ManoErrorTestCase.waited_for_tasklets: + yield from asyncio.sleep(5, loop=self.loop) + ManoErrorTestCase.waited_for_tasklets = True + + @asyncio.coroutine + def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1): + yield from self.ping_pong.publish_desciptors( + num_external_vlrs, + num_internal_vlrs, + num_ping_vms + ) + + def unpublish_descriptors(self): + self.ping_pong.unpublish_descriptors() + + @asyncio.coroutine + def wait_until_nsr_active_or_failed(self, nsr_id, timeout_secs=20): + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + self.assertEqual(1, len(nsrs)) + if nsrs[0].operational_status in ['running', 'failed']: + return + + self.log.debug("Rcvd NSR with %s status", nsrs[0].operational_status) + yield from asyncio.sleep(2, loop=self.loop) + + self.assertIn(nsrs[0].operational_status, ['running', 'failed']) + + def verify_number_compute_requests(self, num_requests): + self.assertEqual(num_requests, self.res_mgr.num_compute_requests) + + def verify_number_network_requests(self, num_requests): + self.assertEqual(num_requests, self.res_mgr.num_network_requests) + + def verify_number_allocated_compute(self, num_allocated): + self.assertEqual(num_allocated, self.res_mgr.num_allocated_compute_resources) + + def verify_number_allocated_network(self, num_allocated): + self.assertEqual(num_allocated, self.res_mgr.num_allocated_network_resources) + + def allocate_network_handlers(self, num_networks): + return [self.res_mgr.create_network_mock_event_handler() for _ in range(num_networks)] + + def allocate_compute_handlers(self, num_computes): + return [self.res_mgr.create_compute_mock_event_handler() for _ in range(num_computes)] + + @asyncio.coroutine + def create_mock_launchpad_tasklet(self): + yield from mano_ut.create_mock_launchpad_tasklet(self.log, self.dts) + + def configure_test(self, loop, test_id): + self.log.debug("STARTING - %s", self.id()) + self.tinfo = self.new_tinfo(self.id()) + self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop) + self.ping_pong = mano_ut.PingPongDescriptorPublisher(self.log, self.loop, self.dts) + self.querier = mano_ut.ManoQuerier(self.log, self.dts) + + # Add a task to wait for tasklets to come up + asyncio.ensure_future(self.wait_tasklets(), loop=self.loop) + + @rift.test.dts.async_test + def test_fail_first_nsm_vlr(self): + yield from self.publish_desciptors(num_external_vlrs=2) + yield from 
self.register_mock_res_mgr() + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(1) + yield from self.verify_num_nsr_vlrs(nsr_id, 2) + yield from self.verify_num_vnfrs(0) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "failed") + + self.verify_number_network_requests(1) + self.verify_number_compute_requests(0) + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_second_nsm_vlr(self): + yield from self.publish_desciptors(num_external_vlrs=2) + yield from self.register_mock_res_mgr() + self.allocate_network_handlers(1) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(2) + yield from self.verify_num_nsr_vlrs(nsr_id, 2) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + yield from self.verify_vlr_state(nsr_vlrs[1], "failed") + + self.verify_number_network_requests(2) + self.verify_number_compute_requests(0) + self.verify_number_allocated_network(1) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_first_vnf_first_vlr(self): + yield from self.publish_desciptors(num_internal_vlrs=2) + yield from self.register_mock_res_mgr() + self.allocate_network_handlers(1) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(2) + yield from self.verify_num_nsr_vlrs(nsr_id, 1) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + + yield from self.verify_num_nsr_vnfrs(nsr_id, 2) + + # Verify only a single vnfr was instantiated and is failed + yield from self.verify_num_vnfrs(1) + nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id) + yield from self.verify_vnf_state(nsr_vnfs[0], "failed") + + yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2) + vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0]) + yield from self.verify_vlr_state(vnf_vlrs[0], "failed") + + self.verify_number_network_requests(2) + self.verify_number_compute_requests(0) + self.verify_number_allocated_network(1) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from 
self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_first_vnf_second_vlr(self): + yield from self.publish_desciptors(num_internal_vlrs=2) + yield from self.register_mock_res_mgr() + self.allocate_network_handlers(2) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(3) + yield from self.verify_num_nsr_vlrs(nsr_id, 1) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + + yield from self.verify_num_nsr_vnfrs(nsr_id, 2) + + # Verify only a single vnfr was instantiated and is failed + yield from self.verify_num_vnfrs(1) + nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id) + yield from self.verify_vnf_state(nsr_vnfs[0], "failed") + + yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2) + vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0]) + yield from self.verify_vlr_state(vnf_vlrs[0], "running") + yield from self.verify_vlr_state(vnf_vlrs[1], "failed") + + self.verify_number_network_requests(3) + self.verify_number_compute_requests(0) + self.verify_number_allocated_network(2) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_first_vnf_first_vdu(self): + yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2) + yield from self.register_mock_res_mgr() + yield from self.create_mock_launchpad_tasklet() + self.allocate_network_handlers(3) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(3) + yield from self.verify_num_nsr_vlrs(nsr_id, 1) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + + yield from self.verify_num_nsr_vnfrs(nsr_id, 2) + + # Verify only a single vnfr was instantiated and is failed + yield from self.verify_num_vnfrs(1) + nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id) + yield from self.verify_vnf_state(nsr_vnfs[0], "failed") + + yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2) + vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0]) + yield from self.verify_vlr_state(vnf_vlrs[0], "running") + yield from self.verify_vlr_state(vnf_vlrs[1], "running") + + yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2) + vdus = yield from self.get_vnf_vdus(nsr_vnfs[0]) + self.verify_vdu_state(vdus[0], "failed") + + self.verify_number_network_requests(3) + self.verify_number_compute_requests(1) + self.verify_number_allocated_network(3) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield 
from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_first_vnf_second_vdu(self): + yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2) + yield from self.register_mock_res_mgr() + yield from self.create_mock_launchpad_tasklet() + self.allocate_network_handlers(3) + self.allocate_compute_handlers(1) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(3) + yield from self.verify_num_nsr_vlrs(nsr_id, 1) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + + yield from self.verify_num_nsr_vnfrs(nsr_id, 2) + + # Verify only a single vnfr was instantiated and is failed + yield from self.verify_num_vnfrs(1) + nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id) + yield from self.verify_vnf_state(nsr_vnfs[0], "failed") + + yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2) + vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0]) + yield from self.verify_vlr_state(vnf_vlrs[0], "running") + yield from self.verify_vlr_state(vnf_vlrs[1], "running") + + yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2) + + vdus = yield from self.get_vnf_vdus(nsr_vnfs[0]) + self.verify_vdu_state(vdus[0], "running") + self.verify_vdu_state(vdus[1], "failed") + + self.verify_number_network_requests(3) + self.verify_number_compute_requests(2) + self.verify_number_allocated_network(3) + self.verify_number_allocated_compute(1) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_second_vnf_second_vdu(self): + yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2) + yield from self.register_mock_res_mgr() + yield from self.create_mock_launchpad_tasklet() + self.allocate_network_handlers(5) + self.allocate_compute_handlers(3) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(5) + yield from self.verify_num_nsr_vlrs(nsr_id, 1) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + + yield from self.verify_num_nsr_vnfrs(nsr_id, 2) + + # Verify only a single vnfr was instantiated and is failed + yield from self.verify_num_vnfrs(2) + nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id) + yield from self.verify_vnf_state(nsr_vnfs[0], "running") + yield from self.verify_vnf_state(nsr_vnfs[1], "failed") + + yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2) + + vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0]) + yield from self.verify_vlr_state(vnf_vlrs[0], "running") + yield from self.verify_vlr_state(vnf_vlrs[1], "running") + + vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[1]) + yield from self.verify_vlr_state(vnf_vlrs[0], "running") + yield from 
self.verify_vlr_state(vnf_vlrs[1], "running") + + yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2) + yield from self.verify_num_vnfr_vdus(nsr_vnfs[1], 2) + + vdus = yield from self.get_vnf_vdus(nsr_vnfs[0]) + self.verify_vdu_state(vdus[0], "running") + self.verify_vdu_state(vdus[1], "running") + + vdus = yield from self.get_vnf_vdus(nsr_vnfs[1]) + self.verify_vdu_state(vdus[0], "running") + self.verify_vdu_state(vdus[1], "failed") + + self.verify_number_network_requests(5) + self.verify_number_compute_requests(4) + self.verify_number_allocated_network(5) + self.verify_number_allocated_compute(3) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + +def main(): + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + args, _ = parser.parse_known_args() + + ManoErrorTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN + + unittest.main(testRunner=runner) + +if __name__ == '__main__': + main() + +# vim: sw \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/mano_ut.py b/modules/core/mano/rwlaunchpad/test/mano_ut.py new file mode 100755 index 0000000..8ef373c --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/mano_ut.py @@ -0,0 +1,814 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
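+
+# Shared MANO unit-test helpers: XPath builders, a DTS querier, descriptor
+# publishers for the ping-pong example NSD, and the VnsTestCase module test
+# that instantiates and then terminates a ping-pong network service.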
+ + +# +# + + +import asyncio +import os +import sys +import unittest +import uuid +import xmlrunner +import argparse +import logging + +import gi +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwcalYang', '1.0') + +from gi.repository import ( + NsrYang as nsryang, + RwCloudYang as rwcloudyang, + RwDts as rwdts, + RwLaunchpadYang as launchpadyang, + RwNsmYang as rwnsmyang, + RwNsrYang as rwnsryang, + RwResourceMgrYang as rmgryang, + RwcalYang as rwcalyang, +) + +from gi.repository.RwTypes import RwStatus +import rift.mano.examples.ping_pong_nsd as ping_pong_nsd +import rift.tasklets +import rift.test.dts +import rw_peas + + +openstack_info = { + 'username': 'pluto', + 'password': 'mypasswd', + 'auth_url': 'http://10.66.4.14:5000/v3/', + 'project_name': 'demo', + 'mgmt_network': 'private', + 'image_id': '03bafdd3-8faa-44d5-bb5d-571b1655232f', + 'vms': ['test1', 'test2'], + 'networks': ['testnet1', 'testnet2', 'testnet3'] + } + + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class XPaths(object): + @staticmethod + def nsd(k=None): + return ("C,/nsd:nsd-catalog/nsd:nsd" + + ("[nsd:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def vld(k=None): + return ("C,/vld:vld-catalog/vld:vld" + + ("[vld:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def vnfd(k=None): + return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" + + ("[vnfd:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def vnfr(k=None): + return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" + + ("[vnfr:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def vlr(k=None): + return ("D,/vlr:vlr-catalog/vlr:vlr" + + ("[vlr:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def nsd_ref_count(k=None): + return ("D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" + + ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else "")) + + @staticmethod + def vnfd_ref_count(k=None): + return ("D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" + + ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else "")) + + @staticmethod + def nsr_config(k=None): + return ("C,/nsr:ns-instance-config/nsr:nsr" + + ("[nsr:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def nsr_opdata(k=None): + return ("D,/nsr:ns-instance-opdata/nsr:nsr" + + ("[nsr:ns-instance-config-ref='{}']".format(k) if k is not None else "")) + + +class ManoQuerier(object): + def __init__(self, log, dts): + self.log = log + self.dts = dts + + @asyncio.coroutine + def _read_query(self, xpath, do_trace=False): + flags = rwdts.Flag.MERGE + flags += rwdts.Flag.TRACE if do_trace else 0 + res_iter = yield from self.dts.query_read( + xpath, flags=flags + ) + + results = [] + for i in res_iter: + result = yield from i + if result is not None: + results.append(result.result) + + return results + + @asyncio.coroutine + def get_nsr_opdatas(self, nsr_id=None): + return (yield from self._read_query(XPaths.nsr_opdata(nsr_id), True)) + + @asyncio.coroutine + def get_nsr_configs(self, nsr_id=None): + return (yield from self._read_query(XPaths.nsr_config(nsr_id))) + + @asyncio.coroutine + def get_vnfrs(self, vnfr_id=None): + return (yield from self._read_query(XPaths.vnfr(vnfr_id))) + + @asyncio.coroutine + def get_vlrs(self, vlr_id=None): + return (yield from self._read_query(XPaths.vlr(vlr_id))) + + @asyncio.coroutine + 
def get_nsd_ref_counts(self, nsd_id=None): + return (yield from self._read_query(XPaths.nsd_ref_count(nsd_id))) + + @asyncio.coroutine + def get_vnfd_ref_counts(self, vnfd_id=None): + return (yield from self._read_query(XPaths.vnfd_ref_count(vnfd_id))) + + @asyncio.coroutine + def delete_nsr(self, nsr_id): + with self.dts.transaction() as xact: + yield from self.dts.query_delete( + XPaths.nsr_config(nsr_id), + rwdts.Flag.TRACE, + #rwdts.Flag.ADVISE, + ) + + @asyncio.coroutine + def delete_nsd(self, nsd_id): + nsd_xpath = XPaths.nsd(nsd_id) + self.log.debug("Attempting to delete NSD with path = %s", nsd_xpath) + with self.dts.transaction() as xact: + yield from self.dts.query_delete( + nsd_xpath, + rwdts.Flag.ADVISE, + ) + + @asyncio.coroutine + def delete_vnfd(self, vnfd_id): + vnfd_xpath = XPaths.vnfd(vnfd_id) + self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath) + with self.dts.transaction() as xact: + yield from self.dts.query_delete( + vnfd_xpath, + rwdts.Flag.ADVISE, + ) + + @asyncio.coroutine + def update_nsd(self, nsd_id, nsd_msg): + nsd_xpath = XPaths.nsd(nsd_id) + self.log.debug("Attempting to update NSD with path = %s", nsd_xpath) + with self.dts.transaction() as xact: + yield from self.dts.query_update( + nsd_xpath, + rwdts.Flag.ADVISE, + nsd_msg, + ) + + @asyncio.coroutine + def update_vnfd(self, vnfd_id, vnfd_msg): + vnfd_xpath = XPaths.vnfd(vnfd_id) + self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath) + with self.dts.transaction() as xact: + yield from self.dts.query_update( + vnfd_xpath, + rwdts.Flag.ADVISE, + vnfd_msg, + ) + + +class ManoTestCase(rift.test.dts.AbstractDTSTest): + @asyncio.coroutine + def verify_nsr_state(self, nsr_id, state): + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + self.assertEqual(1, len(nsrs)) + nsr = nsrs[0] + + self.log.debug("Got nsr = %s", nsr) + self.assertEqual(state, nsr.operational_status) + + @asyncio.coroutine + def verify_vlr_state(self, vlr_id, state): + vlrs = yield from self.querier.get_vlrs(vlr_id) + self.assertEqual(1, len(vlrs)) + vlr = vlrs[0] + + self.assertEqual(state, vlr.operational_status) + + def verify_vdu_state(self, vdu, state): + self.assertEqual(state, vdu.operational_status) + + @asyncio.coroutine + def verify_vnf_state(self, vnfr_id, state): + vnfrs = yield from self.querier.get_vnfrs(vnfr_id) + self.assertEqual(1, len(vnfrs)) + vnfr = vnfrs[0] + + self.assertEqual(state, vnfr.operational_status) + + @asyncio.coroutine + def terminate_nsr(self, nsr_id): + self.log.debug("Terminating nsr id: %s", nsr_id) + yield from self.querier.delete_nsr(nsr_id) + + @asyncio.coroutine + def verify_nsr_deleted(self, nsr_id): + nsr_opdatas = yield from self.querier.get_nsr_opdatas(nsr_id) + self.assertEqual(0, len(nsr_opdatas)) + + nsr_configs = yield from self.querier.get_nsr_configs(nsr_id) + self.assertEqual(0, len(nsr_configs)) + + @asyncio.coroutine + def verify_num_vlrs(self, num_vlrs): + vlrs = yield from self.querier.get_vlrs() + self.assertEqual(num_vlrs, len(vlrs)) + + @asyncio.coroutine + def get_nsr_vlrs(self, nsr_id): + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + return [v.vlr_ref for v in nsrs[0].vlr] + + @asyncio.coroutine + def get_nsr_vnfs(self, nsr_id): + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + return nsrs[0].constituent_vnfr_ref + + @asyncio.coroutine + def get_vnf_vlrs(self, vnfr_id): + vnfrs = yield from self.querier.get_vnfrs(vnfr_id) + return [i.vlr_ref for i in vnfrs[0].internal_vlr] + + @asyncio.coroutine + def 
verify_num_nsr_vlrs(self, nsr_id, num_vlrs): + vlrs = yield from self.get_nsr_vlrs(nsr_id) + self.assertEqual(num_vlrs, len(vlrs)) + + @asyncio.coroutine + def verify_num_nsr_vnfrs(self, nsr_id, num_vnfs): + vnfs = yield from self.get_nsr_vnfs(nsr_id) + self.assertEqual(num_vnfs, len(vnfs)) + + @asyncio.coroutine + def verify_num_vnfr_vlrs(self, vnfr_id, num_vlrs): + vlrs = yield from self.get_vnf_vlrs(vnfr_id) + self.assertEqual(num_vlrs, len(vlrs)) + + @asyncio.coroutine + def get_vnf_vdus(self, vnfr_id): + vnfrs = yield from self.querier.get_vnfrs(vnfr_id) + return [i for i in vnfrs[0].vdur] + + @asyncio.coroutine + def verify_num_vnfr_vdus(self, vnfr_id, num_vdus): + vdus = yield from self.get_vnf_vdus(vnfr_id) + self.assertEqual(num_vdus, len(vdus)) + + @asyncio.coroutine + def verify_num_vnfrs(self, num_vnfrs): + vnfrs = yield from self.querier.get_vnfrs() + self.assertEqual(num_vnfrs, len(vnfrs)) + + @asyncio.coroutine + def verify_nsd_ref_count(self, nsd_id, num_ref): + nsd_ref_counts = yield from self.querier.get_nsd_ref_counts(nsd_id) + self.assertEqual(num_ref, nsd_ref_counts[0].instance_ref_count) + + +class DescriptorPublisher(object): + def __init__(self, log, loop, dts): + self.log = log + self.loop = loop + self.dts = dts + + self._registrations = [] + + @asyncio.coroutine + def publish(self, w_path, path, desc): + ready_event = asyncio.Event(loop=self.loop) + + @asyncio.coroutine + def on_ready(regh, status): + self.log.debug("Create element: %s, obj-type:%s obj:%s", + path, type(desc), desc) + with self.dts.transaction() as xact: + regh.create_element(path, desc, xact.xact) + self.log.debug("Created element: %s, obj:%s", path, desc) + ready_event.set() + + handler = rift.tasklets.DTS.RegistrationHandler( + on_ready=on_ready + ) + + self.log.debug("Registering path: %s, obj:%s", w_path, desc) + reg = yield from self.dts.register( + w_path, + handler, + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ + ) + self._registrations.append(reg) + self.log.debug("Registered path : %s", w_path) + yield from ready_event.wait() + + return reg + + def unpublish_all(self): + self.log.debug("Deregistering all published descriptors") + for reg in self._registrations: + reg.deregister() + + +class PingPongNsrConfigPublisher(object): + XPATH = "C,/nsr:ns-instance-config" + + def __init__(self, log, loop, dts, nsd_id, cloud_account_name): + self.dts = dts + self.log = log + self.loop = loop + self.ref = None + + self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig() + + nsr = rwnsryang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "ns1.{}".format(nsr.id) + nsr.nsd_ref = nsd_id + nsr.cloud_account = cloud_account_name + + inputs = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter() + inputs.xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(nsd_id) + inputs.value = "inigo montoya" + + nsr.input_parameter.append(inputs) + + self.nsr_config.nsr.append(nsr) + + @asyncio.coroutine + def register(self): + ready_event = asyncio.Event(loop=self.loop) + + @asyncio.coroutine + def on_ready(regh, status): + with self.dts.transaction() as xact: + regh.create_element( + PingPongNsrConfigPublisher.XPATH, + self.nsr_config, + xact=xact.xact, + ) + + ready_event.set() + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + if action == rwdts.QueryAction.READ: + xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath=PingPongNsrConfigPublisher.XPATH, + msg=self.nsr_config, + ) + elif action == rwdts.QueryAction.DELETE: + 
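+                # DELETE: reset the cached ns-instance-config and withdraw the published element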
self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig() + self.reg.delete_element( + PingPongNsrConfigPublisher.XPATH, + ) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self.log.debug("Registering path: %s", PingPongNsrConfigPublisher.XPATH) + self.reg = yield from self.dts.register( + PingPongNsrConfigPublisher.XPATH, + flags=rwdts.Flag.PUBLISHER, + handler=rift.tasklets.DTS.RegistrationHandler( + on_ready=on_ready, + on_prepare=on_prepare, + ), + ) + + yield from ready_event.wait() + + def deregister(self): + if self.reg is not None: + self.reg.deregister() + + +class PingPongDescriptorPublisher(object): + def __init__(self, log, loop, dts): + self.log = log + self.loop = loop + self.dts = dts + + self.querier = ManoQuerier(self.log, self.dts) + self.publisher = DescriptorPublisher(self.log, self.loop, self.dts) + self.nsr_config_publisher = None + + @property + def nsd_id(self): + return self.ping_pong_nsd.id + + @property + def ping_vnfd_id(self): + return self.ping_vnfd.id + + @property + def pong_vnfd_id(self): + return self.pong_vnfd.id + + @asyncio.coroutine + def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1): + self.ping_vnfd, self.pong_vnfd, self.ping_pong_nsd = \ + ping_pong_nsd.generate_ping_pong_descriptors( + pingcount=1, + external_vlr_count=num_external_vlrs, + internal_vlr_count=num_internal_vlrs, + num_vnf_vms=2, + ) + + # Publish ping_vnfd + xpath = XPaths.vnfd(self.ping_vnfd_id) + xpath_wild = XPaths.vnfd() + for obj in self.ping_vnfd.descriptor.vnfd: + self.log.debug("Publishing ping_vnfd path: %s - %s, type:%s, obj:%s", + xpath, xpath_wild, type(obj), obj) + yield from self.publisher.publish(xpath_wild, xpath, obj) + + # Publish pong_vnfd + xpath = XPaths.vnfd(self.pong_vnfd_id) + xpath_wild = XPaths.vnfd() + for obj in self.pong_vnfd.descriptor.vnfd: + self.log.debug("Publishing pong_vnfd path: %s, wild_path: %s, obj:%s", + xpath, xpath_wild, obj) + yield from self.publisher.publish(xpath_wild, xpath, obj) + + # Publish ping_pong_nsd + xpath = XPaths.nsd(self.nsd_id) + xpath_wild = XPaths.nsd() + for obj in self.ping_pong_nsd.descriptor.nsd: + self.log.debug("Publishing ping_pong nsd path: %s, wild_path: %s, obj:%s", + xpath, xpath_wild, obj) + yield from self.publisher.publish(xpath_wild, xpath, obj) + + self.log.debug("DONE - publish_desciptors") + + def unpublish_descriptors(self): + self.publisher.unpublish_all() + if self.nsr_config_publisher is not None: + self.nsr_config_publisher.deregister() + + @asyncio.coroutine + def publish_nsr_config(self, cloud_account_name): + self.nsr_config_publisher = PingPongNsrConfigPublisher( + self.log, + self.loop, + self.dts, + self.nsd_id, + cloud_account_name, + ) + + yield from self.nsr_config_publisher.register() + return self.nsr_config_publisher.nsr_config.nsr[0].id + + @asyncio.coroutine + def delete_nsd(self): + yield from self.querier.delete_nsd(self.ping_pong_nsd.id) + + @asyncio.coroutine + def delete_ping_vnfd(self): + yield from self.querier.delete_vnfd(self.ping_vnfd.id) + + @asyncio.coroutine + def update_nsd(self): + yield from self.querier.update_nsd( + self.ping_pong_nsd.id, + self.ping_pong_nsd.descriptor.nsd[0] + ) + + @asyncio.coroutine + def update_ping_vnfd(self): + yield from self.querier.update_vnfd( + self.ping_vnfd.id, + self.ping_vnfd.descriptor.vnfd[0] + ) + + +class VnsTestCase(rift.test.dts.AbstractDTSTest): + """ + DTS GI interface unittests + + Note: Each tests uses a list of asyncio.Events 
for staging through the + test. These are required here because we are bring up each coroutine + ("tasklet") at the same time and are not implementing any re-try + mechanisms. For instance, this is used in numerous tests to make sure that + a publisher is up and ready before the subscriber sends queries. Such + event lists should not be used in production software. + """ + + @classmethod + def configure_suite(cls, rwmain): + vns_dir = os.environ.get('VNS_DIR') + vnfm_dir = os.environ.get('VNFM_DIR') + nsm_dir = os.environ.get('NSM_DIR') + rm_dir = os.environ.get('RM_DIR') + + rwmain.add_tasklet(vns_dir, 'rwvnstasklet') + rwmain.add_tasklet(vnfm_dir, 'rwvnfmtasklet') + rwmain.add_tasklet(nsm_dir, 'rwnsmtasklet') + rwmain.add_tasklet(rm_dir, 'rwresmgrtasklet') + + @classmethod + def configure_schema(cls): + return rwnsmyang.get_schema() + + @classmethod + def configure_timeout(cls): + return 240 + + @staticmethod + def get_cal_account(account_type, account_name): + """ + Creates an object for class RwcalYang.Clo + """ + account = rwcloudyang.CloudAccount() + if account_type == 'mock': + account.name = account_name + account.account_type = "mock" + account.mock.username = "mock_user" + elif ((account_type == 'openstack_static') or (account_type == 'openstack_dynamic')): + account.name = account_name + account.account_type = 'openstack' + account.openstack.key = openstack_info['username'] + account.openstack.secret = openstack_info['password'] + account.openstack.auth_url = openstack_info['auth_url'] + account.openstack.tenant = openstack_info['project_name'] + account.openstack.mgmt_network = openstack_info['mgmt_network'] + return account + + @asyncio.coroutine + def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"): + account = self.get_cal_account(cloud_type, cloud_name) + account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name) + self.log.info("Configuring cloud-account: %s", account) + yield from dts.query_create(account_xpath, + rwdts.Flag.ADVISE | rwdts.Flag.TRACE, + account) + + @asyncio.coroutine + def wait_tasklets(self): + yield from asyncio.sleep(5, loop=self.loop) + + def configure_test(self, loop, test_id): + self.log.debug("STARTING - %s", self.id()) + self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop) + self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop, self.dts) + self.querier = ManoQuerier(self.log, self.dts) + + def test_create_nsr_record(self): + @asyncio.coroutine + def verify_nsr_opdata(termination=False): + self.log.debug("Verifying nsr opdata path = %s", XPaths.nsr_opdata()) + + while True: + nsrs = yield from self.querier.get_nsr_opdatas() + if termination: + self.assertEqual(0, len(nsrs)) + return + + nsr = nsrs[0] + self.log.debug("Got nsr record %s", nsr) + if nsr.operational_status == 'running': + self.log.debug("!!! 
Rcvd NSR with running status !!!") + break + + self.log.debug("Rcvd NSR with %s status", nsr.operational_status) + self.log.debug("Sleeping for 10 seconds") + yield from asyncio.sleep(10, loop=self.loop) + + @asyncio.coroutine + def verify_nsr_config(termination=False): + self.log.debug("Verifying nsr config path = %s", XPaths.nsr_config()) + + nsr_configs = yield from self.querier.get_nsr_configs() + self.assertEqual(1, len(nsr_configs)) + + nsr_config = nsr_configs[0] + self.assertEqual( + "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id), + nsr_config.input_parameter[0].xpath, + ) + + @asyncio.coroutine + def verify_vnfr_record(termination=False): + self.log.debug("Verifying vnfr record path = %s, Termination=%d", + XPaths.vnfr(), termination) + if termination: + for i in range(5): + vnfrs = yield from self.querier.get_vnfrs() + if len(vnfrs) == 0: + return True + + for vnfr in vnfrs: + self.log.debug("VNFR still exists = %s", vnfr) + + + assert len(vnfrs) == 0 + + while True: + vnfrs = yield from self.querier.get_vnfrs() + if len(vnfrs) != 0 and termination is False: + vnfr = vnfrs[0] + self.log.debug("Rcvd VNFR with %s status", vnfr.operational_status) + if vnfr.operational_status == 'running': + self.log.debug("!!! Rcvd VNFR with running status !!!") + return True + + elif vnfr.operational_status == "failed": + self.log.debug("!!! Rcvd VNFR with failed status !!!") + return False + + self.log.debug("Sleeping for 10 seconds") + yield from asyncio.sleep(10, loop=self.loop) + + @asyncio.coroutine + def verify_vlr_record(termination=False): + vlr_xpath = XPaths.vlr() + self.log.debug("Verifying vlr record path = %s, termination: %s", + vlr_xpath, termination) + res_iter = yield from self.dts.query_read(vlr_xpath) + + for i in res_iter: + result = yield from i + if termination: + self.assertIsNone(result) + + self.log.debug("Got vlr record %s", result) + + @asyncio.coroutine + def verify_nsd_ref_count(termination): + self.log.debug("Verifying nsd ref count= %s", XPaths.nsd_ref_count()) + res_iter = yield from self.dts.query_read(XPaths.nsd_ref_count()) + + for i in res_iter: + result = yield from i + self.log.debug("Got nsd ref count record %s", result) + + @asyncio.coroutine + def verify_vnfd_ref_count(termination): + self.log.debug("Verifying vnfd ref count= %s", XPaths.vnfd_ref_count()) + res_iter = yield from self.dts.query_read(XPaths.vnfd_ref_count()) + + for i in res_iter: + result = yield from i + self.log.debug("Got vnfd ref count record %s", result) + + @asyncio.coroutine + def verify_results(termination=False): + yield from verify_vnfr_record(termination) + yield from verify_vlr_record(termination) + yield from verify_nsr_opdata(termination) + yield from verify_nsr_config(termination) + yield from verify_nsd_ref_count(termination) + yield from verify_vnfd_ref_count(termination) + + @asyncio.coroutine + def terminate_ns(nsr_id): + xpath = XPaths.nsr_config(nsr_id) + self.log.debug("Terminating network service with path %s", xpath) + yield from self.dts.query_delete(xpath, flags=rwdts.Flag.ADVISE) + self.log.debug("Terminated network service with path %s", xpath) + + def configure_test(self, loop, test_id): + self.log.debug("STARTING - %s", self.id()) + self.tinfo = self.new_tinfo(self.id()) + self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop) + self.ping_pong = mano_ut.PingPongDescriptorPublisher(self.log, self.loop, self.dts) + self.querier = mano_ut.ManoQuerier(self.log, self.dts) + + # Add a task to wait for tasklets to come up + 
asyncio.ensure_future(self.wait_tasklets(), loop=self.loop) + + @asyncio.coroutine + def run_test(): + yield from self.wait_tasklets() + + cloud_type = "mock" + yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account") + + yield from self.ping_pong.publish_desciptors() + + # Attempt updating VNFD not in use + yield from self.ping_pong.update_ping_vnfd() + + # Attempt updating NSD not in use + yield from self.ping_pong.update_nsd() + + # Attempt deleting VNFD not in use + yield from self.ping_pong.delete_ping_vnfd() + + # Attempt deleting NSD not in use + yield from self.ping_pong.delete_nsd() + + yield from self.ping_pong.publish_desciptors() + + # Create an ns-instance-config element and prompt the creation of + # an NSR. + nsr_id = yield from self.ping_pong.publish_nsr_config("mock_account") + + yield from verify_results() + + # Attempt deleting VNFD in use + yield from self.ping_pong.delete_ping_vnfd() + + # Attempt deleting NSD in use + yield from self.ping_pong.delete_nsd() + + yield from terminate_ns(nsr_id) + + yield from asyncio.sleep(2, loop=self.loop) + self.log.debug("Verifying termination results") + yield from verify_results(termination=True) + self.log.debug("Verified termination results") + + self.log.debug("Attempting to delete VNFD for real") + yield from self.ping_pong.delete_ping_vnfd() + + self.log.debug("Attempting to delete NSD for real") + yield from self.ping_pong.delete_nsd() + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + +def main(): + top_dir = __file__[:__file__.find('/modules/core/')] + build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build') + launchpad_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build/rwlaunchpad') + + if 'VNS_DIR' not in os.environ: + os.environ['VNS_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwvns') + + if 'VNFM_DIR' not in os.environ: + os.environ['VNFM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwvnfm') + + if 'NSM_DIR' not in os.environ: + os.environ['NSM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwnsm') + + if 'RM_DIR' not in os.environ: + os.environ['RM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwresmgrtasklet') + + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + parser.add_argument('-n', '--no-runner', action='store_true') + args, unittest_args = parser.parse_known_args() + if args.no_runner: + runner = None + + VnsTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN + + unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args) + +if __name__ == '__main__': + main() + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/openmano_nsm_ut.py b/modules/core/mano/rwlaunchpad/test/openmano_nsm_ut.py new file mode 100755 index 0000000..d6947f0 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/openmano_nsm_ut.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import argparse +import asyncio +import logging +import os +import sys +import time +import unittest +import uuid + +import xmlrunner + +from gi.repository import ( + RwDts as rwdts, + RwLaunchpadYang as launchpadyang, + RwNsmYang as rwnsmyang, + RwCloudYang as rwcloudyang, + RwResourceMgrYang, + ) +import rift.tasklets +import rift.test.dts + +import mano_ut + + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class OpenManoNsmTestCase(mano_ut.ManoTestCase): + """ + DTS GI interface unittests + + Note: Each tests uses a list of asyncio.Events for staging through the + test. These are required here because we are bring up each coroutine + ("tasklet") at the same time and are not implementing any re-try + mechanisms. For instance, this is used in numerous tests to make sure that + a publisher is up and ready before the subscriber sends queries. Such + event lists should not be used in production software. + """ + + @classmethod + def configure_suite(cls, rwmain): + launchpad_build_dir = os.path.join( + cls.top_dir, + '.build/modules/core/mc/core_mc-build/rwlaunchpad' + ) + + rwmain.add_tasklet( + os.path.join(launchpad_build_dir, 'plugins/rwnsm'), + 'rwnsmtasklet' + ) + + cls.waited_for_tasklets = False + + @classmethod + def configure_schema(cls): + return rwnsmyang.get_schema() + + @classmethod + def configure_timeout(cls): + return 240 + + @asyncio.coroutine + def wait_tasklets(self): + if not OpenManoNsmTestCase.waited_for_tasklets: + OpenManoNsmTestCase.waited_for_tasklets = True + self._wait_event = asyncio.Event(loop=self.loop) + yield from asyncio.sleep(5, loop=self.loop) + self._wait_event.set() + + yield from self._wait_event.wait() + + @asyncio.coroutine + def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1): + yield from self.ping_pong.publish_desciptors( + num_external_vlrs, + num_internal_vlrs, + num_ping_vms + ) + + def unpublish_descriptors(self): + self.ping_pong.unpublish_descriptors() + + @asyncio.coroutine + def wait_until_nsr_active_or_failed(self, nsr_id, timeout_secs=20): + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + if len(nsrs) == 0: + continue + self.assertEqual(1, len(nsrs)) + if nsrs[0].operational_status in ['running', 'failed']: + return + + self.log.debug("Rcvd NSR with %s status", nsrs[0].operational_status) + yield from asyncio.sleep(2, loop=self.loop) + + self.assertIn(nsrs[0].operational_status, ['running', 'failed']) + + def configure_test(self, loop, test_id): + self.log.debug("STARTING - %s", self.id()) + self.tinfo = self.new_tinfo(self.id()) + self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop) + self.ping_pong = mano_ut.PingPongDescriptorPublisher(self.log, self.loop, self.dts) + self.querier = mano_ut.ManoQuerier(self.log, self.dts) + + # Add a task to wait for tasklets to come up + asyncio.ensure_future(self.wait_tasklets(), loop=self.loop) + + @asyncio.coroutine + def configure_cloud_account(self): + account_xpath = "C,/rw-cloud:cloud-account" + account = 
rwcloudyang.CloudAccount() + account.name = "openmano_name" + account.account_type = "openmano" + account.openmano.host = "10.64.5.73" + account.openmano.port = 9090 + account.openmano.tenant_id = "eecfd632-bef1-11e5-b5b8-0800273ab84b" + self.log.info("Configuring cloud-account: %s", account) + yield from self.dts.query_create( + account_xpath, + rwdts.Flag.ADVISE, + account, + ) + + @rift.test.dts.async_test + def test_ping_pong_nsm_instantiate(self): + yield from self.wait_tasklets() + yield from self.configure_cloud_account() + yield from self.publish_desciptors(num_internal_vlrs=0) + + nsr_id = yield from self.ping_pong.publish_nsr_config() + + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "running") + yield from self.verify_num_vlrs(0) + + yield from self.verify_num_nsr_vnfrs(nsr_id, 2) + + yield from self.verify_num_vnfrs(2) + nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id) + yield from self.verify_vnf_state(nsr_vnfs[0], "running") + yield from self.verify_vnf_state(nsr_vnfs[1], "running") + + yield from self.terminate_nsr(nsr_id) + yield from asyncio.sleep(2, loop=self.loop) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vnfrs(0) + +def main(): + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + parser.add_argument('-n', '--no-runner', action='store_true') + args, unittest_args = parser.parse_known_args() + if args.no_runner: + runner = None + + OpenManoNsmTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN + + unittest.main(testRunner=runner, argv=[sys.argv[0]]+unittest_args) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_test.py new file mode 100644 index 0000000..6f9fcbb --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_test.py @@ -0,0 +1,308 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
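The OpenMano NSM test above checks NS state by polling ns-instance-opdata with a bounded timeout (wait_until_nsr_active_or_failed) rather than sleeping for a fixed period. A minimal stand-alone sketch of that poll-until-terminal-state pattern follows; it assumes only a querier object exposing the get_nsr_opdatas(nsr_id) coroutine used throughout these tests, and the helper name, timeout, and poll interval are illustrative rather than part of the patch.

import asyncio
import time

@asyncio.coroutine
def wait_for_nsr_state(querier, loop, nsr_id,
                       states=("running", "failed"),
                       timeout_secs=120, poll_secs=2):
    """Poll NSR operational-status until it reaches one of `states` or times out.

    Returns the final operational-status string, or None if the timeout expires.
    """
    start = time.time()
    while (time.time() - start) < timeout_secs:
        # get_nsr_opdatas() returns a (possibly empty) list of NSR opdata records
        nsrs = yield from querier.get_nsr_opdatas(nsr_id)
        if nsrs and nsrs[0].operational_status in states:
            return nsrs[0].operational_status
        yield from asyncio.sleep(poll_secs, loop=loop)
    return None

A caller inside one of these test coroutines would then write, for example, status = yield from wait_for_nsr_state(self.querier, self.loop, nsr_id) and assert on the returned value.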
+ +""" +# +# + +@file lp_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid +import gi + +gi.require_version('RwlogMgmtYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') + +from gi.repository import ( + NsdYang, + NsrYang, + RwBaseYang, + RwCloudYang, + RwIwpYang, + RwlogMgmtYang, + RwNsmYang, + RwNsrYang, + RwResourceMgrYang, + RwConmanYang, + RwVnfdYang, + VldYang, + ) + +logging.basicConfig(level=logging.DEBUG) + + +RW_KT_UTM_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/kt_utm" + ) + +RW_KT_UTM_NSD_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/nsds/utm_only" + ) + + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + + +@pytest.fixture(scope='module') +def rwlog_mgmt_proxy(request, mgmt_session): + return mgmt_session.proxy(RwlogMgmtYang) + + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + + +@pytest.fixture(scope='session') +def kt_utm_vnfd_package_file(): + ktutm_pkg_file = os.path.join( + RW_KT_UTM_PKG_INSTALL_DIR, + "kt_utm_vnfd.tar.gz", + ) + if not os.path.exists(ktutm_pkg_file): + raise_package_error() + + return ktutm_pkg_file + +@pytest.fixture(scope='session') +def utm_only_nsd_package_file(): + ktutm_nsd_pkg_file = os.path.join( + RW_KT_UTM_NSD_PKG_INSTALL_DIR, + "utm_only_nsd.tar.gz", + ) + if not os.path.exists(ktutm_nsd_pkg_file): + raise_package_error() + + return ktutm_nsd_pkg_file + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), 
universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "UTM-only" + nsr.short_name = "UTM-only" + nsr.description = "1 VNFs with 5 VLs" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_logging(self, rwlog_mgmt_proxy): + logging = RwlogMgmtYang.Logging.from_dict({ + "console": { + "on": True, + "filter": { + "category": [{ + "name": "rw-generic", + "severity": "error" + }], + } + } + }) + rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging) + + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + # cloud_account.name = "cloudsim_proxy" + # cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "openstack" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.13:5000/v3/' + cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + "resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file): + logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file) + trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should only be a single vnfd" + vnfd = vnfds[0] + assert vnfd.name == "kt_utm_vnfd" + + def 
test_onboard_utm_only_nsd(self, logger, nsd_proxy, utm_only_nsd_package_file): + logger.info("Onboarding utm_onlynsd package: %s", utm_only_nsd_package_file) + trans_id = upload_descriptor(logger, utm_only_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + + def test_instantiate_utm_only_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + nsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py new file mode 100644 index 0000000..d366e0f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py @@ -0,0 +1,335 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file lp_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid +import gi + +gi.require_version('RwlogMgmtYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') + +from gi.repository import ( + NsdYang, + NsrYang, + RwBaseYang, + RwCloudYang, + RwIwpYang, + RwlogMgmtYang, + RwNsmYang, + RwNsrYang, + RwResourceMgrYang, + RwConmanYang, + RwVnfdYang, + VldYang, + ) + +logging.basicConfig(level=logging.DEBUG) + + +RW_KT_UTM_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/kt_utm" + ) + +RW_KT_WIMS_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/kt_wims" + ) + +RW_KT_UTM_WIMS_NSD_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/nsds/utm_wims" + ) + + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + + +@pytest.fixture(scope='module') +def rwlog_mgmt_proxy(request, mgmt_session): + return mgmt_session.proxy(RwlogMgmtYang) + + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return 
mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + + +@pytest.fixture(scope='session') +def kt_utm_vnfd_package_file(): + ktutm_pkg_file = os.path.join( + RW_KT_UTM_PKG_INSTALL_DIR, + "kt_utm_vnfd.tar.gz", + ) + if not os.path.exists(ktutm_pkg_file): + raise_package_error() + + return ktutm_pkg_file + +@pytest.fixture(scope='session') +def kt_wims_vnfd_package_file(): + ktwims_pkg_file = os.path.join( + RW_KT_WIMS_PKG_INSTALL_DIR, + "kt_wims_vnfd.tar.gz", + ) + if not os.path.exists(ktwims_pkg_file): + raise_package_error() + + return ktwims_pkg_file + +@pytest.fixture(scope='session') +def utm_wims_nsd_package_file(): + ktutm_wims_nsd_pkg_file = os.path.join( + RW_KT_UTM_WIMS_NSD_PKG_INSTALL_DIR, + "utm_wims_nsd.tar.gz", + ) + if not os.path.exists(ktutm_wims_nsd_pkg_file): + raise_package_error() + + return ktutm_wims_nsd_pkg_file + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "UTM-WIMS" + nsr.short_name = "UTM-WIMS" + nsr.description = "2 VNFs with 4 VLs" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_logging(self, rwlog_mgmt_proxy): + logging = RwlogMgmtYang.Logging.from_dict({ + "console": { + "on": True, + 
"filter": { + "category": [{ + "name": "rw-generic", + "severity": "error" + }], + } + } + }) + rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging) + + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + # cloud_account.name = "cloudsim_proxy" + # cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "openstack" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/' + cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + "resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file): + logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file) + trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should only be a single vnfd" + vnfd = vnfds[0] + assert vnfd.name == "kt_utm_vnfd" + + def test_onboard_ktwims_vnfd(self, logger, vnfd_proxy, kt_wims_vnfd_package_file): + logger.info("Onboarding kt_wims_vnfd package: %s", kt_wims_vnfd_package_file) + trans_id = upload_descriptor(logger, kt_wims_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should only be two vnfd" + assert "kt_wims_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_utm_wims_nsd(self, logger, nsd_proxy, utm_wims_nsd_package_file): + logger.info("Onboarding utm_wims_nsd package: %s", utm_wims_nsd_package_file) + trans_id = upload_descriptor(logger, utm_wims_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + + def test_instantiate_utm_wims_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + nsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id \ No newline at end of file diff 
--git a/modules/core/mano/rwlaunchpad/test/pytest/lp_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_test.py new file mode 100644 index 0000000..3253dae --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_test.py @@ -0,0 +1,392 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file lp_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid +import datetime + +import gi +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwlogMgmtYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') + +from gi.repository import ( + NsdYang, + NsrYang, + RwBaseYang, + RwCloudYang, + RwIwpYang, + RwlogMgmtYang, + RwNsmYang, + RwNsrYang, + RwResourceMgrYang, + RwConmanYang, + RwVnfdYang, + VldYang, + ) + +logging.basicConfig(level=logging.DEBUG) + + +RW_PING_PONG_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_ROOT"], + "images" + ) + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + + +@pytest.fixture(scope='module') +def rwlog_mgmt_proxy(request, mgmt_session): + return mgmt_session.proxy(RwlogMgmtYang) + + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + + +@pytest.fixture(scope='session') +def ping_vnfd_package_file(): + ping_pkg_file = os.path.join( + RW_PING_PONG_PKG_INSTALL_DIR, + 
"ping_vnfd_with_image.tar.gz", + ) + if not os.path.exists(ping_pkg_file): + raise_package_error() + + return ping_pkg_file + + +@pytest.fixture(scope='session') +def pong_vnfd_package_file(): + pong_pkg_file = os.path.join( + RW_PING_PONG_PKG_INSTALL_DIR, + "pong_vnfd_with_image.tar.gz", + ) + if not os.path.exists(pong_pkg_file): + raise_package_error() + + return pong_pkg_file + + +@pytest.fixture(scope='session') +def ping_pong_nsd_package_file(): + ping_pong_pkg_file = os.path.join( + RW_PING_PONG_PKG_INSTALL_DIR, + "ping_pong_nsd.tar.gz", + ) + if not os.path.exists(ping_pong_pkg_file): + raise_package_error() + + return ping_pong_pkg_file + + +def create_nsr_from_nsd_id(nsd_id): + nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "pingpong_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) + nsr.short_name = "nsr_short_name" + nsr.description = "This is a description" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + nsr.cloud_account = "openstack" + + param = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter() + param.xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:vendor' + param.value = "rift-o-matic" + + nsr.input_parameter.append(param) + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_logging(self, rwlog_mgmt_proxy): + logging = RwlogMgmtYang.Logging.from_dict({ + "console": { + "on": True, + "filter": { + "category": [{ + "name": "rw-generic", + "severity": "error" + }], + } + } + }) + rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging) + + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccount() + # cloud_account.name = "cloudsim_proxy" + # cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "openstack" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.96.4.2:5000/v3/' + cloud_account.openstack.tenant = 'mano1' + cloud_account.openstack.mgmt_network = 'private1' + + cloud_proxy.merge_config("/rw-cloud:cloud/account", cloud_account) + + def test_onboard_ping_vnfd(self, logger, vnfd_proxy, ping_vnfd_package_file): + logger.info("Onboarding ping_vnfd package: %s", ping_vnfd_package_file) + 
trans_id = upload_descriptor(logger, ping_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should only be a single vnfd" + vnfd = vnfds[0] + assert vnfd.name == "ping_vnfd" + + def test_onboard_pong_vnfd(self, logger, vnfd_proxy, pong_vnfd_package_file): + logger.info("Onboarding pong_vnfd package: %s", pong_vnfd_package_file) + trans_id = upload_descriptor(logger, pong_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_ping_pong_nsd(self, logger, nsd_proxy, ping_pong_nsd_package_file): + logger.info("Onboarding ping_pong_nsd package: %s", ping_pong_nsd_package_file) + trans_id = upload_descriptor(logger, ping_pong_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "ping_pong_nsd" + + def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + rwnsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id + + # logger.info("Waiting up to 30 seconds for ping and pong components to show " + # "up in show tasklet info") + + # start_time = time.time() + # while (time.time() - start_time) < 30: + # vcs_info = base_proxy.get('/vcs/info') + # components = vcs_info.components.component_info + + # def find_component_by_name(name): + # for component in components: + # if name in component.component_name: + # return component + + # logger.warning("Did not find %s component name in show tasklet info", + # name) + + # return None + + # """ + # ping_cluster_component = find_component_by_name( + # "rw_ping_vnfd:rwping_cluster" + # ) + # if ping_cluster_component is None: + # continue + + # pong_cluster_component = find_component_by_name( + # "rw_pong_vnfd:rwpong_cluster" + # ) + # if pong_cluster_component is None: + # continue + # """ + + # ping_vm_component = find_component_by_name( + # "rw_ping_vnfd:rwping_vm" + # ) + # if ping_vm_component is None: + # continue + + # pong_vm_component = find_component_by_name( + # "rw_pong_vnfd:rwpong_vm" + # ) + # if pong_vm_component is None: + # continue + + # ping_proc_component = find_component_by_name( + # "rw_ping_vnfd:rwping_proc" + # ) + # if ping_proc_component is None: + # continue + + # pong_proc_component = find_component_by_name( + # "rw_pong_vnfd:rwpong_proc" + # ) + # if pong_proc_component is None: + # continue + + # ping_tasklet_component = find_component_by_name( + # "rw_ping_vnfd:rwping_tasklet" + # ) + # if ping_tasklet_component is None: + # continue + + # pong_tasklet_component = find_component_by_name( + # "rw_pong_vnfd:rwpong_tasklet" + # ) + # if pong_tasklet_component is None: + # continue + + # logger.info("TEST SUCCESSFUL: All ping and pong components were found in show tasklet info") + # break + + # else: + # assert False, "Did not find all ping and pong component in time" + + #def 
test_terminate_ping_pong_ns(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + # nsr_configs = nsr_proxy.get_config('/ns-instance-config') + # nsr = nsr_configs.nsr[0] + # nsr_id = nsr.id + + # nsr_configs = nsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(nsr_id)) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py new file mode 100644 index 0000000..167e65a --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file lp_3vnfs_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test ExtVNF +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid + +import gi +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwNsmYang', '1.0') + + + +from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang + +logging.basicConfig(level=logging.DEBUG) + + +RW_VROUTER_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/vrouter" + ) +RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafgen" + ) +RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafsink" + ) +RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/nsds/tg_2vrouter_ts" + ) + + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def 
rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + +@pytest.fixture(scope='session') +def vrouter_vnfd_package_file(): + vrouter_pkg_file = os.path.join( + RW_VROUTER_PKG_INSTALL_DIR, + "vrouter_vnfd_with_epa.tar.gz", + ) + if not os.path.exists(vrouter_pkg_file): + raise_package_error() + + return vrouter_pkg_file + +@pytest.fixture(scope='session') +def tg_vnfd_package_file(): + tg_pkg_file = os.path.join( + RW_TRAFGEN_PKG_INSTALL_DIR, + "trafgen_vnfd_with_epa.tar.gz", + ) + if not os.path.exists(tg_pkg_file): + raise_package_error() + + return tg_pkg_file + +@pytest.fixture(scope='session') +def ts_vnfd_package_file(): + ts_pkg_file = os.path.join( + RW_TRAFSINK_PKG_INSTALL_DIR, + "trafsink_vnfd_with_epa.tar.gz", + ) + if not os.path.exists(ts_pkg_file): + raise_package_error() + + return ts_pkg_file + +@pytest.fixture(scope='session') +def tg_2vrouter_ts_nsd_package_file(): + tg_2vrouter_ts_nsd_pkg_file = os.path.join( + RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR, + "tg_2vrouter_ts_nsd_with_epa.tar.gz", + ) + if not os.path.exists(tg_2vrouter_ts_nsd_pkg_file): + raise_package_error() + + return tg_2vrouter_ts_nsd_pkg_file + + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "TG-2Vrouter-TS EPA" + nsr.short_name = "TG-2Vrouter-TS EPA" + nsr.description = "4 VNFs with Trafgen, 2 Vrouters and Trafsink EPA" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + #cloud_account.name = "cloudsim_proxy" + #cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "riftuser1" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/' + 
cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + "resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file): + logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file) + trans_id = upload_descriptor(logger, tg_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should be one vnfds" + assert "trafgen_vnfd" in [vnfds[0].name] + + def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file): + logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file) + trans_id = upload_descriptor(logger, vrouter_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file): + logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file) + trans_id = upload_descriptor(logger, ts_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 3, "There should be three vnfds" + assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name] + + def test_onboard_tg_2vrouter_ts_nsd(self, logger, nsd_proxy, tg_2vrouter_ts_nsd_package_file): + logger.info("Onboarding tg_2vrouter_ts nsd package: %s", tg_2vrouter_ts_nsd_package_file) + trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "tg_vrouter_ts_nsd" + assert nsd.short_name == "tg_2vrouter_ts_nsd" + + def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + nsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id \ No newline at end of file diff --git 
a/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py new file mode 100644 index 0000000..9570002 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file lp_3vnfs_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test ExtVNF +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid + +import gi +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwNsmYang', '1.0') + + + +from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang + +logging.basicConfig(level=logging.DEBUG) + + +RW_VROUTER_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/vrouter" + ) +RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafgen" + ) +RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafsink" + ) +RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/nsds/tg_2vrouter_ts" + ) + + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + +@pytest.fixture(scope='module') +def nsm_proxy(request, 
mgmt_session): + return mgmt_session.proxy(RwNsmYang) + +@pytest.fixture(scope='session') +def vrouter_vnfd_package_file(): + vrouter_pkg_file = os.path.join( + RW_VROUTER_PKG_INSTALL_DIR, + "vrouter_vnfd.tar.gz", + ) + if not os.path.exists(vrouter_pkg_file): + raise_package_error() + + return vrouter_pkg_file + +@pytest.fixture(scope='session') +def tg_vnfd_package_file(): + tg_pkg_file = os.path.join( + RW_TRAFGEN_PKG_INSTALL_DIR, + "trafgen_vnfd.tar.gz", + ) + if not os.path.exists(tg_pkg_file): + raise_package_error() + + return tg_pkg_file + +@pytest.fixture(scope='session') +def ts_vnfd_package_file(): + ts_pkg_file = os.path.join( + RW_TRAFSINK_PKG_INSTALL_DIR, + "trafsink_vnfd.tar.gz", + ) + if not os.path.exists(ts_pkg_file): + raise_package_error() + + return ts_pkg_file + +@pytest.fixture(scope='session') +def tg_2vrouter_ts_nsd_package_file(): + tg_2vrouter_ts_nsd_pkg_file = os.path.join( + RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR, + "tg_2vrouter_ts_nsd.tar.gz", + ) + if not os.path.exists(tg_2vrouter_ts_nsd_pkg_file): + raise_package_error() + + return tg_2vrouter_ts_nsd_pkg_file + + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "TG-2Vrouter-TS EPA" + nsr.short_name = "TG-2Vrouter-TS EPA" + nsr.description = "4 VNFs with Trafgen, 2 Vrouters and Trafsink EPA" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + #cloud_account.name = "cloudsim_proxy" + #cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "riftuser1" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/' + cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + 
"resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file): + logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file) + trans_id = upload_descriptor(logger, tg_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should be one vnfds" + assert "trafgen_vnfd" in [vnfds[0].name] + + def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file): + logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file) + trans_id = upload_descriptor(logger, vrouter_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file): + logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file) + trans_id = upload_descriptor(logger, ts_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 3, "There should be three vnfds" + assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name] + + def test_onboard_tg_2vrouter_ts_nsd(self, logger, nsd_proxy, tg_2vrouter_ts_nsd_package_file): + logger.info("Onboarding tg_2vrouter_ts nsd package: %s", tg_2vrouter_ts_nsd_package_file) + trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "tg_vrouter_ts_nsd" + assert nsd.short_name == "tg_2vrouter_ts_nsd" + + def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + nsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py new file mode 100644 index 0000000..fb41684 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file lp_3vnfs_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test ExtVNF +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid + +import gi +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwNsmYang', '1.0') + + +from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang + +logging.basicConfig(level=logging.DEBUG) + + +RW_VROUTER_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/vrouter" + ) +RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafgen" + ) +RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafsink" + ) +RW_TG_VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/nsds/tg_vrouter_ts" + ) + + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + +@pytest.fixture(scope='session') +def vrouter_vnfd_package_file(): + vrouter_pkg_file = os.path.join( + RW_VROUTER_PKG_INSTALL_DIR, + "vrouter_vnfd_with_epa_sriov.tar.gz", + ) + if not os.path.exists(vrouter_pkg_file): + raise_package_error() + + return vrouter_pkg_file + +@pytest.fixture(scope='session') +def tg_vnfd_package_file(): + 
tg_pkg_file = os.path.join( + RW_TRAFGEN_PKG_INSTALL_DIR, + "trafgen_vnfd_with_epa_sriov.tar.gz", + ) + if not os.path.exists(tg_pkg_file): + raise_package_error() + + return tg_pkg_file + +@pytest.fixture(scope='session') +def ts_vnfd_package_file(): + ts_pkg_file = os.path.join( + RW_TRAFSINK_PKG_INSTALL_DIR, + "trafsink_vnfd_with_epa_sriov.tar.gz", + ) + if not os.path.exists(ts_pkg_file): + raise_package_error() + + return ts_pkg_file + +@pytest.fixture(scope='session') +def tg_vrouter_ts_nsd_package_file(): + tg_vrouter_ts_nsd_pkg_file = os.path.join( + RW_TG_VROUTER_TS_NSD_PKG_INSTALL_DIR, + "tg_vrouter_ts_nsd_with_epa_sriov.tar.gz", + ) + if not os.path.exists(tg_vrouter_ts_nsd_pkg_file): + raise_package_error() + + return tg_vrouter_ts_nsd_pkg_file + + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "TG-Vrouter-TS-EPA-SRIOV" + nsr.short_name = "TG-Vrouter-TS-EPA-SRIOV" + nsr.description = "3 VNFs with Trafgen, Vrouter and Trafsink EPA SRIOV" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + #cloud_account.name = "cloudsim_proxy" + #cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "riftuser1" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/' + cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + "resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 
'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file): + logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file) + trans_id = upload_descriptor(logger, tg_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should be one vnfds" + assert "trafgen_vnfd" in [vnfds[0].name] + + def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file): + logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file) + trans_id = upload_descriptor(logger, vrouter_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file): + logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file) + trans_id = upload_descriptor(logger, ts_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 3, "There should be three vnfds" + assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name] + + def test_onboard_tg_vrouter_ts_nsd(self, logger, nsd_proxy, tg_vrouter_ts_nsd_package_file): + logger.info("Onboarding tg_vrouter_ts nsd package: %s", tg_vrouter_ts_nsd_package_file) + trans_id = upload_descriptor(logger, tg_vrouter_ts_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "tg_vrouter_ts_nsd" + + def test_instantiate_tg_vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + nsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/racfg/launchpad_module_test.racfg b/modules/core/mano/rwlaunchpad/test/racfg/launchpad_module_test.racfg new file mode 100644 index 0000000..9a13244 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/racfg/launchpad_module_test.racfg @@ -0,0 +1,19 @@ +{ + "test_name":"TC_LAUNCHPAD_MODULE_0100", + "commandline":"./launchpad_module_test", + "target_vm":"VM", + "test_description":"Module test for launchpad", + "run_as_root": true, + "status":"working", + "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"], + "timelimit": 600, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/test/utest_rwmonitor.py b/modules/core/mano/rwlaunchpad/test/utest_rwmonitor.py new file mode 
100755 index 0000000..e692956 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/utest_rwmonitor.py @@ -0,0 +1,454 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import argparse +import asyncio +import concurrent.futures +import logging +import os +import sys +import unittest +import uuid +import xmlrunner + +import gi +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwVnfrYang', '1.0') +gi.require_version('RwTypes', '1.0') + +from gi.repository import ( + RwcalYang, + RwVnfrYang, + RwTypes, + VnfrYang, + NsrYang, + ) + +from rift.tasklets.rwmonitor.core import (RecordManager, NfviMetricsAggregator) + + +class MockTasklet(object): + def __init__(self, dts, log, loop, records): + self.dts = dts + self.log = log + self.loop = loop + self.records = records + self.polling_period = 0 + self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=16) + + +def make_nsr(ns_instance_config_ref=str(uuid.uuid4())): + nsr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr() + nsr.ns_instance_config_ref = ns_instance_config_ref + return nsr + +def make_vnfr(id=str(uuid.uuid4())): + vnfr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr() + vnfr.id = id + return vnfr + +def make_vdur(id=str(uuid.uuid4()), vim_id=str(uuid.uuid4())): + vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur() + vdur.id = id + vdur.vim_id = vim_id + return vdur + + +class MockNfviMonitorPlugin(object): + def __init__(self): + self.metrics = dict() + + def nfvi_metrics(self, account, vim_id): + key = (account, vim_id) + + if key in self.metrics: + return RwTypes.RwStatus.SUCCESS, self.metrics[key] + + metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + status = RwTypes.RwStatus.FAILURE + + return status, metrics + + +class TestAggregator(unittest.TestCase): + """ + The NfviMetricsAggregator queries NFVI metrics from VIM components and + aggregates the data at the VNF and NS levels. This test case validates + that the aggregation happens as expected.
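+    For example, the per-VDU vCPU counts configured in setUp (4 + 4 + 8) should roll up to a total of 16 vCPUs at the NS level.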
+ """ + + def setUp(self): + self.nfvi_monitor = MockNfviMonitorPlugin() + self.cloud_account = RwcalYang.CloudAccount( + name="test-account", + account_type="mock", + ), + + # Create a simple record hierarchy to represent the system + self.records = RecordManager() + + nsr = make_nsr('test-nsr') + + vnfr_1 = make_vnfr('test-vnfr-1') + vnfr_2 = make_vnfr('test-vnfr-1') + + vdur_1 = make_vdur(vim_id='test-vdur-1') + vdur_1.vm_flavor.vcpu_count = 4 + vdur_1.vm_flavor.memory_mb = 16e3 + vdur_1.vm_flavor.storage_gb = 1e3 + + vdur_2 = make_vdur(vim_id='test-vdur-2') + vdur_2.vm_flavor.vcpu_count = 4 + vdur_2.vm_flavor.memory_mb = 16e3 + vdur_2.vm_flavor.storage_gb = 1e3 + + vdur_3 = make_vdur(vim_id='test-vdur-3') + vdur_3.vm_flavor.vcpu_count = 8 + vdur_3.vm_flavor.memory_mb = 32e3 + vdur_3.vm_flavor.storage_gb = 1e3 + + nsr.constituent_vnfr_ref.append(vnfr_1.id) + nsr.constituent_vnfr_ref.append(vnfr_2.id) + + vnfr_1.vdur.append(vdur_1) + vnfr_1.vdur.append(vdur_2) + vnfr_2.vdur.append(vdur_3) + + self.records.add_nsr(nsr) + self.records.add_vnfr(vnfr_1) + self.records.add_vnfr(vnfr_2) + + # Populate the NFVI monitor with static data + vdu_metrics_1 = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + vdu_metrics_1.vcpu.utilization = 10.0 + vdu_metrics_1.memory.used = 2e9 + vdu_metrics_1.storage.used = 1e10 + vdu_metrics_1.network.incoming.bytes = 1e5 + vdu_metrics_1.network.incoming.packets = 1e3 + vdu_metrics_1.network.incoming.byte_rate = 1e6 + vdu_metrics_1.network.incoming.packet_rate = 1e4 + vdu_metrics_1.network.outgoing.bytes = 1e5 + vdu_metrics_1.network.outgoing.packets = 1e3 + vdu_metrics_1.network.outgoing.byte_rate = 1e6 + vdu_metrics_1.network.outgoing.packet_rate = 1e4 + + vdu_metrics_2 = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + vdu_metrics_2.vcpu.utilization = 10.0 + vdu_metrics_2.memory.used = 2e9 + vdu_metrics_2.storage.used = 1e10 + vdu_metrics_2.network.incoming.bytes = 1e5 + vdu_metrics_2.network.incoming.packets = 1e3 + vdu_metrics_2.network.incoming.byte_rate = 1e6 + vdu_metrics_2.network.incoming.packet_rate = 1e4 + vdu_metrics_2.network.outgoing.bytes = 1e5 + vdu_metrics_2.network.outgoing.packets = 1e3 + vdu_metrics_2.network.outgoing.byte_rate = 1e6 + vdu_metrics_2.network.outgoing.packet_rate = 1e4 + + vdu_metrics_3 = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + vdu_metrics_3.vcpu.utilization = 20.0 + vdu_metrics_3.memory.used = 28e9 + vdu_metrics_3.storage.used = 1e10 + vdu_metrics_3.network.incoming.bytes = 1e5 + vdu_metrics_3.network.incoming.packets = 1e3 + vdu_metrics_3.network.incoming.byte_rate = 1e6 + vdu_metrics_3.network.incoming.packet_rate = 1e4 + vdu_metrics_3.network.outgoing.bytes = 1e5 + vdu_metrics_3.network.outgoing.packets = 1e3 + vdu_metrics_3.network.outgoing.byte_rate = 1e6 + vdu_metrics_3.network.outgoing.packet_rate = 1e4 + + metrics = self.nfvi_monitor.metrics + metrics[(self.cloud_account, vdur_1.vim_id)] = vdu_metrics_1 + metrics[(self.cloud_account, vdur_2.vim_id)] = vdu_metrics_2 + metrics[(self.cloud_account, vdur_3.vim_id)] = vdu_metrics_3 + + def test_aggregation(self): + """ + The hierarchy of the network service tested here is, + + test-nsr + |-- test-vnfr-1 + | |-- test-vdur-1 + | \-- test-vdur-2 + \-- test-vnfr-2 + \-- test-vdur-3 + + """ + loop = asyncio.get_event_loop() + + tasklet = MockTasklet( + dts=None, + loop=loop, + log=logging.getLogger(), + records=self.records, + ) + + # Create an instance of the NfviMetricsAggregator using a mock cloud + # account and NFVI monitor + 
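+        # (the MockTasklet supplies the logger, event loop, thread pool executor and record manager used by the aggregator)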
aggregator = NfviMetricsAggregator( + tasklet=tasklet, + cloud_account=self.cloud_account, + nfvi_monitor=self.nfvi_monitor, + ) + + # Run the event loop to retrieve the metrics from the aggregator + task = loop.create_task(aggregator.request_ns_metrics('test-nsr')) + loop.run_until_complete(task) + + ns_metrics = task.result() + + # Validate the metrics returned by the aggregator + self.assertEqual(ns_metrics.vm.active_vm, 3) + self.assertEqual(ns_metrics.vm.inactive_vm, 0) + + self.assertEqual(ns_metrics.vcpu.total, 16) + self.assertEqual(ns_metrics.vcpu.utilization, 15.0) + + self.assertEqual(ns_metrics.memory.used, 32e9) + self.assertEqual(ns_metrics.memory.total, 64e9) + self.assertEqual(ns_metrics.memory.utilization, 50.0) + + self.assertEqual(ns_metrics.storage.used, 30e9) + self.assertEqual(ns_metrics.storage.total, 3e12) + self.assertEqual(ns_metrics.storage.utilization, 1.0) + + self.assertEqual(ns_metrics.network.incoming.bytes, 3e5) + self.assertEqual(ns_metrics.network.incoming.packets, 3e3) + self.assertEqual(ns_metrics.network.incoming.byte_rate, 3e6) + self.assertEqual(ns_metrics.network.incoming.packet_rate, 3e4) + + self.assertEqual(ns_metrics.network.outgoing.bytes, 3e5) + self.assertEqual(ns_metrics.network.outgoing.packets, 3e3) + self.assertEqual(ns_metrics.network.outgoing.byte_rate, 3e6) + self.assertEqual(ns_metrics.network.outgoing.packet_rate, 3e4) + + def test_publish_nfvi_metrics(self): + loop = asyncio.get_event_loop() + + class RegistrationHandle(object): + """ + Normally the aggregator uses the DTS RegistrationHandle to publish + the NFVI metrics. This placeholder class is used to record the + first NFVI metric data published by the aggregator, and then + removes the NSR so that the aggregator terminates. + + """ + + def __init__(self, test): + self.xpath = None + self.data = None + self.test = test + + def deregister(self): + pass + + def create_element(self, xpath, data): + pass + + def update_element(self, xpath, data): + # Record the results + self.xpath = xpath + self.data = data + + # Removing the NSR from the record manager will cause the + # coroutine responsible for publishing the NFVI metric data to + # terminate + self.test.records.remove_nsr('test-nsr') + + @asyncio.coroutine + def delete_element(self, xpath): + assert xpath == self.xpath + + class Dts(object): + """ + Placeholder Dts class that is used solely for the purpose of + returning a RegistrationHandle to the aggregator. + + """ + def __init__(self, test): + self.handle = RegistrationHandle(test) + + @asyncio.coroutine + def register(self, *args, **kwargs): + return self.handle + + dts = Dts(self) + + tasklet = MockTasklet( + dts=dts, + loop=loop, + log=logging.getLogger(), + records=self.records, + ) + + # Create an instance of the NfviMetricsAggregator using a mock cloud + # account and NFVI monitor + aggregator = NfviMetricsAggregator( + tasklet=tasklet, + cloud_account=self.cloud_account, + nfvi_monitor=self.nfvi_monitor, + ) + + # Create a coroutine wrapper to timeout the test if it takes too long. 
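+        # asyncio.wait_for() raises asyncio.TimeoutError if publish_nfvi_metrics() has not completed within one second.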
+ @asyncio.coroutine + def timeout_wrapper(): + coro = aggregator.publish_nfvi_metrics('test-nsr') + yield from asyncio.wait_for(coro, timeout=1) + + loop.run_until_complete(timeout_wrapper()) + + # Verify the data published by the aggregator + self.assertEqual(dts.handle.data.vm.active_vm, 3) + self.assertEqual(dts.handle.data.vm.inactive_vm, 0) + + self.assertEqual(dts.handle.data.vcpu.total, 16) + self.assertEqual(dts.handle.data.vcpu.utilization, 15.0) + + self.assertEqual(dts.handle.data.memory.used, 32e9) + self.assertEqual(dts.handle.data.memory.total, 64e9) + self.assertEqual(dts.handle.data.memory.utilization, 50.0) + + self.assertEqual(dts.handle.data.storage.used, 30e9) + self.assertEqual(dts.handle.data.storage.total, 3e12) + self.assertEqual(dts.handle.data.storage.utilization, 1.0) + + self.assertEqual(dts.handle.data.network.incoming.bytes, 3e5) + self.assertEqual(dts.handle.data.network.incoming.packets, 3e3) + self.assertEqual(dts.handle.data.network.incoming.byte_rate, 3e6) + self.assertEqual(dts.handle.data.network.incoming.packet_rate, 3e4) + + self.assertEqual(dts.handle.data.network.outgoing.bytes, 3e5) + self.assertEqual(dts.handle.data.network.outgoing.packets, 3e3) + self.assertEqual(dts.handle.data.network.outgoing.byte_rate, 3e6) + self.assertEqual(dts.handle.data.network.outgoing.packet_rate, 3e4) + + +class TestRecordManager(unittest.TestCase): + def setUp(self): + pass + + def test_add_and_remove_nsr(self): + records = RecordManager() + + # Create an empty NSR and add it to the record manager + nsr = make_nsr() + records.add_nsr(nsr) + + # The record manager should ignore this NSR because it contains no + # VNFRs + self.assertFalse(records.has_nsr(nsr.ns_instance_config_ref)) + + + # Now add a VNFR (with a VDUR) to the NSR and, once again, add it to + # the record manager + vdur = make_vdur() + vnfr = make_vnfr() + + vnfr.vdur.append(vdur) + + nsr.constituent_vnfr_ref.append(vnfr.id) + records.add_nsr(nsr) + + # The mapping from the NSR to the VNFR has been added, but the + # relationship between the VNFR and the VDUR is not added. + self.assertTrue(records.has_nsr(nsr.ns_instance_config_ref)) + self.assertFalse(records.has_vnfr(vnfr.id)) + + + # Try adding the same NSR again. The record manager should be + # unchanged. + records.add_nsr(nsr) + + self.assertEqual(1, len(records._nsr_to_vnfrs.keys())) + self.assertEqual(1, len(records._nsr_to_vnfrs.values())) + + + # Now remove the NSR and check that the internal structures have been + # properly cleaned up. + records.remove_nsr(nsr.ns_instance_config_ref) + + self.assertFalse(records.has_nsr(nsr.ns_instance_config_ref)) + self.assertFalse(records.has_vnfr(vnfr.id)) + + def test_add_and_remove_vnfr(self): + records = RecordManager() + + # Create an empty VNFR and add it to the record manager + vnfr = make_vnfr() + records.add_vnfr(vnfr) + + # The record manager should ignore this VNFR because it contains no + # VDURs + self.assertFalse(records.has_vnfr(vnfr.id)) + + + # Now add a VDUR to the VNFR and, once again, add it to the record + # manager. + vdur = make_vdur() + vnfr.vdur.append(vdur) + + records.add_vnfr(vnfr) + + # The mapping from the VNFR to the VDUR has been added, and the VDUR + # has been added the internal dictionary for mapping a vim_id to a + # VDUR. + self.assertTrue(records.has_vnfr(vnfr.id)) + self.assertIn(vdur.vim_id, records._vdurs) + + + # Try adding the same VNFR again. The record manager should be + # unchanged. 
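+        # (a duplicate add must not create additional vnfr-to-vdur mappings or vim_id entries)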
+ records.add_vnfr(vnfr) + + self.assertEqual(1, len(records._vnfr_to_vdurs.keys())) + self.assertEqual(1, len(records._vnfr_to_vdurs.values())) + self.assertEqual(1, len(records._vdurs)) + + + # Now remove the VNFR and check that the internal structures have been + # properly cleaned up. + records.remove_vnfr(vnfr.id) + + self.assertFalse(records.has_vnfr(vnfr.id)) + self.assertNotIn(vdur.vim_id, records._vdurs) + + +def main(argv=sys.argv[1:]): + logging.basicConfig(format='TEST %(message)s') + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + + args = parser.parse_args(argv) + + # Set the global logging level + logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR) + + # The unittest framework requires a program name, so use the name of this + # file instead (we do not want to have to pass a fake program name to main + # when this is called from the interpreter). + unittest.main(argv=[__file__] + argv, + testRunner=xmlrunner.XMLTestRunner( + output=os.environ["RIFT_MODULE_TEST"])) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/utest_rwnsm.py b/modules/core/mano/rwlaunchpad/test/utest_rwnsm.py new file mode 100755 index 0000000..44e6dda --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/utest_rwnsm.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import argparse +import logging +import os +import sys +import unittest +import uuid +import xmlrunner + +from gi.repository import ( + NsdYang, + NsrYang, + ) + +logger = logging.getLogger('test-rwnsmtasklet') + +import rift.tasklets.rwnsmtasklet.rwnsmtasklet as rwnsmtasklet +import rift.tasklets.rwnsmtasklet.xpath as rwxpath + +class TestGiXpath(unittest.TestCase): + def setUp(self): + rwxpath.reset_cache() + + def test_nsd_elements(self): + """ + Test that a particular element in a list is correctly retrieved. In + this case, we are trying to retrieve an NSD from the NSD catalog. + + """ + # Create the initial NSD catalog + nsd_catalog = NsdYang.YangData_Nsd_NsdCatalog() + + # Create an NSD, set its 'id', and add it to the catalog + nsd_id = str(uuid.uuid4()) + nsd_catalog.nsd.append( + NsdYang.YangData_Nsd_NsdCatalog_Nsd( + id=nsd_id, + ) + ) + + # Retrieve the NSD using an xpath expression + xpath = '/nsd:nsd-catalog/nsd:nsd[nsd:id={}]'.format(nsd_id) + nsd = rwxpath.getxattr(nsd_catalog, xpath) + + self.assertEqual(nsd_id, nsd.id) + + # Modify the name of the NSD using an xpath expression + rwxpath.setxattr(nsd_catalog, xpath + "/nsd:name", "test-name") + + name = rwxpath.getxattr(nsd_catalog, xpath + "/nsd:name") + self.assertEqual("test-name", name) + + def test_nsd_scalar_fields(self): + """ + Test that setxattr correctly sets the value specified by an xpath.
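+    For example, writing "test-name" through /nsd:nsd-catalog/nsd:nsd/nsd:name should be reflected in nsd.name.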
+ + """ + # Define a simple NSD + nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() + + # Check that the unset fields are in fact set to None + self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name")) + self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name")) + + # Set the values of the 'name' and 'short-name' fields + rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name", "test-name") + rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name", "test-short-name") + + # Check that the 'name' and 'short-name' fields are correctly set + self.assertEqual(nsd.name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name")) + self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name")) + + +class TestInputParameterSubstitution(unittest.TestCase): + def setUp(self): + self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger) + + def test_null_arguments(self): + """ + If None is passed to the substitutor for either the NSD or the NSR + config, no exception should be raised. + + """ + nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() + nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + + self.substitute_input_parameters(None, None) + self.substitute_input_parameters(nsd, None) + self.substitute_input_parameters(None, nsr_config) + + def test_illegal_input_parameter(self): + """ + In the NSD there is a list of the parameters that are allowed to be + sbustituted by input parameters. This test checks that when an input + parameter is provided in the NSR config that is not in the NSD, it is + not applied. + + """ + # Define the original NSD + nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() + nsd.name = "robert" + nsd.short_name = "bob" + + # Define which parameters may be modified + nsd.input_parameter_xpath.append( + NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name", + label="NSD Name", + ) + ) + + # Define the input parameters that are intended to be modified + nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr_config.input_parameter.extend([ + NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name", + value="alice", + ), + NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name", + value="alice", + ), + ]) + + self.substitute_input_parameters(nsd, nsr_config) + + # Verify that only the parameter in the input_parameter_xpath list is + # modified after the input parameters have been applied. + self.assertEqual("alice", nsd.name) + self.assertEqual("bob", nsd.short_name) + + def test_substitution(self): + """ + Test that substitution of input parameters occurs as expected. 
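+    Both xpaths listed in input_parameter_xpath are expected to be overwritten with the values supplied in the NSR config.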
+ + """ + # Define the original NSD + nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() + nsd.name = "robert" + nsd.short_name = "bob" + + # Define which parameters may be modified + nsd.input_parameter_xpath.extend([ + NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name", + label="NSD Name", + ), + NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name", + label="NSD Short Name", + ), + ]) + + # Define the input parameters that are intended to be modified + nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr_config.input_parameter.extend([ + NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name", + value="robert", + ), + NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name", + value="bob", + ), + ]) + + self.substitute_input_parameters(nsd, nsr_config) + + # Verify that both the 'name' and 'short-name' fields are correctly + # replaced. + self.assertEqual("robert", nsd.name) + self.assertEqual("bob", nsd.short_name) + + +def main(argv=sys.argv[1:]): + logging.basicConfig(format='TEST %(message)s') + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + + args = parser.parse_args(argv) + + # Set the global logging level + logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.FATAL) + + # Make the test logger very quiet + logger.addHandler(logging.NullHandler()) + + # The unittest framework requires a program name, so use the name of this + # file instead (we do not want to have to pass a fake program name to main + # when this is called from the interpreter). + unittest.main(argv=[__file__] + argv, + testRunner=xmlrunner.XMLTestRunner( + output=os.environ["RIFT_MODULE_TEST"])) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/utest_uploader.py b/modules/core/mano/rwlaunchpad/test/utest_uploader.py new file mode 100755 index 0000000..058ab18 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/utest_uploader.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + + +import argparse +import io +import logging +import os +import random +import string +import sys +import unittest +import xmlrunner + +from rift.tasklets.rwlaunchpad.uploader import ( + boundary_search, + extract_package, + ) + + +message_template = """ +---------------------------------------- +POST /{url} HTTP/1.1 +User-Agent: curl/7.32.0 +Host: localhost:1337 +Accept: */* +Content-Length: {length} +Expect: 100-continue +Content-Type: multipart/form-data; boundary={boundary} + +{boundary} +Content-Disposition: form-data; name=descriptor + +{binary} +{boundary}-- +""" + +def random_string(ncharacters): + refs = string.ascii_lowercase + '\n' + return ''.join(random.choice(refs) for _ in range(ncharacters)) + +class TestBoundarySearch(unittest.TestCase): + """ + The boundary_search function is used to efficiently search for a boundary + string of a message that has been saved to file. It searches the file + without loading it all into memory. + + """ + def setUp(self): + self.log = logging.getLogger('test') + self.boundary = "------------------------test-boundary" + + def test(self): + """ + Create a message that contains 3 instances of the boundary interspersed + with random characters. The message is presented to the boundary_search + function as a BytesIO so that it can be treated as a file. + + """ + # Construct the message + message = self.boundary + message += random_string(32) + message += self.boundary + message += random_string(64) + message += self.boundary + + # Search for the boundaries + indices = boundary_search(io.BytesIO(message.encode()), self.boundary) + + # Check the results + self.assertEqual(0, indices[0]) + self.assertEqual(32 + len(self.boundary), indices[1]) + self.assertEqual(96 + 2 * len(self.boundary), indices[2]) + + +class TestExtractPackage(unittest.TestCase): + def setUp(self): + self.log = logging.getLogger('devnull') + self.log.addHandler(logging.NullHandler()) + self.boundary = "------------------------test-boundary" + self.pkgfile = "/tmp/test-extract-package" + self.package = random_string(128) + self.url = "api/upload" + + def test(self): + """ + This test takes a known message (form-data) and extracts the 'package' + data from it. + + """ + try: + message = message_template.format( + length=len(self.package), + boundary=self.boundary, + binary=self.package, + url=self.url, + ) + + extract_package( + self.log, + io.BytesIO(message.encode()), + self.boundary, + self.pkgfile, + ) + + # Read the package file that is extracted to disk, and compare it with + # the expected data. + with open(self.pkgfile) as fp: + for u, v in zip(fp.readline(), self.package): + self.assertEqual(u, v) + + finally: + # Cleanup possible files + if os.path.exists(self.pkgfile): + os.remove(self.pkgfile) + + if os.path.exists(self.pkgfile + ".partial"): + os.remove(self.pkgfile + ".partial") + + +def main(argv=sys.argv[1:]): + logging.basicConfig(format='TEST %(message)s') + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + + args = parser.parse_args(argv) + + # Set the global logging level + logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR) + + # The unittest framework requires a program name, so use the name of this + # file instead (we do not want to have to pass a fake program name to main + # when this is called from the interpreter).
+ unittest.main(argv=[__file__] + argv, + testRunner=xmlrunner.XMLTestRunner( + output=os.environ["RIFT_MODULE_TEST"])) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwmc/CMakeLists.txt b/modules/core/mano/rwmc/CMakeLists.txt new file mode 100644 index 0000000..f1a5c92 --- /dev/null +++ b/modules/core/mano/rwmc/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME rwmc) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + +set(subdirs + plugins + ra + test + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) + +install( + PROGRAMS + bin/cloudsim_http_proxy.sh + DESTINATION usr/bin + COMPONENT ${PKG_LONG_NAME} +) diff --git a/modules/core/mano/rwmc/Makefile b/modules/core/mano/rwmc/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwmc/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwmc/bin/cloudsim_http_proxy.sh b/modules/core/mano/rwmc/bin/cloudsim_http_proxy.sh new file mode 100755 index 0000000..f2319d4 --- /dev/null +++ b/modules/core/mano/rwmc/bin/cloudsim_http_proxy.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# + + +function die { + echo "$@" + exit 1 +} + +which tinyproxy 2>/dev/null || die "You must install tinyproxy (sudo yum install tinyproxy)" + +tiny_cfg=$(mktemp) + +trap "rm ${tiny_cfg}" EXIT + +# Some default tinyproxy config to act as a very simple http proxy +cat << EOF > ${tiny_cfg} +User tinyproxy +Group tinyproxy +Port 9999 +Timeout 600 +DefaultErrorFile "/usr/share/tinyproxy/default.html" +StatFile "/usr/share/tinyproxy/stats.html" +LogFile "/var/log/tinyproxy/tinyproxy.log" +LogLevel Info +PidFile "/run/tinyproxy/tinyproxy.pid" +MaxClients 100 +MinSpareServers 5 +MaxSpareServers 20 +StartServers 10 +MaxRequestsPerChild 0 +ViaProxyName "tinyproxy" +EOF + +echo "Running TinyProxy in the foreground. Ctrl-C to exit." 
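+# The -d flag keeps tinyproxy in the foreground so the EXIT trap above can remove the temporary config file.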
+tinyproxy -c ${tiny_cfg} -d \ No newline at end of file diff --git a/modules/core/mano/rwmc/include/riftware/rwmc_log.h b/modules/core/mano/rwmc/include/riftware/rwmc_log.h new file mode 100644 index 0000000..bd6f00e --- /dev/null +++ b/modules/core/mano/rwmc/include/riftware/rwmc_log.h @@ -0,0 +1,40 @@ +/* * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ + + +/** + * @file rwmc_log.h + * @author Anil Gunturu (anil.gunturu@riftio.com) + * @date 08/14/2015 + * @brief Internal logging macros for rwmc + * + */ + +#include "rw-mc-log.pb-c.h" +#include "rw-log.pb-c.h" +#include "rwlog.h" + +// logging macros +#define RWMC_LOG_HANDLE(_inst) \ + ((_inst)->rwtasklet_info->rwlog_instance) + +#define RWMC_LOG_EVENT(__inst__, __evt__, ...) \ + RWLOG_EVENT(RWMC_LOG_HANDLE(__inst__), RwMcLog_notif_##__evt__, __VA_ARGS__) diff --git a/modules/core/mano/rwmc/include/riftware/rwmctasklet.h b/modules/core/mano/rwmc/include/riftware/rwmctasklet.h new file mode 100644 index 0000000..057cfd4 --- /dev/null +++ b/modules/core/mano/rwmc/include/riftware/rwmctasklet.h @@ -0,0 +1,84 @@ +/* * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +/* + * + * + * + */ + + +#ifndef __rwmctasklet_H__ +#define __rwmctasklet_H__ + +#include +#include "rwtasklet.h" +#include "rwdts.h" +#include "rwmc_log.h" + +struct rwmctasklet_component_s { + CFRuntimeBase _base; + /* ADD ADDITIONAL FIELDS HERE */ +}; + +RW_TYPE_DECL(rwmctasklet_component); +RW_CF_TYPE_EXTERN(rwmctasklet_component_ptr_t); + + +struct rwmctasklet_instance_s { + CFRuntimeBase _base; + rwtasklet_info_ptr_t rwtasklet_info; + rwmctasklet_component_ptr_t component; + + rwdts_member_reg_handle_t dts_member_handle; + rwdts_api_t *dts_h; + rwdts_appconf_t *dts_mgmt_handle; + + + + /* ADD ADDITIONAL FIELDS HERE */ +}; + +struct rwmctasklet_scratchpad_s { + char reason[256]; + struct rwmctasklet_instance_s *instance; +}; +RW_TYPE_DECL(rwmctasklet_scratchpad); +RW_CF_TYPE_EXTERN(rwmctasklet_scratchpad_ptr_t); + + +RW_TYPE_DECL(rwmctasklet_instance); +RW_CF_TYPE_EXTERN(rwmctasklet_instance_ptr_t); + +rwmctasklet_component_ptr_t rwmctasklet_component_init(void); + +void rwmctasklet_component_deinit(rwmctasklet_component_ptr_t component); + +rwmctasklet_instance_ptr_t rwmctasklet_instance_alloc( + rwmctasklet_component_ptr_t component, + struct rwtasklet_info_s * rwtasklet_info, + RwTaskletPlugin_RWExecURL *instance_url); + +void rwmctasklet_instance_free( + rwmctasklet_component_ptr_t component, + rwmctasklet_instance_ptr_t instance); + +void rwmctasklet_instance_start( + rwmctasklet_component_ptr_t component, + rwmctasklet_instance_ptr_t instance); + +#endif //__rwmctasklet_H__ + diff --git a/modules/core/mano/rwmc/plugins/CMakeLists.txt b/modules/core/mano/rwmc/plugins/CMakeLists.txt new file mode 100644 index 0000000..dd64b02 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/CMakeLists.txt @@ -0,0 +1,19 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + rwmctasklet + yang + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwmc/plugins/Makefile b/modules/core/mano/rwmc/plugins/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwmc/plugins/cli/cli_rwmc.xml b/modules/core/mano/rwmc/plugins/cli/cli_rwmc.xml new file mode 100644 index 0000000..ff12cc2 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/cli/cli_rwmc.xml @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/cli/cli_rwmc_schema_listing.txt b/modules/core/mano/rwmc/plugins/cli/cli_rwmc_schema_listing.txt new file mode 100644 index 0000000..402c281 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/cli/cli_rwmc_schema_listing.txt @@ -0,0 +1,54 @@ +ietf-inet-types +ietf-l2-topology +ietf-netconf-notifications +ietf-network +ietf-network-topology +ietf-restconf-monitoring +ietf-yang-types +mano-types +nsd +nsr +rw-base +rwcal +rw-cli-ext +rw-cloud +rw-config-agent +rw-conman +rw-debug +rw-dts +rw-dtsperf +rw-dtsperfmgr +rw-launchpad +rw-log +rwlog-mgmt +rw-manifest +rw-mc +rw-memlog +rw-mgmtagt +rw-mgmt-schema +rwmsg-data +rw-netconf +rw-notify-ext +rw-nsd +rw-nsm +rw-nsr +rw-pb-ext +rw-resource-mgr +rw-restportforward +rwsdn +rw-sdn +rwshell-mgmt +rw-sorch +rw-topology +rw-vcs +rwvcs-types +rw-vld +rw-vlr +rw-vnfd +rw-vnfr +rw-yang-types +vld +vlr +vnfd +vnffgd +vnfr diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/CMakeLists.txt b/modules/core/mano/rwmc/plugins/rwmctasklet/CMakeLists.txt new file mode 100644 index 0000000..72b5aa7 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/CMakeLists.txt @@ -0,0 +1,29 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwmctasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME}-python ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/launchpad.py + rift/tasklets/${TASKLET_NAME}/salt.py + rift/tasklets/${TASKLET_NAME}/util.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/Makefile b/modules/core/mano/rwmc/plugins/rwmctasklet/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/__init__.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/__init__.py new file mode 100644 index 0000000..388fbaf --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwmctasklet import MissionControlTasklet +from . import launchpad diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/launchpad.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/launchpad.py new file mode 100644 index 0000000..e52afe6 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/launchpad.py @@ -0,0 +1,495 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import ncclient +import ncclient.asyncio_manager +import os +import time +from datetime import timedelta + +from . 
import salt + +import gi +gi.require_version('RwYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +from gi.repository import RwYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang, RwLaunchpadYang + + +class JobNotStarted(Exception): + pass + + +class LaunchpadStartError(Exception): + pass + + +class LaunchpadConfigError(Exception): + pass + + +class Launchpad(object): + def __init__(self, mgmt_domain_name, node_id=None, ip_address=None): + self._mgmt_domain_name = mgmt_domain_name + self._node_id = node_id + self._ip_address = ip_address + + def __repr__(self): + return "Launchpad(mgmt_domain_name={}, node_id={}, ip_address={})".format( + self._mgmt_domain_name, self._node_id, self._ip_address + ) + + @property + def ip_address(self): + return self._ip_address + + @ip_address.setter + def ip_address(self, ip_address): + self._ip_address = ip_address + + @property + def node_id(self): + return self._node_id + + @node_id.setter + def node_id(self, node_id): + self._node_id = node_id + + @property + def mgmt_domain_name(self): + return self._mgmt_domain_name + + @property + def exe_path(self): + return "{}/demos/launchpad.py".format(os.environ["RIFT_INSTALL"]) + + @property + def args(self): + return "-m ethsim --ip-list=\"{}\"".format(self.ip_address) + + +class LaunchpadConfigurer(object): + NETCONF_PORT=2022 + NETCONF_USER="admin" + NETCONF_PW="admin" + + def __init__(self, log, loop, launchpad, vm_pool_mgr, network_pool_mgr): + self._log = log + self._loop = loop + self._launchpad = launchpad + self._vm_pool_mgr = vm_pool_mgr + self._network_pool_mgr = network_pool_mgr + + self._manager = None + + self._model = RwYang.Model.create_libncx() + self._model.load_schema_ypbc(RwCloudYang.get_schema()) + self._model.load_schema_ypbc(RwBaseYang.get_schema()) + self._model.load_schema_ypbc(RwResourceMgrYang.get_schema()) + self._model.load_schema_ypbc(RwNsmYang.get_schema()) + self._model.load_schema_ypbc(RwConmanYang.get_schema()) + self._model.load_schema_ypbc(RwLaunchpadYang.get_schema()) + self._cloud_account = None + + @staticmethod + def wrap_netconf_config_xml(xml): + xml = '{}'.format(xml) + return xml + + @asyncio.coroutine + def _connect(self, timeout_secs=240): + + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + + try: + self._log.debug("Attemping Launchpad netconf connection.") + + manager = yield from ncclient.asyncio_manager.asyncio_connect( + loop=self._loop, + host=self._launchpad.ip_address, + port=LaunchpadConfigurer.NETCONF_PORT, + username=LaunchpadConfigurer.NETCONF_USER, + password=LaunchpadConfigurer.NETCONF_PW, + allow_agent=False, + look_for_keys=False, + hostkey_verify=False, + ) + + return manager + + except ncclient.transport.errors.SSHError as e: + self._log.warning("Netconf connection to launchpad %s failed: %s", + self._launchpad, str(e)) + + yield from asyncio.sleep(5, loop=self._loop) + + raise LaunchpadConfigError("Failed to connect to Launchpad within %s seconds" % + timeout_secs) + + @asyncio.coroutine + def _configure_launchpad_mode(self): + """ configure launchpad mode """ + cfg = RwLaunchpadYang.YangData_RwLaunchpad_LaunchpadConfig.from_dict({'operational_mode': 'MC_MANAGED'}) + xml = cfg.to_xml_v2(self._model) + netconf_xml = self.wrap_netconf_config_xml(xml) + + self._log.debug("Sending launchpad mode config xml to %s: %s", + 
netconf_xml, self._launchpad.ip_address) + + response = yield from self._manager.edit_config(target="running", + config=netconf_xml,) + + self._log.debug("Received edit config response: %s", str(response)) + + @asyncio.coroutine + def _configure_service_orchestrator(self): + @asyncio.coroutine + def configure_service_orchestrator_endpoint(): + """ Configure Service Orchestrator Information to NSM Tasklet""" + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + + xml = cfg.to_xml_v2(self._model) + netconf_xml = self.wrap_netconf_config_xml(xml) + + self._log.debug("Sending cm-endpoint config xml to %s: %s", + netconf_xml, self._launchpad.ip_address) + + response = yield from self._manager.edit_config(target="running", + config=netconf_xml,) + self._log.debug("Received edit config response: %s", str(response)) + + @asyncio.coroutine + def configure_resource_orchestrator_endpoint(): + """ Configure Resource Orchestrator Information to SO Tasklet""" + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + xml = cfg.to_xml_v2(self._model) + netconf_xml = self.wrap_netconf_config_xml(xml) + + self._log.debug("Sending ro-endpoint config xml to %s: %s", + netconf_xml, self._launchpad.ip_address) + + response = yield from self._manager.edit_config(target="running", + config=netconf_xml,) + self._log.debug("Received edit config response: %s", str(response)) + + yield from configure_service_orchestrator_endpoint() + yield from configure_resource_orchestrator_endpoint() + + + @asyncio.coroutine + def _configure_cloud_account(self, cloud_account): + self._log.debug("Configuring launchpad %s cloud account: %s", + self._launchpad, cloud_account) + + cloud_account_cfg = RwCloudYang.CloudAccount.from_dict( + cloud_account.account.as_dict() + ) + + xml = cloud_account_cfg.to_xml_v2(self._model) + netconf_xml = self.wrap_netconf_config_xml(xml) + + self._log.debug("Sending configure cloud account xml to %s: %s", + netconf_xml, self._launchpad.ip_address) + + response = yield from self._manager.edit_config( + target="running", + config=netconf_xml, + ) + + self._log.debug("Received edit config response: %s", str(response)) + + @asyncio.coroutine + def _wait_until_system_ready(self, timeout_secs=60): + self._log.debug("Waiting for all tasklets in launchpad %s to be ready", self._launchpad) + + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + yield from asyncio.sleep(1, loop=self._loop) + + if self._manager is None: + self._log.info("Reconnecting to launchpad") + self._manager = yield from self._connect() + + try: + response = yield from self._manager.get(('xpath', '/vcs/info')) + except (ncclient.NCClientError, ncclient.operations.errors.TimeoutExpiredError) as e: + self._log.error("Caught error when requesting tasklet info: %s", str(e)) + self._manager = None + continue + + try: + response_xml = response.data_xml.decode() + except Exception as e: + self._log.error("ncclient_manager failed to decode xml: %s", str(e)) + self._log.error("raw ncclient response: %s", response.xml) + continue + + response_xml = response_xml[response_xml.index(' len(self._unallocated_ids): + raise AllocateError("Not enough unallocated resources in pool %s. 
" + "(num_resources: %s, num_unallocated: %s)", + self, len(self._resource_ids), len(self._unallocated_ids)) + + ids = [] + for i in range(num): + ids.append(self._unallocated_ids.pop()) + + return ids + + def deallocate(self, resource_ids): + for id in resource_ids: + if id not in self._resource_ids: + raise DeallocateError("Unknown resource id: %s", id) + + for id in resource_ids: + self._unallocated_ids.add(id) + + +class PoolResourceAllocator(object): + def __init__(self, log, loop, mgmt_domain, pool_mgr, num_allocate): + self._log = log + self._loop = loop + self._mgmt_domain = mgmt_domain + self._pool_mgr = pool_mgr + self._num_allocate = num_allocate + + self._pool = None + self._resources = None + + def __del__(self): + if self._resources is not None: + self.deallocate() + + @property + def pool(self): + return self._pool + + @property + def resources(self): + return self._resources + + def has_static_resources(self): + for pool in self._pool_mgr.list_mgmt_domain_pools(self._mgmt_domain.name): + if pool.resource_ids: + return True + + return False + + def get_cloud_account(self): + for pool in self._pool_mgr.list_mgmt_domain_pools(self._mgmt_domain.name): + return pool.cloud_account + + raise CloudAccountError("Could not find cloud account associated with mgmt_domain: %s", + self._mgmt_domain.name) + + @asyncio.coroutine + def allocate(self): + self._log.info("Entered Pool Resource allocate") + if self.pool is not None or self.resources is not None: + raise AllocateError("Resources already allocated") + + self._log.info("Starting %s pool allocation for %s resouces", + self._pool, self._num_allocate) + while self._resources is None: + self._log.info("Pool resources is None, waiting for resources to allocate") + # Add a callback notification to the pool for when resources + # are available. 
+ yield from asyncio.sleep(3, loop=self._loop) + + try: + current_pools = self._pool_mgr.list_mgmt_domain_pools(self._mgmt_domain.name) + except Exception as e: + self._log.warning("Mgmt Domain lookup may have failed (possibly due to mgmt-domain being deleted) , current_pools: %s", current_pools) + break + + for pool in current_pools: + try: + self._resources = pool.allocate(self._num_allocate) + self._pool = pool + except AllocateError as e: + self._log.debug("Could not allocate resources from pool %s: %s", + pool, str(e)) + + return self._resources + + def deallocate(self): + if self._resources is None: + self._log.warning("Nothing to deallocate") + return + + self._pool.deallocate(self._resources) + + self._resources = None + self._pool = None + + +class ResourcePoolManager(object): + def __init__(self, log, mgmt_domains, cloud_accounts): + self._log = log + self._mgmt_domains = mgmt_domains + self._cloud_accounts = cloud_accounts + + self._resource_pools = {} + + @property + def id_field(self): + raise NotImplementedError() + + def list_cloud_resources(self, cloud_account): + raise NotImplementedError() + + def _find_resource_id_pool(self, resource_id, cloud_account): + for pool in self._resource_pools.values(): + if resource_id in pool.resource_ids: + return pool + + return None + + def _get_mgmt_domain(self, mgmt_domain_name): + try: + return self._mgmt_domains[mgmt_domain_name] + except KeyError as e: + raise MgmtDomainNotFound(e) + + def _get_cloud_account(self, cloud_account_name): + if cloud_account_name not in self._cloud_accounts: + raise CloudAccountNotFound("Cloud account name not found: %s", cloud_account_name) + + cloud_account = self._cloud_accounts[cloud_account_name] + + return cloud_account + + def _assign_resource_pool_mgmt_domain(self, pool, mgmt_domain): + try: + self._log.debug("Assigning pool (%s) to mgmt_domain (%s)", pool, mgmt_domain) + pool.mgmt_domain = mgmt_domain + except PoolError as e: + raise AssignResourceError(e) + + self._log.info("Assigned pool (%s) to mgmt_domain (%s)", pool, mgmt_domain) + + def _unassign_resource_pool_mgmt_domain(self, pool): + try: + mgmt_domain = pool.mgmt_domain + if mgmt_domain is None: + self._log.warning("Pool does not have a mgmt_domain assigned.") + return + + self._log.debug("Unassigning pool (%s) from mgmt_domain (%s)", pool, mgmt_domain) + pool.mgmt_domain = None + except PoolError as e: + raise AssignResourceError(e) + + self._log.info("Unassigned mgmt_domain (%s) from pool: %s", mgmt_domain, pool) + + def _assign_mgmt_domain(self, mgmt_domain, pool): + self._log.debug("Assigning pool %s to mgmt_domain %s", pool, mgmt_domain) + pool.mgmt_domain = mgmt_domain + + def _unassign_mgmt_domain(self, mgmt_domain, pool): + self._log.debug("Unassigning pool %s from mgmt_domain %s", pool, mgmt_domain) + pool.mgmt_domain = None + + def list_cloud_pools(self, cloud_account_name): + cloud_pools = [] + cloud_account = self._get_cloud_account(cloud_account_name) + for pool in self._resource_pools.values(): + if pool.cloud_account == cloud_account: + cloud_pools.append(pool) + + return cloud_pools + + def list_mgmt_domain_pools(self, mgmt_domain_name): + mgmt_domain_pools = [] + mgmt_domain = self._get_mgmt_domain(mgmt_domain_name) + for pool in self._resource_pools.values(): + if pool.mgmt_domain == mgmt_domain: + mgmt_domain_pools.append(pool) + + return mgmt_domain_pools + + def list_available_cloud_resources(self, cloud_account_name, cloud_resources=None): + cloud = self._get_cloud_account(cloud_account_name) + resources = [] + + # 
If cloud_resources wasn't passed in, then fetch the latest resources + # from the import cloud. + if cloud_resources is None: + cloud_resources = self.list_cloud_resources(cloud_account_name) + + for resource in cloud_resources: + if self._find_resource_id_pool( + getattr(resource, self.id_field), + cloud, + ) is None: + resources.append(resource) + + return resources + + def list_available_resources(self, pool_name, cloud_resources=None): + pool = self.get_pool(pool_name) + cloud_account = pool.cloud_account + + return self.list_available_cloud_resources(cloud_account.name, cloud_resources) + + def get_pool(self, pool_name): + try: + return self._resource_pools[pool_name] + except KeyError as e: + raise PoolNotFoundError(e) + + def delete_mgmt_domain_pool(self, mgmt_domain_name, pool_name): + mgmt_domain = self._get_mgmt_domain(mgmt_domain_name) + pool = self.get_pool(pool_name) + + self._log.debug("Deleting mgmt_domain %s pool: %s)", + mgmt_domain, pool) + + self._unassign_mgmt_domain(mgmt_domain, pool) + + def update_mgmt_domain_pools(self, mgmt_domain_name, pool_name): + mgmt_domain = self._get_mgmt_domain(mgmt_domain_name) + pool = self.get_pool(pool_name) + + self._log.debug("Updating mgmt_domain %s pools: %s", + mgmt_domain, pool) + + self._assign_mgmt_domain(mgmt_domain, pool) + + def add_id_to_pool(self, pool_name, resource_id): + pool = self.get_pool(pool_name) + resource_list = self.list_cloud_resources(pool.cloud_account.name) + resource_ids = [getattr(r, self.id_field) for r in resource_list] + if resource_id not in resource_ids: + msg = ("Could not find resource_id %s in cloud account %s" % + (resource_id, pool.cloud_account.name)) + raise AddResourceError(msg) + + find_pool = self._find_resource_id_pool(pool.cloud_account, resource_id) + if find_pool is not None: + msg = ("resource_id %s in cloud account %s already added to pool %s" % + (resource_id, pool.cloud_account.name, find_pool.name)) + raise AddResourceError(msg) + + self._log.debug("Adding id %s to pool %s", resource_id, pool) + pool.add_resource_id(resource_id) + + def remove_id_from_pool(self, pool_name, resource_id): + pool = self.get_pool(pool_name) + try: + self._log.debug("Removing id %s from pool %s", resource_id, pool) + pool.remove_resource_id(resource_id) + except ValueError as e: + self._log.error("Could not remove unknown resource_id(%s) from pool(%s)", + resource_id, pool_name) + raise RemoveResourceError(e) + + self._log.info("Removed Resource (%s) from pool: %s", resource_id, pool) + + def update_dynamic_scaling(self, pool_name, dynamic_scaling): + pool = self.get_pool(pool_name) + pool.set_dynamic_scaling(dynamic_scaling) + self._log.info("Updated Resource pool Dynamic Scaling to %s", dynamic_scaling) + + def add_resource_pool(self, pool_name, cloud_account_name, assigned_ids, is_dynamic_scaling): + if pool_name in self._resource_pools: + self._log.warning("Pool name already exists: %s" % pool_name) + return + + avail_resources = self.list_available_cloud_resources(cloud_account_name) + avail_ids = [getattr(a, self.id_field) for a in avail_resources] + for assign_id in assigned_ids: + if assign_id not in avail_ids: + raise AddPoolError("Resource ID already assigned or not found: %s", assign_id) + + cloud_account = self._get_cloud_account(cloud_account_name) + + pool = ResourcePool( + self._log, + pool_name, + cloud_account, + is_dynamic_scaling, + ) + + self._resource_pools[pool_name] = pool + + self._log.info("Added Resource Pool: %s", pool) + + def update_resource_pool(self, pool_name, 
cloud_account_name, assigned_ids, is_dynamic_scaling): + pool = self.get_pool(pool_name) + cloud_account = self._get_cloud_account(cloud_account_name) + + if pool.cloud_account != cloud_account: + raise PoolError("Cannnot modify a resource pool's cloud account") + + current_ids = pool.resource_ids + + added_ids = set(assigned_ids) - set(current_ids) + for id in added_ids: + pool.add_resource_id(id) + + removed_ids = set(current_ids) - set(assigned_ids) + for id in removed_ids: + pool.remove_resource_id(id) + + pool.set_dynamic_scaling(is_dynamic_scaling) + + self._log.info("Updated Resource Pool: %s", pool) + + def delete_resource_pool(self, pool_name): + pool = self.get_pool(pool_name) + if pool.resource_ids: + self._log.warning("Resource pool still has Resources: %s. Disassociating them from the pool.", pool.resource_ids) + for resourceid in pool.resource_ids: + self.remove_id_from_pool(pool_name, resourceid) + + if pool.mgmt_domain: + raise DeletePoolError("Management Domain %s still associated with Resource Pool: %s", + pool.mgmt_domain.name, pool_name) + + del self._resource_pools[pool_name] + + self._log.info("Removed Resource Pool: %s", pool) + + +class VMPoolManager(ResourcePoolManager): + @property + def id_field(self): + return "vm_id" + + def list_cloud_resources(self, cloud_account_name): + cloud = self._get_cloud_account(cloud_account_name) + resources = cloud.list_vms() + return resources.vminfo_list + + +class NetworkPoolManager(ResourcePoolManager): + @property + def id_field(self): + return "network_id" + + def list_cloud_resources(self, cloud_account_name): + cloud = self._get_cloud_account(cloud_account_name) + resources = cloud.list_networks() + return resources.networkinfo_list + + +def get_add_delete_update_cfgs(dts_member_reg, xact, key_name): + # Unforunately, it is currently difficult to figure out what has exactly + # changed in this xact without Pbdelta support (RIFT-4916) + # As a workaround, we can fetch the pre and post xact elements and + # perform a comparison to figure out adds/deletes/updates + xact_cfgs = list(dts_member_reg.get_xact_elements(xact)) + curr_cfgs = list(dts_member_reg.elements) + + xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs} + curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs} + + # Find Adds + added_keys = set(xact_key_map) - set(curr_key_map) + added_cfgs = [xact_key_map[key] for key in added_keys] + + # Find Deletes + deleted_keys = set(curr_key_map) - set(xact_key_map) + deleted_cfgs = [curr_key_map[key] for key in deleted_keys] + + # Find Updates + updated_keys = set(curr_key_map) & set(xact_key_map) + updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]] + + return added_cfgs, deleted_cfgs, updated_cfgs + + +class ResourcePoolDtsConfigHandler(object): + def __init__(self, dts, log, pool_mgr, xpath): + self._dts = dts + self._log = log + self._pool_mgr = pool_mgr + self._xpath = xpath + + self._pool_reg = None + + def _delete_pool(self, pool_name): + self._log.info("Deleting pool %s", pool_name) + + self._pool_mgr.delete_resource_pool(pool_name) + + def _add_pool(self, pool_cfg): + self._log.info("Adding pool: %s", pool_cfg) + + self._pool_mgr.add_resource_pool( + pool_name=pool_cfg.name, + cloud_account_name=pool_cfg.cloud_account, + assigned_ids=[a.id for a in pool_cfg.assigned], + is_dynamic_scaling=pool_cfg.dynamic_scaling, + ) + + def _update_pool(self, pool_cfg): + self._log.info("Updating pool: %s", pool_cfg) + + 
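+ # Delegate to the pool manager, which diffs the requested assigned ids
+ # against the pool's current ids and adds/removes resources accordingly.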
self._pool_mgr.update_resource_pool( + pool_name=pool_cfg.name, + cloud_account_name=pool_cfg.cloud_account, + assigned_ids=[a.id for a in pool_cfg.assigned], + is_dynamic_scaling=pool_cfg.dynamic_scaling, + ) + + def register(self): + """ Register for Resource Pool create/update/delete/read requests from dts """ + + def apply_config(dts, acg, xact, action, _): + """Apply the pending pool configuration""" + + self._log.debug("Got pool apply config (xact: %s) (action: %s)", + xact, action) + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ prepare callback from dts for resource pool """ + + action = xact_info.handle.get_query_action() + + self._log.debug("Got resource pool prepare config (msg %s) (action %s)", + msg, action) + + fref = ProtobufC.FieldReference.alloc() + pb_msg = msg.to_pbcm() + fref.goto_whole_message(pb_msg) + + if action == rwdts.QueryAction.UPDATE: + # Got UPDATE action in prepare callback. Check what got Created/Updated in a Resource Pool + # It could either be a create of a new pool or updates for existing pool. + # Separating the creation of Pool and adding resources to the pool. + # In case of updates, we do not get the entire existing config, but only what changed + + # Create a new pool, return if it already exists + fref.goto_proto_name(pb_msg,"name") + if fref.is_field_present(): + self._add_pool(msg) + + # Now either a resource ID is assigned to a newly created pool + # or a pool is updated with a resource ID. + fref.goto_proto_name(pb_msg,"assigned") + if fref.is_field_present(): + ids = msg.get_assigned() + for assign_id in ids: + assign_id_pb = assign_id.to_pbcm() + fref.goto_proto_name(assign_id_pb,"id") + if fref.is_field_present(): + self._pool_mgr.add_id_to_pool(msg.get_name(), assign_id.get_id()) + + # Dynamic scaling attribute was updated + fref.goto_proto_name(pb_msg, "dynamic_scaling") + if fref.is_field_present(): + self._pool_mgr.update_dynamic_scaling(msg.get_name(), msg.get_dynamic_scaling()) + + + elif action == rwdts.QueryAction.DELETE: + # Got DELETE action in prepare callback + # Check what got deleted - it could be either + # the pool itself, or its cloud account, or its assigned IDs. + + # Did the entire pool get deleted? + # no [vm|network]-pool pool + if fref.is_field_deleted(): + self._delete_pool(msg.name); + + # Did the assigned ID get deleted? 
+ # no [vm|network]-pool pool assigned + fref.goto_proto_name(pb_msg,"assigned") + if fref.is_field_deleted(): + ids = msg.get_assigned() + for assign_id in ids: + assign_id_pb = assign_id.to_pbcm() + fref.goto_proto_name(assign_id_pb,"id") + if fref.is_field_present(): + self._pool_mgr.remove_id_from_pool(msg.get_name(), assign_id.get_id()) + + else: + self._log.error("Action (%s) NOT SUPPORTED", action) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for Resource Pool config using xpath: %s", + self._xpath, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + with self._dts.appconf_group_create(handler=acg_handler) as acg: + self._pool_reg = acg.register( + xpath="C," + self._xpath, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare + ) + + +class PoolDtsOperdataHandler(object): + def __init__(self, dts, log, pool_mgr): + self._dts = dts + self._log = log + self._pool_mgr = pool_mgr + + @property + def pool_gi_cls(self): + raise NotImplementedError() + + @property + def id_field(self): + raise NotImplementedError() + + @property + def name_field(self): + raise NotImplementedError() + + def get_show_pool_xpath(self, pool_name=None): + raise NotImplementedError() + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def on_prepare_pool(xact_info, action, ks_path, msg): + path_entry = self.pool_gi_cls.schema().keyspec_to_entry(ks_path) + pool_name = path_entry.key00.name + self._log.debug("Got show %s request: %s", + str(self.pool_gi_cls), ks_path.create_string()) + + if not pool_name: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + pool = self._pool_mgr.get_pool(pool_name) + self._log.debug("Showing pool: %s", pool) + except Exception as e: + self._log.warning("Could not get pool: %s", e) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + cloud_resources = self._pool_mgr.list_cloud_resources(pool.cloud_account.name) + available_resources = self._pool_mgr.list_available_resources(pool_name, cloud_resources) + unreserved_pool_resources = pool.unallocated_ids + + def find_cloud_resource(resource_id): + for resource in cloud_resources: + if getattr(resource, self.id_field) == resource_id: + return resource + + raise ResourceNotFoundError( + "Could not find resource id %s in pool %s cloud account %s" % + (resource_id, pool, pool.cloud_account) + ) + + msg = self.pool_gi_cls(name=pool_name) + if pool.mgmt_domain is not None: + msg.mgmt_domain = pool.mgmt_domain.name + + for avail in available_resources: + new_avail = msg.available.add() + new_avail.id = getattr(avail, self.id_field) + new_avail.name = getattr(avail, self.name_field) + + for assigned_id in pool.resource_ids: + cloud_resource = find_cloud_resource(assigned_id) + self._log.debug("Found cloud resource: %s", cloud_resource) + assigned = msg.assigned_detail.add() + assigned.id = assigned_id + assigned.is_reserved = assigned_id not in unreserved_pool_resources + assigned.resource_info.from_dict(cloud_resource.as_dict()) + + msg.dynamic_scaling = pool.is_dynamic_scaling + + self._log.debug("Responding to show pool: %s", msg) + + xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath=self.get_show_pool_xpath(pool_name), + msg=msg, + ) + + yield from self._dts.register( + xpath=self.get_show_pool_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare_pool), + flags=rwdts.Flag.PUBLISHER, + ) + + + +class 
NetworkPoolDtsOperdataHandler(PoolDtsOperdataHandler): + def __init__(self, dts, log, network_pool_mgr): + super().__init__(dts, log, network_pool_mgr) + + @property + def pool_gi_cls(self): + return RwMcYang.NetworkPool + + @property + def id_field(self): + return "network_id" + + @property + def name_field(self): + return "network_name" + + def get_show_pool_xpath(self, pool_name=None): + path = "D,/rw-mc:network-pool/pool{}".format( + "[rw-mc:name='%s']" % pool_name if pool_name is not None else "" + ) + + return path + + +class VMPoolDtsOperdataHandler(PoolDtsOperdataHandler): + def __init__(self, dts, log, vm_pool_mgr): + super().__init__(dts, log, vm_pool_mgr) + + @property + def pool_gi_cls(self): + return RwMcYang.VmPool + + @property + def id_field(self): + return "vm_id" + + @property + def name_field(self): + return "vm_name" + + def get_show_pool_xpath(self, pool_name=None): + path = "D,/rw-mc:vm-pool/pool{}".format( + "[rw-mc:name='%s']" % pool_name if pool_name is not None else "" + ) + + return path + + +class CloudAccountDtsConfigHandler(object): + XPATH = "/rw-mc:cloud-account/account" + + def __init__(self, dts, loop, log, cloud_accounts): + self._dts = dts + self._loop = loop + self._log = log + self._cloud_accounts = cloud_accounts + + self._cloud_reg = None + + def _add_cloud(self, cfg): + self._log.info("Adding cloud account: %s", cfg) + # Check if cloud account already exists, if it does, its really + # an update for the cloud account, and rest of the details are + # handled in _update_cloud + if cfg.name in self._cloud_accounts: + self._log.warning("Cloud account name %s already exists!", cfg.name) + if cfg.has_field('account_type'): + raise CloudAccountError("Cannot change cloud's account-type") + + return False + + # If this is a new cloud account, then account-type field is mandatory + # NOTE: Right now, account-type is not mandatory in yang due to a bug, + # so we need to check for it and artifically enforce it to be mandatory + if cfg.has_field('account_type'): + cls = get_cloud_account_cls_from_type(cfg.account_type) + else: + raise CloudAccountError("Missing mandatory 'cloud-account' field") + + account = cls.from_cfg(self._log, cfg) + + self._cloud_accounts[account.name] = account + return True + + def _delete_cloud(self, name): + self._log.info("Deleting cloud account: %s", name) + + if name not in self._cloud_accounts: + self._log.warning("Cloud name doesn't exist!") + return + + del self._cloud_accounts[name] + + def _update_cloud(self, cfg): + self._log.info("Updating cloud account: %s", cfg) + + if cfg.name not in self._cloud_accounts: + self._log.warning("Cloud name doesn't exist!") + return + + account = self._cloud_accounts[cfg.name] + account.update_from_cfg(cfg) + self._log.debug("After update, new account details: %s", account.account ) + + def register(self): + """ Register for Cloud Account create/update/delete/read requests from dts """ + + def apply_config(dts, acg, xact, action, _): + """Apply the pending cloud account configuration""" + + self._log.debug("Got cloud account apply config (xact: %s) (action: %s)", + xact, action) + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. 
Skipping apply config") + return + + #return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for Cloud Account """ + + action = xact_info.handle.get_query_action() + self._log.debug("Got cloud account prepare config (msg %s) (action %s)", + msg, action) + + @asyncio.coroutine + def start_cloud_account_validation(cloud_name): + account = self._cloud_accounts[cloud_name] + yield from account.validate_cloud_account_credentials(self._loop) + + fref = ProtobufC.FieldReference.alloc() + pb_msg = msg.to_pbcm() + fref.goto_whole_message(pb_msg) + is_new_account = True + + if action == rwdts.QueryAction.UPDATE: + # We get an UPDATE if either a new cloud-account is created or one + # of its fields is updated. + # Separating the creation of cloud-account from updating its fields + fref.goto_proto_name(pb_msg,"name") + if fref.is_field_present(): + is_new_account = self._add_cloud(msg) + + if not is_new_account: + # This was an Update of the fields of the cloud account + # Need to check which account-type's fields were updated + self._update_cloud(msg) + + # Asynchronously check the cloud accounts credentials as soon as a + # new cloud account is created or an existing account is updated + self._loop.create_task(start_cloud_account_validation(msg.name)) + + elif action == rwdts.QueryAction.DELETE: + # Got DELETE action in prepare callback + # We only allow the deletion of cloud account itself, not its fields. + + fref.goto_whole_message(pb_msg) + if fref.is_field_deleted(): + # Cloud account was deleted + self._delete_cloud(msg.name); + + else: + self._log.error("Action (%s) NOT SUPPORTED", action) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for Cloud Account config using xpath: %s", + CloudAccountDtsConfigHandler.XPATH) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + with self._dts.appconf_group_create(handler=acg_handler) as acg: + self._cloud_reg = acg.register( + xpath="C," + CloudAccountDtsConfigHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare + ) + + +class CloudAccountDtsOperdataHandler(object): + def __init__(self, dts, loop, log, cloud_accounts, + vm_pool_mgr, network_pool_mgr): + self._dts = dts + self._loop = loop + self._log = log + self._cloud_accounts = cloud_accounts + self._vm_pool_mgr = vm_pool_mgr + self._network_pool_mgr = network_pool_mgr + + def _register_show_pools(self): + def get_xpath(cloud_name=None): + return "D,/rw-mc:cloud-account/account{}/pools".format( + "[name='%s']" % cloud_name if cloud_name is not None else '' + ) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwMcYang.CloudAccount.schema().keyspec_to_entry(ks_path) + cloud_account_name = path_entry.key00.name + self._log.debug("Got show cloud pools request: %s", ks_path.create_string()) + + if not cloud_account_name: + self._log.warning("Cloud account name %s not found", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + account = self._cloud_accounts[cloud_account_name] + if not account: + self._log.warning("Cloud account %s does not exist", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + # If cloud account's credentials are not even valid, don't even try to fetch data using CAL APIs + # as they will throw an exception & tracebacks. 
+ if account.credential_status != "Validated": + self._log.warning("Cloud Account Credentials are not valid: %s", account.credential_status_details) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + cloud_vm_pools = self._vm_pool_mgr.list_cloud_pools(cloud_account_name) + cloud_network_pools = self._network_pool_mgr.list_cloud_pools(cloud_account_name) + except Exception as e: + self._log.warning("Could not get cloud pools: %s", e) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + cloud_pools = RwMcYang.CloudPools() + + for vm in cloud_vm_pools: + cloud_pools.vm.add().name = vm.name + + for network in cloud_network_pools: + cloud_pools.network.add().name = network.name + + self._log.debug("Responding to cloud pools request: %s", cloud_pools) + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + xpath=get_xpath(cloud_account_name), + msg=cloud_pools, + ) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + def _register_show_resources(self): + def get_xpath(cloud_name=None): + return "D,/rw-mc:cloud-account/account{}/resources".format( + "[name='%s']" % cloud_name if cloud_name is not None else '' + ) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwMcYang.CloudAccount.schema().keyspec_to_entry(ks_path) + cloud_account_name = path_entry.key00.name + xpath = ks_path.create_string() + self._log.debug("Got show cloud resources request: %s", xpath) + + if not cloud_account_name: + self._log.warning("Cloud account name %s not found", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + account = self._cloud_accounts[cloud_account_name] + if not account: + self._log.warning("Cloud account %s does not exist", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + # If cloud account's credentials are not even valid, don't even try to fetch data using CAL APIs + # as they will throw an exception & tracebacks. 
+ if account.credential_status != "Validated": + self._log.warning("Cloud Account Credentials are not valid: %s", account.credential_status_details) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + respond_types = ["vm", "network"] + if "vm" in xpath: + respond_types = ["vm"] + + if "network" in xpath: + respond_types = ["network"] + + try: + if "vm" in respond_types: + vms = self._vm_pool_mgr.list_cloud_resources(cloud_account_name) + avail_vms = self._vm_pool_mgr.list_available_cloud_resources(cloud_account_name, vms) + avail_vm_ids = [v.vm_id for v in avail_vms] + + if "network" in respond_types: + networks = self._network_pool_mgr.list_cloud_resources(cloud_account_name) + avail_networks = self._network_pool_mgr.list_available_cloud_resources(cloud_account_name, networks) + avail_network_ids = [n.network_id for n in avail_networks] + + except Exception as e: + self._log.error("Could not get cloud resources: %s", e, exc_info=True) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + avail = RwMcYang.CloudResources() + + if "vm" in respond_types: + for vm in vms: + add_vm = avail.vm.add() + add_vm.id = vm.vm_id + add_vm.name = vm.vm_name + add_vm.available = add_vm.id in avail_vm_ids + + if "network" in respond_types: + for network in networks: + add_network = avail.network.add() + add_network.id = network.network_id + add_network.name = network.network_name + add_network.available = add_network.id in avail_network_ids + + self._log.debug("Responding to cloud resources request: %s", avail) + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + xpath=get_xpath(cloud_account_name), + msg=avail, + ) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + def _register_show_status(self): + def get_xpath(cloud_name=None): + return "D,/rw-mc:cloud-account/account{}/connection".format( + "[name='%s']" % cloud_name if cloud_name is not None else '' + ) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwMcYang.CloudAccount.schema().keyspec_to_entry(ks_path) + cloud_account_name = path_entry.key00.name + self._log.debug("Got show cloud connection status request: %s", ks_path.create_string()) + + if not cloud_account_name: + self._log.warning("Cloud account name %s not found", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + account = self._cloud_accounts[cloud_account_name] + if not account: + self._log.warning("Cloud account %s does not exist", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + cred_status = account.credential_status + cred_details = account.credential_status_details + except Exception as e: + self._log.error("Could not get cloud status: %s", e) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + cloud_creds = RwMcYang.CloudStatus() + if cred_status is not None: + cloud_creds.status = cred_status + cloud_creds.details = cred_details + else: + cloud_creds.status = "Validating..." + cloud_creds.details = "Connection status is being validated, please wait..." 
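+ # Publish the per-account connection status as operational data (MORE)
+ # and ACK the overall query below, mirroring the other show handlers.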
+ + self._log.debug("Responding to cloud connection status request: %s", cloud_creds) + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + xpath=get_xpath(cloud_account_name), + msg=cloud_creds, + ) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + @asyncio.coroutine + def register(self): + yield from self._register_show_pools() + yield from self._register_show_resources() + yield from self._register_show_status() + +class SDNAccountDtsConfigHandler(object): + XPATH="/rw-mc:sdn/account" + + def __init__(self, dts, log, sdn_accounts): + self._dts = dts + self._log = log + self._sdn_accounts = sdn_accounts + + self._sdn_reg = None + + def _add_sdn(self, cfg): + self._log.info("Adding sdn account: %s", cfg) + if cfg.name in self._sdn_accounts: + self._log.warning("SDN name already exists!") + return + + # Right now we only have one SDN Account of type ODL; + # when we support more SDN account types, we should + # create a similar funtion to get sdn account class from type, + # like 'get_cloud_account_cls_from_type' + cls = OdlSDNAccount + account = cls.from_cfg(self._log, cfg) + self._sdn_accounts[account.name] = account + + def _delete_sdn(self, name): + self._log.info("Deleting sdn account: %s", name) + + if name not in self._sdn_accounts: + self._log.warning("SDN name doesn't exist!") + return + + del self._sdn_accounts[name] + + def _update_sdn(self, cfg): + self._log.info("Updating sdn account: %s", cfg) + + if cfg.name not in self._sdn_accounts: + self._log.warning("SDN name doesn't exist!") + return + + account = self._sdn_accounts[cfg.name] + account.update_from_cfg(cfg) + + def register(self): + def apply_config(dts, acg, xact, action, _): + """Apply the pending sdn account configuration""" + + self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", + xact, action) + + try: + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. 
Skipping apply config") + return + + sdn_add_cfgs, sdn_delete_cfgs, sdn_update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self._sdn_reg, + xact=xact, + key_name="name", + ) + + # Handle Deletes + for cfg in sdn_delete_cfgs: + self._delete_sdn(cfg.name) + + # Handle Adds + for cfg in sdn_add_cfgs: + self._add_sdn(cfg) + + # Handle Updates + for cfg in sdn_update_cfgs: + self._update_sdn(cfg) + + except Exception as e: + self._log.warning("Could not apply config for SDN account: %s", e) + + + self._log.debug("Registering for SDN Account config using xpath: %s", + SDNAccountDtsConfigHandler.XPATH) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + with self._dts.appconf_group_create(acg_handler) as acg: + self._sdn_reg = acg.register( + xpath="C," + SDNAccountDtsConfigHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + +class MgmtDomainDtsConfigHandler(object): + XPATH = "C,/rw-mc:mgmt-domain/rw-mc:domain" + + def __init__(self, dts, loop, log, mgmt_domains, + vm_pool_mgr, network_pool_mgr, lp_minions): + self._dts = dts + self._loop = loop + self._log = log + self._mgmt_domains = mgmt_domains + self._vm_pool_mgr = vm_pool_mgr + self._network_pool_mgr = network_pool_mgr + self._lp_minions = lp_minions + + self._fed_reg = None + self._vm_pool_configured = False + self._net_pool_configured = False + + def _delete_mgmt_domain_vm_pool(self, mgmt_domain_name, vm_pool_name): + self._log.debug("Deleting vm pool %s from mgmt_domain %s", vm_pool_name, mgmt_domain_name) + self._vm_pool_mgr.delete_mgmt_domain_pool(mgmt_domain_name, vm_pool_name) + self._vm_pool_configured = False + + def _delete_mgmt_domain_net_pool(self, mgmt_domain_name, net_pool_name): + self._log.debug("Deleting network pool %s from mgmt_domain %s", net_pool_name, mgmt_domain_name) + self._network_pool_mgr.delete_mgmt_domain_pool(mgmt_domain_name, net_pool_name) + self._net_pool_configured = False + + def _delete_mgmt_domain(self, fed_cfg): + self._log.debug("Deleting mgmt_domain: %s", fed_cfg.name) + + if self._mgmt_domains[fed_cfg.name].launchpad_state is "started": + # Launchpad is running, can not delete Mgmt-domin. 
Abort + raise DeleteMgmtDomainError("Cannot delete Mgmt-domain - Laucnhpad is still running!") + + for vm_pool in self._vm_pool_mgr.list_mgmt_domain_pools(fed_cfg.name): + self._delete_mgmt_domain_vm_pool(fed_cfg.name, vm_pool.name) + + for net_pool in self._network_pool_mgr.list_mgmt_domain_pools(fed_cfg.name): + self._delete_mgmt_domain_net_pool(fed_cfg.name, net_pool.name) + + # We need to free up LP resources when a MD is deleted + mgmt_domain = self._mgmt_domains[fed_cfg.name] + if self._mgmt_domains[fed_cfg.name].launchpad_state in ["pending", "configuring"]: + # Mgmt-domain was deleted while launchpad was in pending/configuring state + mgmt_domain.stop_launchpad() + + mgmt_domain.release_launchpad() + + del self._mgmt_domains[fed_cfg.name] + + def _update_mgmt_domain_pools(self, name, fed_cfg): + self._log.debug("Updating mgmt_domain pools %s", name) + + for vm_pool in fed_cfg.pools.vm: + self._vm_pool_mgr.update_mgmt_domain_pools(fed_cfg.name, vm_pool.name) + self._vm_pool_configured = True + + for network_pool in fed_cfg.pools.network: + self._network_pool_mgr.update_mgmt_domain_pools(fed_cfg.name, network_pool.name) + self._net_pool_configured = True + + def _add_mgmt_domain(self, fed_cfg): + self._log.debug("Creating new mgmt_domain: %s", fed_cfg.name) + if fed_cfg.name in self._mgmt_domains: + self._log.warning("Mgmt Domain name %s already exists!", fed_cfg.name) + return + + mgmt_domain = MgmtDomain( + self._loop, + self._log, + fed_cfg.name, + self._vm_pool_mgr, + self._network_pool_mgr, + self._lp_minions, + ) + + self._mgmt_domains[fed_cfg.name] = mgmt_domain + + def _update_mgmt_domain(self, fed_cfg): + self._log.debug("Updating mgmt_domain: %s", fed_cfg) + + self._update_mgmt_domain_pools(fed_cfg.name, fed_cfg) + + # Start launchpad ONLY IF both VM & NET pool have been configured + if self._vm_pool_configured and self._net_pool_configured: + mgmt_domain = self._mgmt_domains[fed_cfg.name] + mgmt_domain.allocate_start_configure_launchpad_task() + + + @asyncio.coroutine + def register(self): + """ Register for Mgmt Domain create/update/delete/read requests from dts """ + + def apply_config(dts, acg, xact, action, _): + """Apply the pending mgmt_domain configuration""" + + self._log.debug("Got mgmt_domain apply config (xact: %s) (action: %s)", + xact, action) + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ prepare callback from dts for mgmt domain """ + + action = xact_info.handle.get_query_action() + + self._log.debug("Got mgmt domain prepare config (msg %s) (action %s)", + msg, action) + + fref = ProtobufC.FieldReference.alloc() + pb_msg = msg.to_pbcm() + fref.goto_whole_message(pb_msg) + + if action == rwdts.QueryAction.UPDATE: + # We get an UPDATE if either a new mgmt-domain is created or a pool is added/updated. 
+ # Separating the creation of mgmt-domain from adding its pools + fref.goto_proto_name(pb_msg,"name") + if fref.is_field_present(): + self._add_mgmt_domain(msg) + + fref.goto_proto_name(pb_msg,"pools") + if fref.is_field_present(): + self._update_mgmt_domain(msg) + + elif action == rwdts.QueryAction.DELETE: + # Got DELETE action in prepare callback + # Check what got deleted - it could be either + # the mgmt_domain itself, or its network pool or its vm pool + + # Did the entire mgmt_domain get deleted? + # no mgmt-domain domain + fref.goto_whole_message(pb_msg) + if fref.is_field_deleted(): + self._delete_mgmt_domain(msg) + + # Did the assigned pools get deleted? + # no mgm-domain domain pools + # or + # Did a specific pool get deleted? + # no mgmt-domain domain pools [vm|network] + # in either case, we get a DELETE call for each pool separately + fref.goto_proto_name(pb_msg,"pools") + if fref.is_field_deleted(): + self._log.info("Removing pool: %s from mgmt-domain: %s", msg.pools, msg.get_name()) + + pools = msg.get_pools() + + pools_pb = pools.to_pbcm() + fref.goto_proto_name(pools_pb, "vm") + vmpool = pools.vm + if fref.is_field_deleted(): + self._delete_mgmt_domain_vm_pool(msg.get_name(), vmpool[0].name) + + fref.goto_proto_name(pools_pb, "network") + netpool = pools.network + if fref.is_field_deleted(): + self._delete_mgmt_domain_net_pool(msg.get_name(), netpool[0].name) + + else: + self._log.error("Action (%s) NOT SUPPORTED", action) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for mgmt_domain config using xpath: %s", + MgmtDomainDtsConfigHandler.XPATH) + + acg_handler = rift.tasklets.AppConfGroup.Handler(on_apply=apply_config) + with self._dts.appconf_group_create(handler=acg_handler) as acg: + self._fed_reg = acg.register( + xpath=MgmtDomainDtsConfigHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare + ) + + +class MgmtDomainDtsRpcHandler(object): + START_LAUNCHPAD_XPATH= "/rw-mc:start-launchpad" + STOP_LAUNCHPAD_XPATH= "/rw-mc:stop-launchpad" + + def __init__(self, dts, log, mgmt_domains): + self._dts = dts + self._log = log + self._mgmt_domains = mgmt_domains + + self.pending_msgs = [] + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def on_prepare_start(xact_info, action, ks_path, msg): + self._log.debug("Got launchpad start request: %s", msg) + + name = msg.mgmt_domain + if name not in self._mgmt_domains: + msg = "Launchpad name %s not found" % name + self._log.error(msg) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + mgmt_domain = self._mgmt_domains[name] + + try: + mgmt_domain.allocate_start_configure_launchpad_task() + except Exception as e: + self._log.error("Failed to start launchpad: %s", str(e)) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath="O," + MgmtDomainDtsRpcHandler.START_LAUNCHPAD_XPATH, + ) + + @asyncio.coroutine + def on_prepare_stop(xact_info, action, ks_path, msg): + self._log.debug("Got launchpad stop request: %s", msg) + + name = msg.mgmt_domain + if name not in self._mgmt_domains: + msg = "Launchpad name %s not found", name + self._log.error(msg) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + mgmt_domain = self._mgmt_domains[name] + try: + yield from mgmt_domain.stop_launchpad() + except Exception as e: + self._log.exception("Failed to stop launchpad: %s", str(e)) + 
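+ # NACK the stop-launchpad RPC so the failure is reported back to the caller.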
xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath="O," + MgmtDomainDtsRpcHandler.STOP_LAUNCHPAD_XPATH + ) + + yield from self._dts.register( + xpath="I," + MgmtDomainDtsRpcHandler.START_LAUNCHPAD_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare_start), + flags=rwdts.Flag.PUBLISHER + ) + + yield from self._dts.register( + xpath="I," + MgmtDomainDtsRpcHandler.STOP_LAUNCHPAD_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare_stop), + flags=rwdts.Flag.PUBLISHER + ) + + +class MgmtDomainDtsOperdataHandler(object): + def __init__(self, dts, log, mgmt_domains): + self._dts = dts + self._log = log + self._mgmt_domains = mgmt_domains + + def _get_respond_xpath(self, mgmt_domain_name=None): + return "D,/rw-mc:mgmt-domain/domain{}/launchpad".format( + "[name='%s']" % mgmt_domain_name if mgmt_domain_name is not None else "" + ) + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwMcYang.MgmtDomain.schema().keyspec_to_entry(ks_path) + mgmt_domain_name = path_entry.key00.name + self._log.debug("Got show mgmt_domain launchpad request: %s", ks_path.create_string()) + + if not mgmt_domain_name: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + mgmt_domain = self._mgmt_domains.get(mgmt_domain_name, None) + if mgmt_domain is None: + self._log.warning("Could not find management domain: %s", mgmt_domain) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + lp_state = mgmt_domain.launchpad_state + lp_state_details= mgmt_domain.launchpad_state_details + lp_uptime = mgmt_domain.launchpad_uptime + lp_create_time = mgmt_domain.launchpad_create_time + + lp_ip = None + if mgmt_domain.launchpad_vm_info is not None: + if mgmt_domain.launchpad_vm_info.public_ip: + lp_ip = mgmt_domain.launchpad_vm_info.public_ip + else: + lp_ip = mgmt_domain.launchpad_vm_info.management_ip + + except Exception as e: + self._log.warning("Could not get mgmt-domain launchpad info: %s", e) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + msg = RwMcYang.MgmtDomainLaunchpad() + msg.state = lp_state + msg.state_details = lp_state_details + msg.uptime = lp_uptime + if lp_create_time is not None: + msg.create_time = lp_create_time + if lp_ip is not None: + msg.ip_address = lp_ip + + self._log.debug("Responding to mgmt_domain pools request: %s", msg) + xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath=self._get_respond_xpath(mgmt_domain_name), + msg=msg, + ) + + yield from self._dts.register( + xpath=self._get_respond_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + +class MCUptimeDtsOperdataHandler(object): + def __init__(self, dts, log, start_time): + self._dts = dts + self._log = log + self._mc_start_time = start_time + + + def get_start_time(self): + return self._mc_start_time + + def _get_uptime_xpath(self): + return "D,/rw-mc:mission-control" + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + self._log.debug("Got show MC uptime request: %s", ks_path.create_string()) + + msg = RwMcYang.Uptime() + uptime_secs = float(time.time() - self.get_start_time()) + uptime_str = str(timedelta(seconds = uptime_secs)) + msg.uptime = uptime_str + msg.create_time = self.get_start_time() + + self._log.debug("Responding to MC Uptime request: %s", msg) + 
xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath=self._get_uptime_xpath(), + msg=msg, + ) + + yield from self._dts.register( + xpath=self._get_uptime_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + +fallback_launchpad_resources = None +fallback_launchpad_public_ip = None + +def construct_fallback_launchpad_vm_pool(): + global fallback_launchpad_resources + global fallback_launchpad_public_ip + + if "RIFT_LP_NODES" not in os.environ: + return + + fallback_launchpad_resources = [] + for node in os.environ["RIFT_LP_NODES"].split(":"): + node_ip_id = node.split("|") + assert len(node_ip_id) == 2 + fallback_launchpad_resources.append(node_ip_id) + + if "RIFT_LP_PUBLIC_IP" not in os.environ: + fallback_launchpad_public_ip = None + return + + fallback_launchpad_public_ip = os.environ["RIFT_LP_PUBLIC_IP"] + + + +class MissionControlTasklet(rift.tasklets.Tasklet): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.rwlog.set_category('rw-mc') + self._dts = None + self._mgmt_domains = {} + self._domain_config_hdl = None + self._domain_rpc_hdl = None + self._pool_config_hdl = None + self._cloud_account_config_hdl = None + self._sdn_account_config_hdl = None + self._error_test_rpc_hdl = None + self._start_time = time.time() + + self._cloud_accounts = {} + self._sdn_accounts = {} + + self._lp_minions = {} + + self._vm_pool_mgr = VMPoolManager( + self.log, + self._mgmt_domains, + self._cloud_accounts, + ) + self._network_pool_mgr = NetworkPoolManager( + self.log, + self._mgmt_domains, + self._cloud_accounts, + ) + + def initialize_lxc(self): + self.log.info("Enabling Container Cal Debug Logging") + SimCloudAccount.enable_debug_logging(self.log.handlers) + + def start(self): + super().start() + self.log.info("Starting Mission Control Tasklet") + + CloudAccount.log_hdl = self.log_hdl + SDNAccount.log_hdl = self.log_hdl + + # Initialize LXC to the extent possible until RIFT-8483, RIFT-8485 are completed + self.initialize_lxc() + + # Use a fallback set of launchpad VM's when provided and no static + # resources are selected + construct_fallback_launchpad_vm_pool() + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS( + self.tasklet_info, + RwMcYang.get_schema(), + self.loop, + self.on_dts_state_change + ) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + @asyncio.coroutine + def init(self): + """Initialize application. 
During this state transition all DTS + registrations and subscriptions required by application should be started + """ + + self._lp_minions = yield from launchpad.get_previous_lp( + self.log, self.loop) + + self._uptime_operdata_hdl = MCUptimeDtsOperdataHandler( + self._dts, + self._log, + self._start_time, + ) + yield from self._uptime_operdata_hdl.register() + + self._domain_config_hdl = MgmtDomainDtsConfigHandler( + self._dts, + self.loop, + self.log, + self._mgmt_domains, + self._vm_pool_mgr, + self._network_pool_mgr, + self._lp_minions, + ) + yield from self._domain_config_hdl.register() + + self._domain_rpc_hdl = MgmtDomainDtsRpcHandler( + self._dts, + self.log, + self._mgmt_domains, + ) + yield from self._domain_rpc_hdl.register() + + self._domain_operdata_hdl = MgmtDomainDtsOperdataHandler( + self._dts, + self.log, + self._mgmt_domains, + ) + yield from self._domain_operdata_hdl.register() + + self._vm_pool_config_hdl = ResourcePoolDtsConfigHandler( + self._dts, + self.log, + self._vm_pool_mgr, + "/vm-pool/pool", + ) + self._vm_pool_config_hdl.register() + + self._network_pool_config_hdl = ResourcePoolDtsConfigHandler( + self._dts, + self.log, + self._network_pool_mgr, + "/network-pool/pool", + ) + self._network_pool_config_hdl.register() + + self._vm_pool_operdata_hdl = VMPoolDtsOperdataHandler( + self._dts, + self.log, + self._vm_pool_mgr, + ) + yield from self._vm_pool_operdata_hdl.register() + + self._network_pool_operdata_hdl = NetworkPoolDtsOperdataHandler( + self._dts, + self.log, + self._network_pool_mgr, + ) + yield from self._network_pool_operdata_hdl.register() + + self._cloud_account_config_hdl = CloudAccountDtsConfigHandler( + self._dts, + self.loop, + self.log, + self._cloud_accounts, + ) + self._cloud_account_config_hdl.register() + + self._cloud_account_operdata_hdl = CloudAccountDtsOperdataHandler( + self._dts, + self.loop, + self.log, + self._cloud_accounts, + self._vm_pool_mgr, + self._network_pool_mgr, + ) + yield from self._cloud_account_operdata_hdl.register() + + self._sdn_account_config_hdl = SDNAccountDtsConfigHandler( + self._dts, + self.log, + self._sdn_accounts, + ) + self._sdn_account_config_hdl.register() + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/salt.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/salt.py new file mode 100644 index 0000000..c7b7962 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/salt.py @@ -0,0 +1,284 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import argparse +import asyncio +import json +import logging +import shlex +import subprocess + +from . import util + + +class SaltCommandFailed(util.CommandFailed): + pass + + +class SaltCommandNotStarted(Exception): + pass + + +class MinionConnectionNotFound(Exception): + pass + + +def execute_salt_cmd(log, target, cmd): + saltcmd = "salt {target} cmd.run '{cmd}' --out txt".format( + target=target, + cmd=cmd + ) + log.info("Executing command: %s", saltcmd) + + try: + stdout = subprocess.check_output( + shlex.split(saltcmd), + universal_newlines=True, + ) + except subprocess.CalledProcessError as e: + log.error("Failed to execute subprocess command %s (exception %s)", cmd, str(e)) + raise + + return stdout + +def get_launchpad_hostname(log, node_id): + ''' + Find the hostname for launchpad VM + ''' + cmd = "hostnamectl --static" + + try: + stdout = execute_salt_cmd(log, node_id, cmd) + except Exception as e: + log.error("Failed to get Launchpad hostname (exception %s)", str(e)) + + for line in stdout.split("\n"): + (nodeid, hostname) = line.split(": ") + if nodeid is None: + raise SaltCommandFailed("Salt did not return proper node id (expected: %s) (received: %s)", + node_id, stdout) + + log.info("command (%s) returned result (%s) (id: %s)", cmd, hostname, nodeid) + return hostname + + raise SaltCommandFailed("Salt command did not return any output") + +@asyncio.coroutine +def is_node_connected(log, loop, node_id): + try: + stdout, _ = yield from util.run_command( + loop, 'salt %s test.ping' % node_id + ) + except subprocess.CalledProcessError: + log.warning("test.ping command failed against node_id: %s", node_id) + return True + + up_minions = stdout.splitlines() + for line in up_minions: + if "True" in line: + return True + + return False + + +@asyncio.coroutine +def find_job(log, loop, node_id, job_id): + cmd = "salt -t 60 {node_id} saltutil.find_job {job_id} --output json".format( + node_id=node_id, job_id=job_id) + + try: + output, _ = yield from util.run_command(loop, cmd) + except util.CommandFailed as e: + raise SaltCommandFailed("Salt command failed: %s" % str(e)) + + if not output: + raise SaltCommandFailed("Empty response from command: %s" % cmd) + + try: + resp = json.loads(output) + except ValueError: + raise SaltCommandFailed("Failed to parse find_job output: %s" % output) + + if node_id not in resp: + raise SaltCommandFailed("Expected %s in find_job response" % node_id) + + if "jid" in resp[node_id]: + return resp[node_id] + + return None + +@asyncio.coroutine +def find_running_minions(log, loop): + ''' + Queries Salt for running jobs, and creates a dict with node_id & job_id + Returns the node_id and job_id + ''' + cmd = "salt -t 60 '*' saltutil.running --output json --out-indent -1" + + try: + output, _ = yield from util.run_command(loop, cmd) + except util.CommandFailed as e: + raise SaltCommandFailed("Salt command failed: %s" % str(e)) + + if not output: + raise SaltCommandFailed("Empty response from command: %s" % cmd) + + minions = {} + for line in output.split("\n"): + # Interested in only those minions which have a "tgt" attribute in the result, 
+ # as this points to a running target id minion + if "tgt" in line: + try: + resp = json.loads(line) + except ValueError: + raise SaltCommandFailed("Failed to parse find_minion output: %s" % output) + + # Get the job id ('jid') from the minion response and populate the dict, + # using node_id as key and job_id as value. + for key in resp: + minions[key] = resp[key][0]['jid'] + + log.info("Salt minions found: %s", minions) + return minions + +class SaltAsyncCommand(object): + def __init__(self, log, loop, target, command): + self._log = log + self._loop = loop + self._target = target + self._command = command + + self._job_id = None + + def _set_command(self, command): + self._command = command + + def _set_job_id(self, job_id): + self._job_id = job_id + + @asyncio.coroutine + def start(self): + cmd = "salt --async {target} cmd.run '{cmd}'".format( + target=self._target, + cmd=self._command, + ) + + stdout, stderr = yield from util.run_command(self._loop, cmd) + + for line in stdout.split("\n"): + if "job ID:" in line: + job_id = line.split(" ")[-1] + if job_id == "0": + raise SaltCommandFailed("Did not create a job id for async command: %s", stdout) + + self._job_id = job_id + + self._log.debug("Salt command (%s) started on node (%s) (jid: %s)", + cmd, self._target, self._job_id) + return + + raise SaltCommandFailed("Did not find async job id in output") + + @asyncio.coroutine + def is_running(self): + if not self._job_id: + raise SaltCommandNotStarted() + + @asyncio.coroutine + def job_exists(): + try: + job = yield from find_job(self._log, self._loop, self._target, self._job_id) + except SaltCommandFailed as e: + # Salt minion command failing is not a reliable indication that the process + # actually died. + self._log.warning("Ignoring find salt job %s error: %s", self._job_id, str(e)) + return True + + return job is not None + + for _ in range(3): + if (yield from job_exists()): + return True + + return False + + @asyncio.coroutine + def wait(self): + while True: + is_running = yield from self.is_running() + if not is_running: + return + + asyncio.sleep(.25) + + @asyncio.coroutine + def stop(self): + if not self._job_id: + raise SaltCommandNotStarted() + + cmd = "salt {target} saltutil.term_job {job_id}".format( + target=self._target, + job_id=self._job_id, + ) + + yield from util.run_command(self._loop, cmd) + + @asyncio.coroutine + def kill(self): + if not self._job_id: + raise SaltCommandNotStarted() + + cmd = "salt {target} saltutil.kill_job {job_id}".format( + target=self._target, + job_id=self._job_id, + ) + + yield from util.run_command(self._loop, cmd) + + +@asyncio.coroutine +def test_salt(loop, node): + logger.debug("Checking if node is connected") + assert (yield from is_node_connected(logger, loop, node)) + + logger.debug("Running sleep 10 command") + async_cmd = SaltAsyncCommand(logger, loop, node, "sleep 10") + yield from async_cmd.start() + + logger.debug("Check if sleep command is running") + is_running = yield from async_cmd.is_running() + assert is_running + + logger.debug("Stop the sleep command") + yield from async_cmd.stop() + + logger.debug("Check if sleep command is no longer running") + is_running = yield from async_cmd.is_running() + assert not is_running + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + logger = logging.getLogger("salt-test") + parser = argparse.ArgumentParser() + parser.add_argument("-n", "--node", required=True, help="A connected minion") + args = parser.parse_args() + + loop = asyncio.get_event_loop() + 
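+ # Run the salt self-test against the supplied minion node.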
loop.run_until_complete(test_salt(loop, args.node)) \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/util.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/util.py new file mode 100644 index 0000000..a3a16b4 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/util.py @@ -0,0 +1,38 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import subprocess + + +class CommandFailed(Exception): + pass + + +@asyncio.coroutine +def run_command(loop, cmd): + cmd_proc = yield from asyncio.create_subprocess_shell( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, loop=loop + ) + stdout, stderr = yield from cmd_proc.communicate() + if cmd_proc.returncode != 0: + raise CommandFailed("Starting async command (%s) failed (rc=%s). (stderr: %s)", + cmd, cmd_proc.returncode, stderr) + + return stdout.decode(), stderr.decode() \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rwmctasklet.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rwmctasklet.py new file mode 100755 index 0000000..b81b2f2 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rwmctasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
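+ # As a result, this module is only a thin shim that re-exports the real
+ # tasklet class from the site-packages implementation.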
+ +import rift.tasklets.rwmctasklet + +class Tasklet(rift.tasklets.rwmctasklet.MissionControlTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/yang/CMakeLists.txt b/modules/core/mano/rwmc/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..41bbba1 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/CMakeLists.txt @@ -0,0 +1,34 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 2015/08/01 +# + +## +# Yang targets +## + +rift_add_yang_target( + TARGET rwmc_yang + YANG_FILES rw-mc.yang + LIBRARIES + rwcloud_yang_gen + DEPENDS + rwcloud_yang + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + rwsdn_yang_gen +) + +## +# Install the XML file +## +install( + FILES + ../cli/cli_rwmc.xml + ../cli/cli_rwmc_schema_listing.txt + DESTINATION usr/data/manifest + COMPONENT ${PKG_LONG_NAME} +) + diff --git a/modules/core/mano/rwmc/plugins/yang/Makefile b/modules/core/mano/rwmc/plugins/yang/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwmc/plugins/yang/rw-mc.cli.xml b/modules/core/mano/rwmc/plugins/yang/rw-mc.cli.xml new file mode 100755 index 0000000..2bd7b84 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/rw-mc.cli.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/modules/core/mano/rwmc/plugins/yang/rw-mc.tailf.yang b/modules/core/mano/rwmc/plugins/yang/rw-mc.tailf.yang new file mode 100644 index 0000000..d527230 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/rw-mc.tailf.yang @@ -0,0 +1,78 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-mc-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-mc-annotation"; + prefix "rw-mc-ann"; + + import rw-mc + { + prefix rw-mc; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-mc:opdata" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:start-launchpad" { + tailf:actionpoint rw_action; + } + + tailf:annotate "/rw-mc:mgmt-domain/rw-mc:domain/rw-mc:launchpad" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:stop-launchpad" { + tailf:actionpoint rw_action; + } + + tailf:annotate "/rw-mc:vm-pool/rw-mc:pool/rw-mc:available" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:network-pool/rw-mc:pool/rw-mc:available" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:network-pool/rw-mc:pool/rw-mc:assigned-detail" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:vm-pool/rw-mc:pool/rw-mc:mgmt-domain" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:vm-pool/rw-mc:pool/rw-mc:assigned-detail" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:network-pool/rw-mc:pool/rw-mc:mgmt-domain" { + 
tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:cloud-account/rw-mc:account/rw-mc:pools" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:cloud-account/rw-mc:account/rw-mc:resources" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:cloud-account/rw-mc:account/rw-mc:connection" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:mission-control" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/rwmc/plugins/yang/rw-mc.yang b/modules/core/mano/rwmc/plugins/yang/rw-mc.yang new file mode 100755 index 0000000..a16a08e --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/rw-mc.yang @@ -0,0 +1,519 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-mc.yang + * @author Austin Cormier + * @author Joshua Downer + * @date 2015/07/30 + * @brief Mission Control Yang + */ + +module rw-mc +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-mc"; + prefix "rw-mc"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import rwsdn { + prefix "rwsdn"; + } + + import rw-notify-ext { + prefix "rwnotify"; + } + + import rw-log { + prefix "rwlog"; + } + + revision 2014-07-30 { + description + "Initial revision."; + } + + typedef launchpad-state { + description "State of the launchpad within the mgmt-domain"; + type enumeration { + enum pending; + enum crashed; + enum stopping; + enum stopped; + enum starting; + enum configuring; + enum started; + } + } + + container mission-control { + rwpb:msg-new Uptime; + description "Show Mission Control related information"; + config false; + + leaf uptime { + description "Show the Mission Control uptime"; + type string; + } + + leaf create-time { + description + "Creation timestamp of the Mission Control. 
+ The timestamp is expressed as seconds + since unix epoch - 1970-01-01T00:00:00Z"; + type uint32; + } + } + + container sdn { + rwpb:msg-new SDNConfig; + list account { + rwpb:msg-new SDNAccount; + description "Configure SDN Accounts"; + + key "name"; + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + uses rwsdn:sdn-provider-auth; + } + } + + container cloud-account { + rwpb:msg-new CloudConfig; + list account { + rwpb:msg-new CloudAccount; + description "Configure Cloud Accounts"; + + max-elements 8; + key "name"; + + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + + uses rwcal:provider-auth; + + container resources { + rwpb:msg-new CloudResources; + description "The list of available resources belonging to this cloud account"; + config false; + + list vm { + description "The list of available VM's belonging to this cloud account"; + key "id"; + leaf id { + type string; + } + leaf name { + type string; + } + leaf available { + type boolean; + } + } + + list network { + description "The list of available Network's belonging to this cloud account"; + key "id"; + leaf id { + type string; + } + leaf name { + type string; + } + leaf available { + type boolean; + } + } + + } + + container pools { + rwpb:msg-new CloudPools; + description "The lists of various pools associated with this cloud account"; + config false; + + list vm { + key "name"; + leaf name{ + type string; + } + } + list network { + key "name"; + leaf name{ + type string; + } + } + } + + container connection { + rwpb:msg-new CloudStatus; + description "The status of Cloud Account credientials"; + config false; + + leaf status { + description "Status of Cloud Account's current credentials"; + type string; + } + + leaf details { + description "Details of Cloud Account's connection status"; + type string; + } + } + } + } + + grouping common-pool-attrs { + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + + leaf mgmt-domain { + description "Mgmt-domain this pool is assigned to"; + config false; + type leafref { + path "/rw-mc:mgmt-domain/rw-mc:domain/name"; + } + } + + leaf cloud-account { + description "The cloud account to use for this vm pool"; + mandatory true; + type leafref { + path "../../../cloud-account/account/name"; + } + } + + leaf dynamic-scaling { + description "Denotes whether the pool is Static or can grow Dynamically"; + type boolean; + default false; + } + } + + container vm-pool { + list pool { + rwpb:msg-new VmPool; + description "Configure VM Pools"; + + max-elements 128; + key "name"; + + uses common-pool-attrs; + + list available { + description "The list of available VM's belonging to this pools cloud account"; + config false; + key "id"; + leaf id { + type string; + } + leaf name { + type string; + } + } + + list assigned { + description "The list of created VM's belonging to this pool"; + key "id"; + leaf id { + type string; + } + } + + list assigned-detail { + description "The list of created VM's belonging to this pool"; + config false; + key "id"; + leaf id { + type string; + } + + leaf is_reserved { + description "Flag indicating whether resource is reserved"; + type boolean; + } + + container resource-info { + description "Detailed resource information provided by the CAL"; + rwpb:msg-new VmPoolResourceInfo; + config false; + + uses rwcal:vm-info-item; + } + } + } + } + + container network-pool { + list pool { + rwpb:msg-new NetworkPool; + description "Configure Network Pools"; + + max-elements 128; + key "name"; + + uses 
common-pool-attrs; + + list available { + description "The list of available Networks's belonging to this pools cloud account"; + config false; + key "id"; + leaf id { + type string; + } + leaf name { + type string; + } + } + + list assigned { + description "The list of created networks's belonging to this pool"; + key "id"; + leaf id { + type string; + } + } + + list assigned-detail { + description "The list of created Networks belonging to this pool"; + config false; + key "id"; + leaf id { + type string; + } + + leaf is_reserved { + description "Flag indicating whether resource is reserved"; + type boolean; + } + + container resource-info { + description "Detailed resource information provided by the CAL"; + rwpb:msg-new NetworkPoolResourceInfo; + + uses rwcal:network-info-item; + } + } + } + } + + container mgmt-domain { + rwpb:msg-new MgmtDomainConfig; + rwcli:new-mode "mgmt-domain"; + description "Configure Management Domain"; + + list domain { + rwpb:msg-new MgmtDomain; + key "name"; + + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + + container pools { + rwpb:msg-new MgmtDomainPools; + description "The lists of various pools associated with this mgmt domain"; + + list vm { + key "name"; + leaf name { + type leafref { + path "/rw-mc:vm-pool/rw-mc:pool/name"; + } + } + } + list network { + key "name"; + leaf name { + type leafref { + path "/rw-mc:network-pool/rw-mc:pool/name"; + } + } + } + } + + container launchpad { + rwpb:msg-new MgmtDomainLaunchpad; + config false; + leaf state { + description "State of the mgmt-domain's launchpad"; + type launchpad-state; + } + + leaf state-details { + description "Details of the Launchpad's current state"; + type string; + } + + leaf ip_address { + description "VM IP address in use by the launchpad"; + type string; + } + + leaf uptime { + description "Show the Launchpad uptime"; + type string; + } + + leaf create-time { + description + "Creation timestamp of this Launchpad. 
+ The timestamp is expressed as seconds + since unix epoch - 1970-01-01T00:00:00Z"; + type uint32; + } + } + } + } + + container opdata { + rwpb:msg-new Opdata; + config false; + list foodata { + key name; + leaf name { + type string; + } + } + } + + rpc start-launchpad { + input { + rwpb:msg-new StartLaunchpadInput; + leaf mgmt-domain { + mandatory true; + type leafref { + path "/rw-mc:mgmt-domain/rw-mc:domain/name"; + } + } + } + } + + rpc stop-launchpad { + input { + rwpb:msg-new StopLaunchpadInput; + leaf mgmt-domain { + mandatory true; + type leafref { + path "/rw-mc:mgmt-domain/rw-mc:domain/name"; + } + } + } + } + + + /* + * Generic Logger Log Events - ID space 120000 - 120099 + */ + notification debug { + rwpb:msg-new Debug; + rwnotify:log-event-id 120000; + description + "Generic Debug Log"; + uses rwlog:severity-debug; + leaf category { + type string; + } + leaf log { + type string; + } + } + + notification info { + rwpb:msg-new Info; + rwnotify:log-event-id 120001; + description + "Generic Info Log"; + uses rwlog:severity-info; + leaf category { + type string; + } + leaf log { + type string; + } + } + + notification warn { + rwpb:msg-new Warn; + rwnotify:log-event-id 120002; + description + "Generic Warning Log"; + uses rwlog:severity-warning; + leaf category { + type string; + } + leaf log { + type string; + } + } + + notification error { + rwpb:msg-new Error; + rwnotify:log-event-id 120003; + description + "Generic Warning Log"; + uses rwlog:severity-error; + leaf category { + type string; + } + leaf log { + type string; + } + } + + notification critical { + rwpb:msg-new Critical; + rwnotify:log-event-id 120004; + description + "Generic Critical Log"; + uses rwlog:severity-critical; + leaf category { + type string; + } + leaf log { + type string; + } + } + + /* + * END - generic log events + */ + +} diff --git a/modules/core/mano/rwmc/ra/CMakeLists.txt b/modules/core/mano/rwmc/ra/CMakeLists.txt new file mode 100644 index 0000000..fe900b1 --- /dev/null +++ b/modules/core/mano/rwmc/ra/CMakeLists.txt @@ -0,0 +1,45 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Paul Laidler +# Creation Date: 09/16/2015 +# + +cmake_minimum_required(VERSION 2.8) + +install( + PROGRAMS + mission_control_systest + mission_control_delete_systest + mission_control_negative_systest + mission_control_negative_cloud_account_systest + mission_control_negative_mgmt_domain_systest + mission_control_negative_vmpool_systest + mission_control_reload_systest + DESTINATION usr/rift/systemtest/mission_control + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + pytest/conftest.py + pytest/test_mission_control.py + pytest/test_mission_control_delete.py + pytest/test_mission_control_negative.py + pytest/test_mission_control_negative_cloud_account.py + pytest/test_mission_control_negative_mgmt_domain.py + pytest/test_mission_control_negative_vmpool.py + DESTINATION usr/rift/systemtest/pytest/mission_control + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + racfg/mission_control_systest_cloudsim.racfg + racfg/mission_control_systest_openstack.racfg + racfg/mission_control_delete_systest_cloudsim.racfg + racfg/mission_control_reload_systest_openstack.racfg + racfg/mission_control_systest_cloudsim_negative.racfg + racfg/mission_control_systest_openstack_negative.racfg + DESTINATION + usr/rift/systemtest/mission_control + COMPONENT ${PKG_LONG_NAME}) + diff --git a/modules/core/mano/rwmc/ra/mission_control_delete_systest b/modules/core/mano/rwmc/ra/mission_control_delete_systest new file 
mode 100755 index 0000000..9c7d177 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_delete_systest @@ -0,0 +1,43 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/02 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control_delete.py" + +test_prefix="mission_control_delete_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} + diff --git a/modules/core/mano/rwmc/ra/mission_control_negative_cloud_account_systest b/modules/core/mano/rwmc/ra/mission_control_negative_cloud_account_systest new file mode 100755 index 0000000..5536e08 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_negative_cloud_account_systest @@ -0,0 +1,42 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/07 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_cloud_account.py" + +test_prefix="mission_control_negative_cloud_account_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_negative_mgmt_domain_systest b/modules/core/mano/rwmc/ra/mission_control_negative_mgmt_domain_systest new file mode 100755 index 0000000..174f05c --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_negative_mgmt_domain_systest @@ -0,0 +1,42 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/07 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_mgmt_domain.py" + +test_prefix="mission_control_negative_mgmt_domain_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? 
+ echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_negative_systest b/modules/core/mano/rwmc/ra/mission_control_negative_systest new file mode 100755 index 0000000..407f3b7 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_negative_systest @@ -0,0 +1,44 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/07 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v -p no:cacheprovider \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_vmpool.py \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_cloud_account.py \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_mgmt_domain.py" + +test_prefix="mission_control_negative_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_negative_vmpool_systest b/modules/core/mano/rwmc/ra/mission_control_negative_vmpool_systest new file mode 100755 index 0000000..0f21832 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_negative_vmpool_systest @@ -0,0 +1,42 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/07 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_vmpool.py" + +test_prefix="mission_control_negative_vmpool_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? 
+ echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_reload_systest b/modules/core/mano/rwmc/ra/mission_control_reload_systest new file mode 100755 index 0000000..54f5df8 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_reload_systest @@ -0,0 +1,45 @@ +#!/bin/bash +# +# +# Author(s): Varun Prasad +# Creation Date: 2015/12/22 +# +# Helper script for invoking the mission control system test using the systest_wrapper +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v -p no:cacheprovider -k 'not Teardown' \ + ${PYTEST_DIR}/mission_control/test_mission_control.py" + +REBOOT_SCRIPT_TEST="py.test -v -p no:cacheprovider -k 'test_wait_for_launchpad_started or Teardown' \ + ${PYTEST_DIR}/mission_control/test_mission_control.py" + +test_prefix="mission_control_reload_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_systest b/modules/core/mano/rwmc/ra/mission_control_systest new file mode 100755 index 0000000..2bbd951 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_systest @@ -0,0 +1,43 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/09/15 +# +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh +restconf=true + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/launchpad/test_startstop.py" + +test_prefix="mission_control_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/pytest/conftest.py b/modules/core/mano/rwmc/ra/pytest/conftest.py new file mode 100644 index 0000000..375016d --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/conftest.py @@ -0,0 +1,202 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import pytest +import os +import subprocess +import sys + +import rift.auto.log +import rift.auto.session +import rift.vcs.vcs +import logging + +import gi +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwMcYang', '1.0') + +from gi.repository import RwMcYang, RwCloudYang + +@pytest.fixture(scope='session', autouse=True) +def cloud_account_name(): + '''fixture which returns the name used to identify the cloud account''' + return 'cloud-0' + +@pytest.fixture(scope='session', autouse=True) +def mgmt_domain_name(): + '''fixture which returns the name used to identify the mgmt_domain''' + return 'mgmt-0' + +@pytest.fixture(scope='session', autouse=True) +def vm_pool_name(): + '''fixture which returns the name used to identify the vm resource pool''' + return 'vm-0' + +@pytest.fixture(scope='session', autouse=True) +def network_pool_name(): + '''fixture which returns the name used to identify the network resource pool''' + return 'net-0' + +@pytest.fixture(scope='session', autouse=True) +def port_pool_name(): + '''fixture which returns the name used to identify the port resource pool''' + return 'port-0' + +@pytest.fixture(scope='session', autouse=True) +def sdn_account_name(): + '''fixture which returns the name used to identify the sdn account''' + return 'sdn-0' + +@pytest.fixture(scope='session', autouse=True) +def sdn_account_type(): + '''fixture which returns the account type used by the sdn account''' + return 'odl' + +@pytest.fixture(scope='session', autouse=True) +def _riftlog_scraper_session(log_manager, confd_host): + '''Fixture which returns an instance of rift.auto.log.FileSource to scrape riftlog + + Arguments: + log_manager - manager of logging sources and sinks + confd_host - host on which confd is running (mgmt_ip) + ''' + scraper = rift.auto.log.FileSource(host=confd_host, path='/var/log/rift/rift.log') + scraper.skip_to('Configuration management startup complete.') + log_manager.source(source=scraper) + return scraper + +@pytest.fixture(scope='session') +def cloud_module(standalone_launchpad): + '''Fixture containing the module which defines cloud account + + Depending on whether or not the system is being run with a standalone + launchpad, a different module will be used to configure the cloud + account + + Arguments: + standalone_launchpad - fixture indicating if the system is being run with a standalone launchpad + + Returns: + module to be used when configuring a cloud account + ''' + cloud_module = RwMcYang + if standalone_launchpad: + cloud_module = RwCloudYang + return cloud_module + +@pytest.fixture(scope='session') +def cloud_xpath(standalone_launchpad): + '''Fixture containing the xpath that should be used to configure a cloud account + + Depending on whether or not the system is being run with a standalone + launchpad, a different xpath will be used to configure the cloud + account + + Arguments: + standalone_launchpad - fixture indicating if the system is being run with a standalone launchpad + + Returns: + xpath to be used when configure a cloud account + ''' + xpath = '/cloud-account/account' + if standalone_launchpad: + xpath 
= '/cloud/account' + return xpath + +@pytest.fixture(scope='session', autouse=True) +def cloud_account(cloud_module, cloud_account_name, cloud_host, cloud_type): + '''fixture which returns an instance of RwMcYang.CloudAccount + + Arguments: + cloud_module - fixture: module defining cloud account + cloud_account_name - fixture: name used for cloud account + cloud_host - fixture: cloud host address + cloud_type - fixture: cloud account type + + Returns: + An instance of CloudAccount + ''' + account = None + + if cloud_type == 'lxc': + account = cloud_module.CloudAccount.from_dict({ + "name": cloud_account_name, + "account_type": "cloudsim"}) + + elif cloud_type == 'openstack': + username = 'pluto' + password = 'mypasswd' + auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host) + project_name = os.getenv('PROJECT_NAME', 'demo') + mgmt_network = os.getenv('MGMT_NETWORK', 'private') + account = cloud_module.CloudAccount.from_dict({ + 'name': cloud_account_name, + 'account_type': 'openstack', + 'openstack': { + 'admin': True, + 'key': username, + 'secret': password, + 'auth_url': auth_url, + 'tenant': project_name, + 'mgmt_network': mgmt_network}}) + + return account + +@pytest.fixture(scope='session') +def _launchpad_scraper_session(request, log_manager, mgmt_domain_name): + '''fixture which returns an instance of rift.auto.log_scraper.FileSource to scrape the launchpad + + Arguments: + log_manager - manager of log sources and sinks + mgmt_domain_name - the management domain created for the launchpad + ''' + if request.config.getoption("--lp-standalone"): + return + + scraper = rift.auto.log.FileSource(host=None, path='/var/log/launchpad_console.log') + log_manager.source(source=scraper) + return scraper + +@pytest.fixture(scope='function', autouse=False) +def _connect_launchpad_scraper(request, _launchpad_scraper_session, mgmt_session, mgmt_domain_name, standalone_launchpad): + '''Determines the address of the launchpad and connects the launchpad scraper to it + Needed because the launchpad address isn't known at the start of the test session. + + Arguments: + mgmt_session - management interface session + _launchpad_scraper_session - scraper responsible for collecting launchpad_console log + mgmt_domain_name - mgmt-domain in which the launchpad is located + ''' + if standalone_launchpad: + return + + if not _launchpad_scraper_session.connected(): + proxy = mgmt_session.proxy(RwMcYang) + launchpad_address = proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + if launchpad_address: + _launchpad_scraper_session.connect(launchpad_address) + +@pytest.fixture(scope='session') +def launchpad_scraper(_launchpad_scraper_session): + '''Fixture exposing the scraper used to scrape the launchpad console log + + Arguments: + _launchpad_scraper_session - instance of rift.auto.log_scraper.FileSource targeting the launchpad console log + ''' + return _launchpad_scraper_session \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control.py new file mode 100755 index 0000000..666fc9b --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control.py @@ -0,0 +1,332 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file test_launchpad_startstop.py +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 06/19/2015 +@brief System test of basic mission control functionality +""" + +import pytest + +import gi +gi.require_version('RwMcYang', '1.0') + +from gi.repository import RwMcYang + +@pytest.fixture(scope='module') +def proxy(request, mgmt_session): + '''fixture which returns a proxy to RwMcYang + + Arguments: + request - pytest fixture request + mgmt_session - mgmt_session fixture - instance of a rift.auto.session class + ''' + return mgmt_session.proxy(RwMcYang) + + +@pytest.mark.setup('launchpad') +@pytest.mark.incremental +class TestMissionControlSetup: + def test_create_odl_sdn_account(self, proxy, sdn_account_name, sdn_account_type): + '''Configure sdn account + + Asserts: + SDN name and accout type. + ''' + sdn_account = RwMcYang.SDNAccount( + name=sdn_account_name, + account_type=sdn_account_type) + xpath = "/sdn/account[name='%s']" % sdn_account_name + proxy.create_config(xpath, sdn_account) + + sdn_account = proxy.get(xpath) + assert sdn_account.account_type == sdn_account_type + assert sdn_account.name == sdn_account_name + + def test_create_cloud_account(self, mgmt_session, cloud_module, cloud_xpath, cloud_account): + '''Configure a cloud account + + Asserts: + Cloud name and cloud type details + ''' + proxy = mgmt_session.proxy(cloud_module) + proxy.create_config(cloud_xpath, cloud_account) + xpath = '{}[name="{}"]'.format(cloud_xpath, cloud_account.name) + response = proxy.get(xpath) + assert response.name == cloud_account.name + assert response.account_type == cloud_account.account_type + + + @pytest.mark.feature('mission-control') + def test_create_mgmt_domain(self, proxy, mgmt_domain_name): + '''Configure mgmt domain + + Asserts: + If the launchpad configuration is created and updated succesfully. 
+ ''' + xpath = '/mgmt-domain/domain' + domain_config = RwMcYang.MgmtDomain( + name=mgmt_domain_name) + proxy.create_config(xpath, domain_config) + + xpath += "[name='{}']".format(mgmt_domain_name) + proxy.merge_config(xpath, domain_config) + + response = proxy.get(xpath) + assert response.launchpad.state == 'pending' + + @pytest.mark.feature('mission-control') + def test_create_vm_pool(self, proxy, cloud_account_name, vm_pool_name): + '''Configure vm pool + + Asserts : + Newly configured vm pool has no resources assigned to it + ''' + pool_config = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + proxy.create_config('/vm-pool/pool', pool_config) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert assigned_ids == [] # pool contained resources before any were assigned + + + @pytest.mark.feature('mission-control') + def test_assign_vm_resource_to_vm_pool(self, proxy, cloud_account_name, vm_pool_name, launchpad_vm_id): + '''Configure a vm resource by adding it to a vm pool + + Asserts: + Cloud account has available resources + VM pool has has available resources + Cloud account and vm pool agree on available resources + Configured resource is reflected as assigned in operational data post assignment + ''' + account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name) + if launchpad_vm_id: + cloud_vm_ids = [vm.id for vm in account.resources.vm if vm.id == launchpad_vm_id] + else: + cloud_vm_ids = [vm.id for vm in account.resources.vm] + assert cloud_vm_ids != [] + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + if launchpad_vm_id: + available_ids = [vm.id for vm in pool.available if vm.id == launchpad_vm_id] + else: + available_ids = [vm.id for vm in pool.available] + # NOTE: Desired API - request for a list of leaf elements + # available_ids = proxy.get("/vm-pool/pool[name='%s']/available/id" % vm_pool_name) + assert available_ids != [] # Assert pool has available resources + assert set(cloud_vm_ids).difference(set(available_ids)) == set([]) # Assert not split brain + + pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool_name, + 'cloud_account':cloud_account_name, + 'dynamic_scaling': True, + 'assigned':[{'id':available_ids[0]}]}) + proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert available_ids[0] in assigned_ids # Configured resource shows as assigned + + + @pytest.mark.feature('mission-control') + def test_create_network_pool(self, proxy, cloud_account_name, network_pool_name): + '''Configure network pool + + Asserts : + Newly configured network pool has no resources assigned to it + ''' + pool_config = RwMcYang.NetworkPool( + name=network_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + + proxy.create_config('/network-pool/pool', pool_config) + + pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name) + assigned_ids = [network.id for network in pool.assigned] + assert assigned_ids == [] # pool contained resources before any were assigned + + + @pytest.mark.feature('mission-control') + def test_assign_network_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, network_pool_name): + '''Configure mgmt_domain by adding a network pool to it + ''' + pool_config = RwMcYang.MgmtDomainPools_Network(name=network_pool_name) + 
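+        # A pool is attached to a mgmt-domain by creating an entry under the
+        # domain's pools container defined in rw-mc.yang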
proxy.create_config("/mgmt-domain/domain[name='%s']/pools/network" % mgmt_domain_name, pool_config) + + + @pytest.mark.feature('mission-control') + def test_assign_vm_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, vm_pool_name): + '''Configure mgmt_domain by adding a VM pool to it + ''' + pool_config = RwMcYang.MgmtDomainPools_Vm(name=vm_pool_name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/vm" % mgmt_domain_name, pool_config) + + + @pytest.mark.feature('mission-control') + def test_wait_for_launchpad_started(self, proxy, mgmt_domain_name): + '''Wait for the launchpad to start + + Additionally begins the launchpad scraper. + + Asserts: + Launchpad reaches state 'started' + ''' + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=400, + fail_on=['crashed']) + + +@pytest.mark.incremental +@pytest.mark.depends('launchpad') +class TestMissionControl: + + def test_show_odl_sdn_account(self, proxy, sdn_account_name, sdn_account_type): + '''Showing sdn account configuration + + Asserts: + sdn_account.account_type is what was configured + ''' + xpath = "/sdn/account[name='%s']" % sdn_account_name + sdn_account = proxy.get_config(xpath) + assert sdn_account.account_type == sdn_account_type + + + @pytest.mark.feature('mission-control') + def test_launchpad_stats(self, proxy, mgmt_domain_name): + '''Verify launchpad stats + + Asserts: + Create time and uptime are configured for launchpad + ''' + xpath = "/mgmt-domain/domain[name='{}']/launchpad/uptime".format(mgmt_domain_name) + uptime = proxy.get(xpath) + assert len(uptime) > 0 + + xpath = "/mgmt-domain/domain[name='{}']/launchpad/create-time".format(mgmt_domain_name) + create_time = proxy.get(xpath) + assert int(create_time) > 0 + + @pytest.mark.feature('mission-control') + def test_mission_control_stats(self, proxy, mgmt_domain_name): + '''Verify Mission Control stats + + Asserts: + Create time and uptime are configured for MissionControl + ''' + xpath = "/mission-control/uptime" + uptime = proxy.get(xpath) + assert len(uptime) > 0 + + xpath = "/mission-control/create-time" + create_time = proxy.get(xpath) + assert int(create_time) > 0 + +@pytest.mark.teardown('launchpad') +@pytest.mark.incremental +class TestMissionControlTeardown: + + @pytest.mark.feature('mission-control') + def test_stop_launchpad(self, proxy, mgmt_domain_name): + '''Invoke stop launchpad RPC + + Asserts: + Launchpad begins test in state 'started' + Launchpad finishes test in state 'stopped' + ''' + + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=10, + fail_on=['crashed']) + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=120, + fail_on=['crashed']) + + + @pytest.mark.feature('mission-control') + def test_remove_vm_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, vm_pool_name): + '''Unconfigure mgmt domain: remove a vm pool''' + xpath = "/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain_name, vm_pool_name) + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_remove_network_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, network_pool_name): + '''Unconfigure mgmt_domain: remove a network pool''' + xpath = "/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain_name, 
network_pool_name) + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_delete_mgmt_domain(self, proxy, mgmt_domain_name): + '''Unconfigure mgmt_domain: delete mgmt_domain''' + xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool: remove a vm resource + + Asserts: + Resource is no longer assigned after being unconfigured + ''' + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert assigned_ids != [] # Assert resource is still assigned + + for assigned_id in assigned_ids: + xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool_name, assigned_id) + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_delete_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool''' + xpath = "/vm-pool/pool[name='%s']" % vm_pool_name + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_delete_network_pool(self, proxy, network_pool_name): + '''Unconfigure network pool''' + xpath = "/network-pool/pool[name='%s']" % network_pool_name + proxy.delete_config(xpath) + + def test_delete_odl_sdn_account(self, proxy, sdn_account_name): + '''Unconfigure sdn account''' + xpath = "/sdn/account[name='%s']" % sdn_account_name + proxy.delete_config(xpath) + + def test_delete_cloud_account(self, mgmt_session, cloud_module, cloud_xpath, cloud_account_name): + '''Unconfigure cloud_account''' + proxy = mgmt_session.proxy(cloud_module) + xpath = "{}[name='{}']".format(cloud_xpath, cloud_account_name) + proxy.delete_config(xpath) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_delete.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_delete.py new file mode 100755 index 0000000..91833c7 --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_delete.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +# +# + +@file test_mission_control_delete.py +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 06/19/2015 +@brief System test exercising delete of mission control configuration +""" + +import pytest +import rift.auto.proxy +import gi +gi.require_version('RwMcYang', '1.0') + +from gi.repository import RwMcYang + +@pytest.fixture(scope='module') +def proxy(request, mgmt_session): + '''fixture which returns a proxy to RwMcYang + + Arguments: + request - pytest fixture request + mgmt_session - mgmt_session fixture - instance of a rift.auto.session class + ''' + return mgmt_session.proxy(RwMcYang) + +@pytest.fixture(scope='module') +def mgmt_domain(mgmt_domain_name): + mgmt_domain = RwMcYang.MgmtDomain(name=mgmt_domain_name) + return mgmt_domain + +@pytest.fixture(scope='module') +def vm_pool(vm_pool_name, cloud_account_name): + vm_pool = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + return vm_pool + +@pytest.fixture(scope='module') +def network_pool(network_pool_name, cloud_account_name): + network_pool = RwMcYang.NetworkPool( + name=network_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + return network_pool + +@pytest.fixture(scope='module') +def sdn_account(sdn_account_name, sdn_account_type): + sdn_account = RwMcYang.SDNAccount( + name=sdn_account_name, + account_type=sdn_account_type, + ) + return sdn_account + +@pytest.fixture(scope='function', autouse=True) +def launchpad_setup(request, proxy, cloud_account, mgmt_domain, vm_pool, network_pool, sdn_account): + def _teardown(): + launchpad_state = proxy.get("/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name) + if launchpad_state: + if launchpad_state in ['configuring', 'starting']: + launchpad_state = proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name, + 'started', + timeout=200, + fail_on=['crashed']) + + if launchpad_state == 'started': + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain.name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + launchpad_state = proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name, + 'stopped', + timeout=200, + fail_on=['crashed']) + + if proxy.get_config("/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain.name, vm_pool.name)): + proxy.delete_config("/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain.name, vm_pool.name)) + + if proxy.get_config("/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain.name, network_pool.name)): + proxy.delete_config("/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain.name, network_pool.name)) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) + if pool: + for vm_id in [vm.id for vm in pool.assigned]: + proxy.delete_config("/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool.name, vm_id)) + + if proxy.get_config("/vm-pool/pool[name='%s']" % vm_pool.name): + proxy.delete_config("/vm-pool/pool[name='%s']" % vm_pool.name) + + if proxy.get_config("/network-pool/pool[name='%s']" % network_pool.name): + proxy.delete_config("/network-pool/pool[name='%s']" % network_pool.name) + + if proxy.get_config("/mgmt-domain/domain[name='%s']" % mgmt_domain.name): + proxy.delete_config("/mgmt-domain/domain[name='%s']" % mgmt_domain.name) + + if proxy.get_config("/cloud-account/account[name='%s']" % cloud_account.name): + proxy.delete_config("/cloud-account/account[name='%s']" % 
cloud_account.name) + + if proxy.get_config("/sdn/account[name='%s']" % sdn_account.name): + proxy.delete_config("/sdn/account[name='%s']" % sdn_account.name) + + def _setup(): + proxy.create_config('/cloud-account/account', cloud_account) + proxy.create_config('/mgmt-domain/domain', mgmt_domain) + proxy.create_config('/vm-pool/pool', vm_pool) + proxy.create_config('/network-pool/pool', network_pool) + proxy.create_config('/sdn/account', sdn_account) + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) + available_ids = [vm.id for vm in pool.available] + pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool.name, + 'cloud_account':cloud_account.name, + 'assigned':[{'id':available_ids[0]}]}) + proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool.name, pool_config) + + mgmt_vm_pool = RwMcYang.MgmtDomainPools_Vm(name=vm_pool.name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/vm" % mgmt_domain.name, mgmt_vm_pool) + + mgmt_network_pool = RwMcYang.MgmtDomainPools_Network(name=network_pool.name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/network" % mgmt_domain.name, mgmt_network_pool) + + # Teardown any existing launchpad configuration, and set it back up again + _teardown() + _setup() + + + +class DeleteResources: + def test_remove_vm_pool_from_mgmt_domain(self, proxy, mgmt_domain, vm_pool): + '''Unconfigure mgmt domain: remove a vm pool''' + # Can't remove vm pool without removing resources first +# pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) +# if pool: +# for vm_id in [vm.id for vm in pool.assigned]: +# proxy.delete_config("/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool.name, vm_id)) + + xpath = "/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain.name, vm_pool.name) + proxy.delete_config(xpath) + + def test_remove_network_pool_from_mgmt_domain(self, proxy, mgmt_domain, network_pool): + '''Unconfigure mgmt_domain: remove a network pool''' + xpath = "/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain.name, network_pool.name) + proxy.delete_config(xpath) + + def test_delete_mgmt_domain(self, proxy, vm_pool, mgmt_domain): + '''Unconfigure mgmt_domain: delete mgmt_domain''' + # Can't remove vm pool without removing resources first +# pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) +# if pool: +# for vm_id in [vm.id for vm in pool.assigned]: +# proxy.delete_config("/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool.name, vm_id)) + + xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain.name + proxy.delete_config(xpath) + + def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool): + '''Unconfigure vm_pool: remove a vm resource + + Asserts: + Resource is no longer assigned after being unconfigured + ''' + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) + assigned_ids = [vm.id for vm in pool.assigned] + assert assigned_ids != [] # Assert resource is still assigned + + for assigned_id in assigned_ids: + xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool.name, assigned_id) + proxy.delete_config(xpath) + + def test_delete_vm_pool(self, proxy, vm_pool): + '''Unconfigure vm_pool''' + # Can't delete vm pool without removing it from mgmt domain first + with pytest.raises(rift.auto.proxy.ProxyRequestError) as excinfo: + xpath = "/vm-pool/pool[name='%s']" % vm_pool.name + proxy.delete_config(xpath) + assert 'illegal reference' in str(excinfo.value) + + def test_delete_network_pool(self, proxy, network_pool): + '''Unconfigure network pool''' + # Can't 
delete network pool without removing it from mgmt domain first + with pytest.raises(rift.auto.proxy.ProxyRequestError) as excinfo: + xpath = "/network-pool/pool[name='%s']" % network_pool.name + proxy.delete_config(xpath) + assert 'illegal reference' in str(excinfo.value) + + def test_delete_cloud_account(self, proxy, cloud_account): + '''Unconfigure cloud_account''' + # Can't delete cloud account without first deleting all of the pools associated with it + with pytest.raises(rift.auto.proxy.ProxyRequestError) as excinfo: + xpath = "/cloud-account/account[name='%s']" % cloud_account.name + proxy.delete_config(xpath) + assert 'illegal reference' in str(excinfo.value) + + + def test_delete_odl_sdn_account(self, proxy, sdn_account): + '''Unconfigure sdn account''' + xpath = "/sdn/account[name='%s']" % sdn_account.name + proxy.delete_config(xpath) + + +class TestDeleteFromStartingLaunchpad(DeleteResources): + pass + +@pytest.mark.slow +class TestDeleteFromStoppedLaunchpad(DeleteResources): + @pytest.fixture(scope='function', autouse=True) + def launchpad_stopped(self, launchpad_setup, proxy, mgmt_domain): + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name, + 'started', + timeout=200, + fail_on=['crashed']) + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain.name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name, + 'stopped', + timeout=200, + fail_on=['crashed']) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative.py new file mode 100755 index 0000000..e22e3fb --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os + +import pytest + +dirpath = os.path.dirname(__file__) + +options = '-v' +pytest.main([options, os.path.join(dirpath, 'test_mission_control_negative_cloud_account.py')]) +pytest.main([options, os.path.join(dirpath, 'test_mission_control_negative_mgmt_domain.py')]) +pytest.main([options, os.path.join(dirpath, 'test_mission_control_negative_vmpool.py')]) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_cloud_account.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_cloud_account.py new file mode 100755 index 0000000..a3dd58f --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_cloud_account.py @@ -0,0 +1,379 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file test_mission_control_negative_cloud_account.py +@author +@date 12/04/2015 +@brief System test of negative (failure) mission control functionality +""" + +import pytest + +import gi +gi.require_version('RwMcYang', '1.0') + +from gi.repository import GLib, RwMcYang +from rift.auto.session import ProxyRequestError + + +@pytest.fixture(scope='module') +def proxy(request, mgmt_session): + '''fixture which returns a proxy to RwMcYang + + Arguments: + request - pytest fixture request + mgmt_session - mgmt_session fixture - instance of a rift.auto.session + class + + ''' + return mgmt_session.proxy(RwMcYang) + + +@pytest.fixture(scope='session') +def cloud_account_type(request, cloud_type): + '''Workaround for the mixed labeled 'lxc' and 'cloudsim' + + Arguments: + cloud_type - The cloud type supplied via pytest command line parameter + + ''' + if cloud_type == 'lxc': + return 'cloudsim' + else: + return cloud_type + + +@pytest.mark.incremental +class TestCloudAccount: + '''Tests behaviors and properties common to all cloud account types''' + + # + # Test cloud_name + # + + def test_create_cloud_account_with_no_name(self, proxy, cloud_account_type): + '''Test that a cloud account cannot be created if no name is provided + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_type - a pytest fixture for the cloud account type + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + properties = { + 'account_type': cloud_account_type, + } + + cloud_account = RwMcYang.CloudAccount.from_dict(properties) + with pytest.raises(ProxyRequestError): + proxy.create_config('/cloud-account/account', cloud_account) + + def test_create_cloud_account_with_empty_name(self, proxy, cloud_account_type): + '''Test that a cloud account cannot be created if name is an empty string + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_type - a pytest fixture for the cloud account type + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + properties = { + 'account_type': cloud_account_type, + 'name': '', + } + + cloud_account = RwMcYang.CloudAccount.from_dict(properties) + with pytest.raises(ProxyRequestError): + proxy.create_config('/cloud-account/account', cloud_account) + + def test_create_cloud_account_with_null_name(self, proxy, cloud_account_type): + '''Test that a cloud account cannot be created if name is null + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_type - a pytest fixture for the cloud account type + + Asserts: + TypeError is raised + + ''' + properties = { + 'account_type': cloud_account_type, + 'name': None, + } + + with pytest.raises(TypeError): + cloud_account = RwMcYang.CloudAccount.from_dict(properties) + + # + # Test cloud account type + # + + def _test_create_cloud_account_with_no_type(self, proxy, cloud_account_name): + '''Test that a cloud account cannot be created if no type is provided + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + rift.auto.proxy.ProxyRequestError is raised 
+ + ''' + properties = { + 'name': cloud_account_name, + } + + cloud_account = RwMcYang.CloudAccount.from_dict(properties) + with pytest.raises(ProxyRequestError): + proxy.create_config('/cloud-account/account', cloud_account) + + def test_create_cloud_account_with_empty_type(self, proxy, cloud_account_name): + '''Test that a cloud account cannot be created if cloud type is an empty string + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + gi.repository.GLib.Error is raised + + ''' + properties = { + 'account_type': '', + 'name': cloud_account_name, + } + + with pytest.raises(GLib.Error): + cloud_account = RwMcYang.CloudAccount.from_dict(properties) + + def test_create_cloud_account_with_invaid_type(self, proxy, cloud_account_name): + '''Test that a cloud account cannot be created if the cloud type is invalid + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + gi.repository.GLib.Error is raised + + ''' + properties = { + 'account_type': 'Nemesis', + 'name': cloud_account_name, + } + + with pytest.raises(GLib.Error): + cloud_account = RwMcYang.CloudAccount.from_dict(properties) + + def test_create_cloud_account_with_null_type(self, proxy, cloud_account_name): + '''Test that a cloud account cannot be created if the cloud type is null + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + TypeError is raised + + ''' + properties = { + 'account_type': None, + 'name': cloud_account_name, + } + + with pytest.raises(TypeError): + cloud_account = RwMcYang.CloudAccount.from_dict(properties) + + # + # Test change cloud type + # + + def test_create_cloud_account(self, proxy, cloud_account_name, cloud_account): + '''Creates a cloud account for subsequent tests + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + cloud_account - a pytest fixture for the cloud account + + Asserts: + None + + ''' + assert cloud_account_name == cloud_account.name + proxy.create_config('/cloud-account/account', cloud_account) + + def test_change_cloud_account_type(self, proxy, cloud_account_name): + '''Test that a cloud account type cannot be changed + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + account_type_map = { + 'cloudsim': 'openstack', + 'openstack': 'cloudsim', + } + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + cloud_account = proxy.get(xpath) + updated_cloud_account = RwMcYang.CloudAccount.from_dict({ + 'name': cloud_account.name, + 'account_type': account_type_map[cloud_account.account_type], + }) + with pytest.raises(ProxyRequestError): + proxy.merge_config(xpath, updated_cloud_account) + + def test_create_cloud_account_with_duplicate_name(self, proxy, cloud_account_name, + cloud_account): + '''Attempt to create a cloud account with a duplicate name + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + assert cloud_account_name == cloud_account.name + with pytest.raises(ProxyRequestError): + proxy.create_config('/cloud-account/account', cloud_account) + + def 
test_delete_cloud_account_with_vm_pool_with_vm_resources(self, proxy, + cloud_account_name, vm_pool_name): + '''Tests that a cloud account cannot be deleted if it has a vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + vm_pool_name - a pytest fixture for the primary vm pool name + + Asserts: + A cloud account exists for the cloud_account_name + Newly configured vm pool has no resources assigned to it + Cloud account has available resources + VM pool has has available resources + Cloud account and vm pool agree on available resources + Configured resource is reflected as assigned in operational data + post assignment + rift.auto.proxy.ProxyRequestError is raised + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + cloud_account = proxy.get(xpath) + assert cloud_account is not None + + pool_config = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + proxy.create_config('/vm-pool/pool', pool_config) + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) == 0 # pool contained resources before any were assigned + + account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name) + cloud_vm_ids = [vm.id for vm in account.resources.vm] + assert len(cloud_vm_ids) >= 1 + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + available_ids = [vm.id for vm in pool.available] + assert len(available_ids) >= 1 # Assert pool has available resources + # Assert not split brain + assert set(cloud_vm_ids).difference(set(available_ids)) == set([]) + + pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool_name, + 'cloud_account':cloud_account_name, + 'assigned':[{'id':available_ids[0]}]}) + proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config) + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert available_ids[0] in assigned_ids # Configured resource shows as assigned + + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + with pytest.raises(ProxyRequestError): + proxy.delete_config(xpath) + + +@pytest.mark.incremental +class TestCloudAccountNegativeTeardown: + + def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool: Remove the primary vm pool resource(s) + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + vm_pool_name - a pytest fixture for the VM pool name + + Asserts: + Resource is assigned before unassigning + Resource is no longer assigned after being unconfigured + + ''' + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) >= 1 # Assert resource is still assigned + + for assigned_id in assigned_ids: + xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool_name, assigned_id) + proxy.delete_config(xpath) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) == 0 # Assert resource is not assigned + + def test_delete_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool: Remove the primary vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + vm_pool_name - a pytest fixture for the VM pool name + + Asserts: + None + + ''' + xpath = "/vm-pool/pool[name='%s']" % vm_pool_name + 
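# The previous teardown step already removed every assigned VM from this pool, so
# deleting the keyed /vm-pool/pool list entry below is expected to succeed.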
proxy.delete_config(xpath) + + def test_delete_cloud_account(self, proxy, cloud_account_name): + '''Unconfigure cloud_account + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + None + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + proxy.delete_config(xpath) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_mgmt_domain.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_mgmt_domain.py new file mode 100755 index 0000000..d1aaaa6 --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_mgmt_domain.py @@ -0,0 +1,497 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file test_mission_control_negative_mgmt_domain.py +@author RIFT.io +@date 12/4/2015 +@brief System test of negative (failure) mission control functionality +""" + +import pytest + +import gi +gi.require_version('RwMcYang', '1.0') + +from gi.repository import RwMcYang +from rift.auto.session import ProxyRequestError +from rift.auto.session import ProxyExpectTimeoutError + + +def start_launchpad(proxy, mgmt_domain_name): + '''Invoke start launchpad RPC + + Arguments: + mgmt_domain_name - the management domain name string + + Asserts: + Launchpad begins test in state 'stopped' + Launchpad finishes test in state 'started' + + ''' + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=10, + fail_on=['crashed']) + start_launchpad_input = RwMcYang.StartLaunchpadInput(mgmt_domain=mgmt_domain_name) + start_launchpad_output = proxy.rpc(start_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=120, + fail_on=['crashed']) + + +@pytest.fixture(scope='module') +def proxy(request, mgmt_session): + '''fixture which returns a proxy to RwMcYang + + Arguments: + request - pytest fixture request + mgmt_session - mgmt_session fixture - instance of a rift.auto.session + class + + ''' + return mgmt_session.proxy(RwMcYang) + + +@pytest.mark.incremental +class TestMgmtDomainNegativeSetup: + '''Stand up object needed for the lifecycle of this test script ''' + + def test_create_cloud_account(self, proxy, logger, cloud_account): + '''Configure a cloud account + + This creates a cloud account to test other objects + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + logger - a pytest fixture to an instance of Logger + cloud_account - a pytest fixture to a cloud account object + + Asserts: + None + + ''' + proxy.create_config('/cloud-account/account', cloud_account) + + def test_create_vm_pool(self, proxy, cloud_account_name, vm_pool_name): + '''Configure vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + vm_pool_name - a pytest fixture for the VM pool name 
+ + Asserts: + A cloud account exists for the cloud_account_name + Newly configured vm pool has no resources assigned to it + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + cloud_account = proxy.get(xpath) + assert cloud_account is not None + + pool_config = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + proxy.create_config('/vm-pool/pool', pool_config) + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) == 0 + + def test_create_mgmt_domain(self, proxy, mgmt_domain_name): + '''Configure a management domain + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + None + + ''' + domain_config = RwMcYang.MgmtDomain(name=mgmt_domain_name) + proxy.create_config('/mgmt-domain/domain', domain_config) + + +@pytest.mark.incremental +class TestMgmtDomain: + '''Test negative cases for the management domain''' + + # + # Creation tests + # + + def test_create_mgmt_domain_with_no_name(self, proxy): + '''Test that a mgmt domain cannot be created if name is not present + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + properties = { } + mgmt_domain = RwMcYang.MgmtDomain.from_dict(properties) + with pytest.raises(ProxyRequestError): + proxy.create_config('/mgmt-domain/domain', mgmt_domain) + + def test_create_mgmt_domain_with_blank_name(self, proxy): + '''Test that a management domain cannot be created if name is an empty + string + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + properties = { + 'name': '', + } + mgmt_domain = RwMcYang.MgmtDomain.from_dict(properties) + with pytest.raises(ProxyRequestError): + proxy.create_config('/mgmt-domain/domain', mgmt_domain) + + def test_create_mgmt_domain_with_null_name(self, proxy): + '''Test that a management domain cannot be created if name is null + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + + Asserts: + TypeError is raised + + ''' + properties = { + 'name':None, + } + with pytest.raises(TypeError): + mgmt_domain = RwMcYang.MgmtDomain.from_dict(properties) + + def test_create_mgmt_domain_with_duplicate_name(self, proxy, mgmt_domain_name): + '''Test that a management domain cannot be created when a management + domain with the same name already exists + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + management domain exists for the mgmt_domain_name + rift.auto.proxy.ProxyRequestError is raised + + ''' + mgmt_domain = proxy.get("/mgmt-domain/domain[name='%s']" % mgmt_domain_name) + assert mgmt_domain is not None + + properties = { + 'name': mgmt_domain.name, + } + duplicate_mgmt_domain = RwMcYang.MgmtDomain.from_dict(properties) + with pytest.raises(ProxyRequestError): + proxy.create_config('/mgmt-domain/domain', duplicate_mgmt_domain) + + # + # Launchpad related tests + # + + def test_verify_launchpad_not_started(self, proxy, mgmt_domain_name): + '''Verifies that the launchpad is not started + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + rift.auto.session.ProxyExpectTimeoutError is raised + + ''' + with pytest.raises(ProxyExpectTimeoutError): + 
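# No start-launchpad RPC has been issued for this domain yet, so the wait_for below
# should never observe the 'started' state within its 10 second window and is
# expected to raise ProxyExpectTimeoutError, satisfying the pytest.raises context.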
proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=10, + fail_on=['crashed']) + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=120, + fail_on=['crashed']) + + def test_start_launchpad_when_no_vm_pool_assigned(self, proxy, mgmt_domain_name): + '''Verify that the launchpad cannot start when the management domain + does not have a vm pool assigned + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + rift.auto.session.ProxyExpectTimeoutError is raised + + ''' + with pytest.raises(ProxyExpectTimeoutError): + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=120, + fail_on=['crashed']) + + def test_start_lp_with_empty_vm_pool(self, proxy, mgmt_domain_name, vm_pool_name): + '''Tests that starting launchpad fails when vm pool does not have a vm + Configure mgmt_domain by adding a VM pool to it + + Arguments: + mgmt_domain_name - a pytest fixture for the management domain name + vm_pool_name - a pytest fixture for the vm pool name + + Asserts: + rift.auto.session.ProxyExpectTimeoutError is raised + + ''' + with pytest.raises(ProxyExpectTimeoutError): + pool_config = RwMcYang.MgmtDomainPools_Vm(name=vm_pool_name) + proxy.create_config( + "/mgmt-domain/domain[name='%s']/pools/vm" % mgmt_domain_name, + pool_config, + ) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=120, + fail_on=['crashed']) + + def test_launchpad_starts_when_vm_pool_has_a_vm_resource(self, proxy, + cloud_account_name, vm_pool_name, mgmt_domain_name, network_pool_name): + '''Tests that a launchpad can now start when the vm pool has a vm + resource + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + vm_pool_name - a pytest fixture for the VM pool name + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + Cloud account has available resources + VM pool has available resources + Cloud account and vm pool agree on available resources + Configured resource is reflected as assigned in operational data + post assignment + Launchpad reaches state 'started' + + ''' + account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name) + cloud_vm_ids = [vm.id for vm in account.resources.vm] + assert len(cloud_vm_ids) >= 1 + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + available_ids = [vm.id for vm in pool.available] + assert len(available_ids) >= 1 + # Assert not split brain + assert set(cloud_vm_ids).difference(set(available_ids)) == set([]) + + pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool_name, + 'cloud_account':cloud_account_name, + 'assigned':[{'id':available_ids[0]}]}) + proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert available_ids[0] in assigned_ids + + # Create NW pool + pool_config = RwMcYang.NetworkPool( + name=network_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + proxy.create_config('/network-pool/pool', pool_config) + pool_config = 
RwMcYang.MgmtDomainPools_Network(name=network_pool_name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/network" % mgmt_domain_name, pool_config) + + + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=400, + fail_on=['crashed']) + + def test_delete_mgmt_domain_with_running_launchpad(self, proxy, mgmt_domain_name): + '''Test that a management domain cannot be deleted when the launchpad + is running + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name + with pytest.raises(ProxyRequestError): + proxy.delete_config(xpath) + + def test_stop_launchpad(self, proxy, mgmt_domain_name): + '''Stop launchpad before we leave this class + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain + + Asserts: + Launchpad begins test in state 'started' + Launchpad finishes test in state 'stopped' + + ''' + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=10, + fail_on=['crashed']) + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=120, + fail_on=['crashed']) + + +@pytest.mark.incremental +class TestMgmtDomainNegativeTeardown: + + @pytest.mark.xfail(raises=ProxyExpectTimeoutError) + def test_delete_mgmt_domain(self, proxy, mgmt_domain_name): + '''Test that deleting a management domain while a pool is attached will + fail + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name + proxy.delete_config(xpath) + + def test_remove_vm_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, + vm_pool_name): + '''Unconfigure mgmt domain: remove a vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + vm_pool_name - a pytest fixture for the vm pool name + + Asserts: + ''' + xpath = "/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % ( + mgmt_domain_name, vm_pool_name) + proxy.delete_config(xpath) + + def test_delete_mgmt_domain(self, proxy, mgmt_domain_name): + '''Unconfigure mgmt_domain: delete mgmt_domain + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + None + + ''' + xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name + proxy.delete_config(xpath) + + def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool: remove a vm resource + + proxy - a pytest fixture proxy to RwMcYang + vm_pool_name - a pytest fixture for the VM pool name + + Asserts: + Resource is no longer assigned after being unconfigured + + ''' + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) >= 1 + + for assigned_id in assigned_ids: + xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % ( + vm_pool_name, assigned_id) + proxy.delete_config(xpath) + + def 
test_delete_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool: Remove the primary vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + vm_pool_name - a pytest fixture for the VM pool name + + + Asserts: + None + + ''' + xpath = "/vm-pool/pool[name='%s']" % vm_pool_name + proxy.delete_config(xpath) + + def test_delete_nw_pool(self, proxy, network_pool_name): + '''Unconfigure network_pool: Remove the network pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + network_pool_name - a pytest fixture for the network pool name + + + Asserts: + None + + ''' + xpath = "/network-pool/pool[name='%s']" % network_pool_name + proxy.delete_config(xpath) + + def test_delete_cloud_account(self, proxy, cloud_account_name): + '''Unconfigure cloud_account + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + None + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + proxy.delete_config(xpath) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_vmpool.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_vmpool.py new file mode 100755 index 0000000..b78c283 --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_vmpool.py @@ -0,0 +1,528 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file test_mission_control_negative_vmpool.py +@author +@date 11/23/2015 +@brief System test of negative (failure) mission control functionality +""" + +import logging +import pytest + + +import gi +gi.require_version('RwMcYang', '1.0') + +from rift.auto.session import ProxyRequestError +from gi.repository import RwMcYang + +@pytest.fixture(scope='module') +def proxy(request, mgmt_session): + '''Fixture which returns a proxy to RwMcYang + + Arguments: + mgmt_session - mgmt_session fixture - instance of a rift.auto.session + class + + ''' + return mgmt_session.proxy(RwMcYang) + +@pytest.fixture(scope='session') +def secondary_vm_pool_name(request): + '''Fixture which returns the secondary vm pool name''' + return 'vm-pool-2' + +def show_cloud_account(logger, cloud_account): + '''Helper method to output vm and network ids for debugging + + Here is a sample cloud account resources dict: + resources= {'vm': [ + {'name': 'rift-s1', 'available': True, 'id': '1'}]} + + Arguments: + logger - logging object to which to send output + cloud_account - cloud_account object to interrogate + + ''' + logger.debug('Showing cloud account. 
name=%s' % cloud_account.name) + logger.debug('account.resources=', cloud_account.resources) + cloud_vm_ids = [vm.id for vm in cloud_account.resources.vm] + logger.debug('cloud vm ids: %s' % cloud_vm_ids) + cloud_network_ids = [network.id for network in cloud_account.resources.network] + logger.debug('cloud network ids: %s' % cloud_network_ids) + + +@pytest.mark.incremental +class TestVmPoolNegativeSetup: + '''Performs module level setup''' + + def test_create_cloud_account(self, proxy, logger, cloud_account): + '''Configure a cloud account + + This creates a cloud account to test other objects + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + logger - a pytest fixture to an instance of Logger + cloud_account - a pytest fixture to a cloud account object + + Asserts: + None + + ''' + proxy.create_config('/cloud-account/account', cloud_account) + #show_cloud_account(logger, cloud_account) + + def test_create_vm_pool(self, proxy, cloud_account_name, vm_pool_name): + '''Configure vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + vm_pool_name - a pytest fixture for the VM pool name + + Asserts: + A cloud account exists for the cloud_account_name + Newly configured vm pool has no resources assigned to it + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + cloud_account = proxy.get(xpath) + assert cloud_account is not None + + pool_config = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + proxy.create_config('/vm-pool/pool', pool_config) + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) == 0 + + # def test_stub(self, proxy): + # '''The decorator is a fix to prevent the test script from failing due + # to the following error: + + # "Unable to resolve dependency ('launchpad',). failed to order test:" + + # Arguments: + # proxy - a pytest fixture proxy to RwMcYang + + # Asserts: + # True + + # ''' + # assert True + + +@pytest.mark.incremental +class TestVmPoolNegative: + '''This class is a container for testing VM pool negative cases. 
+ + The following aspects are tested: + * create a vm pool object + * assign resources to a pool that have already been assigned to another pool + + ''' + + # + # Create: VM pool name tests + # + + def test_create_vm_pool_with_missing_pool_name(self, proxy, cloud_account): + '''Tests that a vm pool cannot be created without a name + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account - a pytest fixture for the cloud account + + Asserts: + cloud_account has a name + rift.auto.proxy.ProxyRequestError is raised + + ''' + assert cloud_account.name is not None + + pool_config = RwMcYang.VmPool( + cloud_account=cloud_account.name, + dynamic_scaling=True, + ) + with pytest.raises(ProxyRequestError): + proxy.create_config('/vm-pool/pool', pool_config) + + def test_create_vm_pool_with_blank_pool_name(self, proxy, cloud_account): + '''Tests that a vm pool cannot be created without a name + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account - a pytest fixture for the cloud account + + Asserts: + Cloud account has a name + rift.auto.proxy.ProxyRequestError is raised + + ''' + assert cloud_account.name is not None + + pool_config = RwMcYang.VmPool( + name='', + cloud_account=cloud_account.name, + dynamic_scaling=True, + ) + with pytest.raises(ProxyRequestError): + proxy.create_config('/vm-pool/pool', pool_config) + + def test_create_vm_pool_with_null_pool_name(self, proxy, cloud_account): + '''Tests that a vm pool cannot be created without a name + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account - a pytest fixture for the cloud account + + Asserts: + Cloud account has a name + rift.auto.proxy.ProxyRequestError is raised + + ''' + assert cloud_account.name is not None + with pytest.raises(TypeError): + pool_config = RwMcYang.VmPool( + name=None, + cloud_account=cloud_account.name, + dynamic_scaling=True, + ) + #proxy.create_config('/vm-pool/pool', pool_config) + + def test_create_vm_pool_with_duplicate_name(self, proxy, vm_pool_name, + cloud_account_name): + '''Tests that a vm pool cannot be created with a name that already exists + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + vm_pool_name - a pytest fixture for the vm pool name + cloud_account - a pytest fixture for the cloud account + + Asserts: + Cloud account has a name + rift.auto.proxy.ProxyRequestError is raised + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + cloud_account = proxy.get(xpath) + assert cloud_account.name is not None + + pool_config = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account.name, + dynamic_scaling=True, + ) + with pytest.raises(ProxyRequestError): + proxy.create_config('/vm-pool/pool', pool_config) + + # + # Cloud name tests + # + + @pytest.mark.xfail(raises=ProxyRequestError) + def test_create_vm_pool_with_missing_cloud_name(self, proxy, secondary_vm_pool_name): + '''Tests that a vm pool cannot be created with a name that already exists + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + secondary_vm_pool_name - a pytest fixture for the secondary vm pool name + + Asserts: + Secondary vm pool name exists + Secondary vm pool does not exist + rift.auto.proxy.ProxyRequestError is raised + + ''' + assert secondary_vm_pool_name is not None + assert proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) is None + + pool_config = RwMcYang.VmPool( + name=secondary_vm_pool_name, + dynamic_scaling=True, + ) + with pytest.raises(ProxyRequestError): + 
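# pool_config deliberately omits the cloud_account leaf, so the create_config call
# below is expected to be rejected with a ProxyRequestError (the test is additionally
# marked xfail on that error above).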
proxy.create_config('/vm-pool/pool', pool_config) + + def test_create_vm_pool_with_blank_cloud_name(self, proxy, secondary_vm_pool_name): + '''Tests that a vm pool cannot be created with a name that already exists + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + secondary_vm_pool_name - a pytest fixture for the secondary vm pool name + + Asserts: + Secondary vm pool name exists + Secondary vm pool does not exist + rift.auto.proxy.ProxyRequestError is raised + + ''' + assert secondary_vm_pool_name is not None + assert proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) is None + + pool_config = RwMcYang.VmPool( + name=secondary_vm_pool_name, + cloud_account='', + dynamic_scaling=True, + ) + with pytest.raises(ProxyRequestError): + proxy.create_config('/vm-pool/pool', pool_config) + + def _test_create_vm_pool_with_null_cloud_name(self, proxy, secondary_vm_pool_name): + '''Tests that a vm pool cannot be created if the cloud name is None + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + secondary_vm_pool_name - a pytest fixture for the secondary vm pool name + + Asserts: + Secondary vm pool name exists + Secondary vm pool does not exist + rift.auto.proxy.ProxyRequestError is raised + + ''' + assert secondary_vm_pool_name is not None + assert proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) is None + with pytest.raises(TypeError): + pool_config = RwMcYang.VmPool( + name=secondary_vm_pool_name, + cloud_account=None, + dynamic_scaling=True, + ) + #proxy.create_config('/vm-pool/pool', pool_config) + + def test_create_vm_pool_with_bogus_cloud_name(self, proxy, secondary_vm_pool_name): + '''Tests that a vm pool cannot be created if the cloud name is None + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + secondary_vm_pool_name - a pytest fixture for the secondary vm pool name + + Asserts: + Secondary vm pool name exists + Secondary vm pool does not exist + Cloud account does not exist for the bogus cloud account name + rift.auto.proxy.ProxyRequestError is raised + + ''' + assert secondary_vm_pool_name is not None + assert proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) is None + + bogus_cloud_account_name = 'bogus-cloud-account-name' + cloud_account = proxy.get("/cloud-account/account[name='%s']" % bogus_cloud_account_name) + assert cloud_account is None + + pool_config = RwMcYang.VmPool( + name=secondary_vm_pool_name, + cloud_account=bogus_cloud_account_name, + dynamic_scaling=True, + ) + with pytest.raises(ProxyRequestError): + proxy.create_config('/vm-pool/pool', pool_config) + + # + # Test VM pool assignments + # + + def test_assign_vm_resource_to_vm_pool(self, proxy, cloud_account_name, + vm_pool_name): + '''Configure a vm resource by adding it to a vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + vm_pool_name - a pytest fixture for the primary vm pool name + + Asserts: + Cloud account has available resources + VM pool has has available resources + Cloud account and vm pool agree on available resources + Configured resource is reflected as assigned in operational data + post assignment + + ''' + account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name) + cloud_vm_ids = [vm.id for vm in account.resources.vm] + assert len(cloud_vm_ids) >= 1 + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + available_ids = [vm.id for vm in pool.available] + assert len(available_ids) >= 1 # Assert pool has available 
resources + # Assert not split brain + assert set(cloud_vm_ids).difference(set(available_ids)) == set([]) + + pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool_name, + 'cloud_account':cloud_account_name, + 'assigned':[{'id':available_ids[0]}]} + ) + proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config) + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert available_ids[0] in assigned_ids # Configured resource shows as assigned + + def test_create_vm_pool_2(self, proxy, cloud_account_name, secondary_vm_pool_name): + '''Configure vm pool + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + secondary_vm_pool_name - a pytest fixture for the secondary vm pool name + + Asserts: + Cloud account exists for the given cloud_account_name + Newly configured vm pool has no resources assigned to it + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + cloud_account = proxy.get(xpath) + assert cloud_account is not None + + pool_config = RwMcYang.VmPool( + name=secondary_vm_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + proxy.create_config('/vm-pool/pool', pool_config) + pool = proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) == 0 # pool contained resources before any were assigned + + @pytest.mark.skipif(True, reason="Assigned VMs are able to be shared between VM pools") + @pytest.mark.xfail(raises=ProxyRequestError) + def test_assign_allocated_vm_to_vm_pool_2(self, proxy, cloud_account_name, + vm_pool_name, secondary_vm_pool_name): + '''This test tries to assign a vm from one vm pool to another vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + vm_pool_name - a pytest fixture for the primary vm pool name + secondary_vm_pool_name - a pytest fixture for the secondary vm pool name + + Asserts: + Prior to Pool 2 assignment, verifies that pool 1 has assigned id(s) + rift.auto.proxy.ProxyRequestError is raised + + ''' + pool_1 = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + pool_2 = proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) + assigned_ids = [vm.id for vm in pool_1.assigned] + assert len(assigned_ids) >= 1 + + pool_config = RwMcYang.VmPool.from_dict({ + 'name':secondary_vm_pool_name, + 'assigned':[{'id':assigned_ids[0]}]}) + with pytest.raises(ProxyRequestError): + proxy.merge_config( + "/vm-pool/pool[name='%s']" % secondary_vm_pool_name, pool_config + ) + + +@pytest.mark.incremental +class TestVmPoolNegativeTeardown: + '''This class serves to do cleanup for the VM pool negative tests''' + + def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool: Remove the primary vm pool resource(s) + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + vm_pool_name - a pytest fixture for the VM pool name + + Asserts: + Resource is assigned before unassigning + Resource is no longer assigned after being unconfigured + + ''' + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) >= 1 # Assert resource is still assigned + + for assigned_id in assigned_ids: + xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool_name, assigned_id) + proxy.delete_config(xpath) + + pool 
= proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) == 0 # Assert resource is not assigned + + def test_delete_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool: Remove the primary vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + vm_pool_name - a pytest fixture for the VM pool name + + Asserts: + None + + ''' + xpath = "/vm-pool/pool[name='%s']" % vm_pool_name + proxy.delete_config(xpath) + + def test_delete_cloud_account_expect_fail(self, proxy, cloud_account_name): + '''Unconfigure cloud_account + + This should fail because we have not deleted vm pool 2 + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + with pytest.raises(ProxyRequestError): + proxy.delete_config(xpath) + + def test_delete_vm_pool_2(self, proxy, secondary_vm_pool_name): + '''Unconfigure secondary vm pool + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + vm_pool_name - a pytest fixture for the VM pool name + + Asserts: + None + + ''' + xpath = "/vm-pool/pool[name='%s']" % secondary_vm_pool_name + proxy.delete_config(xpath) + + def test_delete_cloud_account(self, proxy, cloud_account_name): + '''Unconfigure cloud_account + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + cloud_account_name - a pytest fixture for the cloud account name + + Asserts: + None + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + proxy.delete_config(xpath) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_static.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_static.py new file mode 100755 index 0000000..5b6e2cf --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_static.py @@ -0,0 +1,396 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
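The vm-pool teardown sequence above (unassign resources, delete the pools, only then delete the cloud account) is what test_delete_cloud_account_expect_fail exercises: deleting an account that a pool still references is rejected. A minimal sketch of that ordering as a reusable helper follows; delete_pools_then_account is a hypothetical name and not part of this patch, but it uses only the proxy.get/delete_config calls already shown in these tests.

def delete_pools_then_account(proxy, pool_names, cloud_account_name):
    """Hypothetical helper: tear down vm pools before the cloud account they reference."""
    for pool_name in pool_names:
        pool_xpath = "/vm-pool/pool[name='%s']" % pool_name
        pool = proxy.get(pool_xpath)
        if pool is None:
            continue  # pool was never created or is already gone
        # Unassign any remaining VM resources first.
        for vm in pool.assigned:
            proxy.delete_config(
                "/vm-pool/pool[name='%s']/assigned[id='%s']" % (pool_name, vm.id))
        proxy.delete_config(pool_xpath)
    # Only after every referencing pool is removed should this delete be accepted.
    proxy.delete_config("/cloud-account/account[name='%s']" % cloud_account_name)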
+ +""" +# +# + +@file test_launchpad_startstop.py +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 06/19/2015 +@brief System test of basic mission control functionality +""" + +import pytest + +import gi +gi.require_version('RwMcYang', '1.0') + +from gi.repository import RwMcYang + +@pytest.fixture(scope='module') +def proxy(request, mgmt_session): + '''fixture which returns a proxy to RwMcYang + + Arguments: + request - pytest fixture request + mgmt_session - mgmt_session fixture - instance of a rift.auto.session class + ''' + return mgmt_session.proxy(RwMcYang) + +@pytest.mark.setup('launchpad') +@pytest.mark.incremental +class TestMissionControlSetup: + def test_create_cloud_account(self, proxy, cloud_account): + '''Configure a cloud account''' + proxy.create_config('/cloud-account/account', cloud_account) + + def test_create_mgmt_domain(self, proxy, mgmt_domain_name): + '''Configure mgmt domain''' + domain_config = RwMcYang.MgmtDomain( + name=mgmt_domain_name) + proxy.create_config('/mgmt-domain/domain', domain_config) + + def test_create_vm_pool(self, proxy, cloud_account_name, vm_pool_name): + '''Configure vm pool + + Asserts : + Newly configured vm pool has no resources assigned to it + ''' + pool_config = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account_name) + proxy.create_config('/vm-pool/pool', pool_config) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert assigned_ids == [] # pool contained resources before any were assigned + + def test_assign_vm_resource_to_vm_pool(self, proxy, cloud_account_name, vm_pool_name): + '''Configure a vm resource by adding it to a vm pool + + Asserts: + Cloud account has available resources + VM pool has has available resources + Cloud account and vm pool agree on available resources + Configured resource is reflected as assigned in operational data post assignment + ''' + account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name) + cloud_vm_ids = [vm.id for vm in account.resources.vm] + assert cloud_vm_ids != [] + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + available_ids = [vm.id for vm in pool.available] + # NOTE: Desired API - request for a list of leaf elements + # available_ids = proxy.get("/vm-pool/pool[name='%s']/available/id" % vm_pool_name) + assert available_ids != [] # Assert pool has available resources + assert set(cloud_vm_ids).difference(set(available_ids)) == set([]) # Assert not split brain + + pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool_name, + 'cloud_account':cloud_account_name, + 'assigned':[{'id':available_ids[0]}]}) + proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + print(available_ids[0], assigned_ids) + assert available_ids[0] in assigned_ids # Configured resource shows as assigned + + def test_create_network_pool(self, proxy, cloud_account_name, network_pool_name): + '''Configure network pool + + Asserts : + Newly configured network pool has no resources assigned to it + ''' + pool_config = RwMcYang.NetworkPool( + name=network_pool_name, + cloud_account=cloud_account_name) + proxy.create_config('/network-pool/pool', pool_config) + + pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name) + assigned_ids = [network.id for network in pool.assigned] + assert assigned_ids == [] # pool contained resources before any were 
assigned + + def test_assign_network_resource_to_network_pool(self, proxy, cloud_account_name, network_pool_name): + '''Configure a network resource by adding it to a network pool + + Asserts: + Cloud account has available resources + Network pool has has available resources + Cloud account and network pool agree on available resources + Configured resource is reflected as assigned in operational data post assignment + ''' + account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name) + cloud_network_ids = [network.id for network in account.resources.network] + assert cloud_network_ids != [] + + pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name) + available_ids = [network.id for network in pool.available] + assert available_ids != [] # Assert pool has available resources + assert set(cloud_network_ids).difference(set(available_ids)) == set([]) # Assert not split brain + + pool_config = RwMcYang.NetworkPool.from_dict({ + 'name':network_pool_name, + 'cloud_account':cloud_account_name, + 'assigned':[{'id':available_ids[0]}]}) + proxy.replace_config("/network-pool/pool[name='%s']" % network_pool_name, pool_config) + + pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name) + assigned_ids = [network.id for network in pool.assigned] + assert available_ids[0] in assigned_ids # Configured resource shows as assigned + + def test_assign_network_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, network_pool_name): + '''Configure mgmt_domain by adding a network pool to it + ''' + pool_config = RwMcYang.MgmtDomainPools_Network(name=network_pool_name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/network" % mgmt_domain_name, pool_config) + + def test_create_port_pool(self, proxy, cloud_account_name, port_pool_name): + '''Configure port pool + + Asserts : + Newly configured port pool has no resources assigned to it + ''' + pool_config = RwMcYang.PortPool( + name=port_pool_name, + cloud_account=cloud_account_name) + proxy.create_config('/port-pool/pool', pool_config) + + pool = proxy.get("/port-pool/pool[name='%s']" % port_pool_name) + assigned_ids = [port.id for port in pool.assigned] + assert assigned_ids == [] # pool contained resources before any were assigned + + def test_assign_port_resource_to_port_pool(self, proxy, cloud_account_name, port_pool_name): + '''Configure a port resource by adding it to a port pool + + Asserts: + Cloud account has available resources + Port pool has has available resources + Cloud account and port pool agree on available resources + Configured resource is reflected as assigned in operational data post assignment + ''' + account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name) + cloud_port_ids = [port.id for port in account.resources.port] + assert cloud_port_ids != [] + + pool = proxy.get("/port-pool/pool[name='%s']" % port_pool_name) + available_ids = [port.id for port in pool.available] + assert available_ids != [] # Assert pool has available resources + assert set(cloud_port_ids).difference(set(available_ids)) == set([]) # Assert not split brain + + pool_config = RwMcYang.PortPool.from_dict({ + 'name':port_pool_name, + 'cloud_account':cloud_account_name, + 'assigned':[{'id':available_ids[0]}]}) + proxy.replace_config("/port-pool/pool[name='%s']" % port_pool_name, pool_config) + + pool = proxy.get("/port-pool/pool[name='%s']" % port_pool_name) + assigned_ids = [port.id for port in pool.assigned] + assert available_ids[0] in assigned_ids # Configured resource shows as assigned + + + 
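The vm, network, and port pool cases above repeat the same pattern: read the pool's available ids, push an 'assigned' entry via replace_config, then re-read the pool and check that the id now shows as assigned. A condensed sketch of that pattern is given below; assign_first_available is a hypothetical helper, not part of this module, and assumes the same RwMcYang from_dict/replace_config API used in the tests above.

def assign_first_available(proxy, pool_cls, pool_kind, pool_name, cloud_account_name):
    """Hypothetical helper: assign the first available resource of a pool and verify it.

    pool_cls is one of RwMcYang.VmPool / NetworkPool / PortPool and pool_kind the
    matching xpath segment ('vm-pool', 'network-pool' or 'port-pool').
    """
    xpath = "/%s/pool[name='%s']" % (pool_kind, pool_name)
    pool = proxy.get(xpath)
    available_ids = [resource.id for resource in pool.available]
    assert available_ids != []  # pool must expose at least one free resource
    pool_config = pool_cls.from_dict({
        'name': pool_name,
        'cloud_account': cloud_account_name,
        'assigned': [{'id': available_ids[0]}]})
    proxy.replace_config(xpath, pool_config)
    pool = proxy.get(xpath)
    assert available_ids[0] in [resource.id for resource in pool.assigned]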
def test_assign_port_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, port_pool_name): + '''Configure mgmt_domain by adding a port pool to it + ''' + pool_config = RwMcYang.MgmtDomainPools_Port(name=port_pool_name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/port" % mgmt_domain_name, pool_config) + + def test_assign_vm_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, vm_pool_name): + '''Configure mgmt_domain by adding a VM pool to it + ''' + pool_config = RwMcYang.MgmtDomainPools_Vm(name=vm_pool_name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/vm" % mgmt_domain_name, pool_config) + + def test_wait_for_launchpad_started(self, proxy, mgmt_domain_name): + '''Wait for the launchpad to start + + Additionally begins the launchpad scraper. + + Asserts: + Launchpad reaches state 'started' + ''' + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=200, + fail_on=['crashed']) + + +@pytest.mark.depends('launchpad') +@pytest.mark.incremental +class TestMissionControl: + + def test_stop_launchpad(self, proxy, mgmt_domain_name): + '''Invoke stop launchpad RPC + + Asserts: + Launchpad begins test in state 'started' + Launchpad finishes test in state 'stopped' + ''' + + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=10, + fail_on=['crashed']) + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=60, + fail_on=['crashed']) + + def test_start_launchpad(self, proxy, mgmt_domain_name, launchpad_scraper): + '''Invoke start launchpad RPC + + Asserts: + Launchpad begins test in state 'stopped' + Launchpad finishes test in state 'started' + ''' + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=10, + fail_on=['crashed']) + start_launchpad_input = RwMcYang.StartLaunchpadInput(mgmt_domain=mgmt_domain_name) + start_launchpad_output = proxy.rpc(start_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=200, + fail_on=['crashed']) + launchpad_scraper.reset() + + def test_stop_launchpad_redux(self, proxy, mgmt_domain_name): + '''Invoke stop launchpad RPC... Again... 
+ + Asserts: + Launchpad begins test in state 'started' + Launchpad finishes test in state 'stopped' + ''' + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=10, + fail_on=['crashed']) + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=60, + fail_on=['crashed']) + + +@pytest.mark.teardown('launchpad') +@pytest.mark.incremental +class TestMissionControlTeardown: + def test_remove_vm_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, vm_pool_name): + '''Unconfigure mgmt domain: remove a vm pool''' + xpath = "/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain_name, vm_pool_name) + proxy.delete_config(xpath) + + def test_remove_network_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, network_pool_name): + '''Unconfigure mgmt_domain: remove a network pool''' + xpath = "/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain_name, network_pool_name) + proxy.delete_config(xpath) + + def test_remove_port_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, port_pool_name): + '''Unconfigure mgmt_domain: remove a port pool''' + xpath = "/mgmt-domain/domain[name='%s']/pools/port[name='%s']" % (mgmt_domain_name, port_pool_name) + proxy.delete_config(xpath) + + def test_delete_mgmt_domain(self, proxy, mgmt_domain_name): + '''Unconfigure mgmt_domain: delete mgmt_domain''' + xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name + proxy.delete_config(xpath) + + def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool: remove a vm resource + + Asserts: + Resource is no longer assigned after being unconfigured + ''' + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert assigned_ids != [] # Assert resource is still assigned + + for assigned_id in assigned_ids: + xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool_name, assigned_id) + proxy.delete_config(xpath) + + def test_remove_network_resource_from_network_pool(self, proxy, network_pool_name): + '''Unconfigure network_pool: remove a network resource + + Asserts: + Resource is no longer assigned after being unconfigured + ''' + pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name) + assigned_ids = [network.id for network in pool.assigned] + assert assigned_ids != [] # Assert resource is still assigned + + for assigned_id in assigned_ids: + xpath = "/network-pool/pool[name='%s']/assigned[id='%s']" % (network_pool_name, assigned_id) + proxy.delete_config(xpath) + + def test_remove_port_resource_from_port_pool(self, proxy, port_pool_name): + '''Unconfigure port_pool: remove a port resource + + Asserts: + Resource is no longer assigned after being unconfigured + ''' + pool = proxy.get("/port-pool/pool[name='%s']" % port_pool_name) + assigned_ids = [port.id for port in pool.assigned] + assert assigned_ids != [] # Assert resource is still assigned + + for assigned_id in assigned_ids: + xpath = "/port-pool/pool[name='%s']/assigned[id='%s']" % (port_pool_name, assigned_id) + proxy.delete_config(xpath) + + def test_delete_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool''' + xpath = "/vm-pool/pool[name='%s']" % vm_pool_name + proxy.delete_config(xpath) + + def test_delete_network_pool(self, proxy, network_pool_name): + 
'''Unconfigure network pool''' + xpath = "/network-pool/pool[name='%s']" % network_pool_name + proxy.delete_config(xpath) + + def test_delete_port_pool(self, proxy, port_pool_name): + '''Unconfigure port_pool''' + xpath = "/port-pool/pool[name='%s']" % port_pool_name + proxy.delete_config(xpath) + + def test_delete_cloud_account(self, proxy, cloud_account_name): + '''Unconfigure cloud_account''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + proxy.delete_config(xpath) + + def test_create_odl_sdn_account(self, proxy, sdn_account_name, sdn_account_type): + '''Configure sdn account''' + sdn_account = RwMcYang.SDNAccount( + name=sdn_account_name, + account_type=sdn_account_type) + xpath = "/sdn/account[name='%s']" % sdn_account_name + proxy.create_config(xpath, sdn_account) + + def test_show_odl_sdn_account(self, proxy, sdn_account_name, sdn_account_type): + '''Showing sdn account configuration + + Asserts: + sdn_account.account_type is what was configured + ''' + xpath = "/sdn/account[name='%s']" % sdn_account_name + sdn_account = proxy.get_config(xpath) + assert sdn_account.account_type == sdn_account_type + + def test_delete_odl_sdn_account(self, proxy, sdn_account_name): + '''Unconfigure sdn account''' + xpath = "/sdn/account[name='%s']" % sdn_account_name + proxy.delete_config(xpath) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_delete_systest_cloudsim.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_delete_systest_cloudsim.racfg new file mode 100644 index 0000000..20f1ed3 --- /dev/null +++ b/modules/core/mano/rwmc/ra/racfg/mission_control_delete_systest_cloudsim.racfg @@ -0,0 +1,19 @@ +{ + "test_name":"TC_MISSION_CONTROL_DELETE_CLOUDSIM", + "commandline":"./mission_control_delete_systest", + "target_vm":"VM", + "test_description":"System test targeting deleting mission control configuration", + "run_as_root": true, + "status":"working", + "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"], + "timelimit": 2400, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_reload_systest_openstack.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_reload_systest_openstack.racfg new file mode 100644 index 0000000..cbe7b08 --- /dev/null +++ b/modules/core/mano/rwmc/ra/racfg/mission_control_reload_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_MISSION_CONTROL_RELOAD_OPENSTACK", + "commandline":"./mission_control_reload_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo ", + "test_description":"System test for mission control reload(Openstack)", + "run_as_root": false, + "status":"working", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 2200, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim.racfg new file mode 100644 index 0000000..d2131be --- /dev/null +++ b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim.racfg @@ -0,0 +1,19 @@ +{ + "test_name":"TC_MISSION_CONTROL_CLOUDSIM", + "commandline":"./mission_control_systest", + "target_vm":"VM", + "test_description":"System test for mission control", + "run_as_root": true, + "status":"working", + "keywords":["nightly","smoke","MANO","cloudsim"], + "timelimit": 1400, + "networks":[], + "vms":[ + { + "name": "VM", + 
"memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim_negative.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim_negative.racfg new file mode 100644 index 0000000..eda42cd --- /dev/null +++ b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim_negative.racfg @@ -0,0 +1,19 @@ +{ + "test_name":"TC_MISSION_CONTROL_CLOUDSIM_NEGATIVE", + "commandline":"./mission_control_negative_systest", + "target_vm":"VM", + "test_description":"System test for mission control negative cases", + "run_as_root": true, + "status":"broken", + "keywords":["nightly","smoke"], + "timelimit": 600, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack.racfg new file mode 100644 index 0000000..8bb59f5 --- /dev/null +++ b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_MISSION_CONTROL_OPENSTACK", + "commandline":"./mission_control_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo", + "test_description":"System test for mission control(Openstack)", + "run_as_root": false, + "status":"broken", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 1800, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack_negative.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack_negative.racfg new file mode 100644 index 0000000..6db3394 --- /dev/null +++ b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack_negative.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_MISSION_CONTROL_OPENSTACK_NEGATIVE", + "commandline":"./mission_control_negative_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo ", + "test_description":"System test for mission control(Openstack) negative cases", + "run_as_root": false, + "status":"working", + "keywords":["nightly","smoke", "openstack"], + "timelimit": 1600, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/test/CMakeLists.txt b/modules/core/mano/rwmc/test/CMakeLists.txt new file mode 100644 index 0000000..47b7219 --- /dev/null +++ b/modules/core/mano/rwmc/test/CMakeLists.txt @@ -0,0 +1,15 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +install( + PROGRAMS mission_control.py + DESTINATION usr/rift/systemtest/mission_control + COMPONENT ${PKG_LONG_NAME} + ) diff --git a/modules/core/mano/rwmc/test/README b/modules/core/mano/rwmc/test/README new file mode 100644 index 0000000..2e91ca0 --- /dev/null +++ b/modules/core/mano/rwmc/test/README @@ -0,0 +1,10 @@ +To start mission control run the following command: + +./mission_control.py -m ethsim -c --skip-prepare-vm + +To run the mock +./mission_control.py -m ethsim -c --skip-prepare-vm --mock + +To invoke the mission control UI +http://10.0.106.51:8000/index.html?api_server=http://10.0.106.51#/ + diff --git a/modules/core/mano/rwmc/test/mission_control.py b/modules/core/mano/rwmc/test/mission_control.py new file mode 100755 index 0000000..9d13a3b --- /dev/null +++ 
b/modules/core/mano/rwmc/test/mission_control.py @@ -0,0 +1,299 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import ipaddress +import logging +import os +import shlex +import socket +import subprocess +import sys + +import rift.vcs +import rift.vcs.demo +import rift.vcs.vms + +import rift.rwcal.cloudsim +import rift.rwcal.cloudsim.net + +logger = logging.getLogger(__name__) + + +class MinionConnectionError(Exception): + pass + + +class MissionControlUI(rift.vcs.NativeProcess): + def __init__(self, name="RW.MC.UI"): + super(MissionControlUI, self).__init__( + name=name, + exe="./usr/share/rw.ui/webapp/scripts/launch_ui.sh", + ) + + @property + def args(self): + return ' ' + + +class Demo(rift.vcs.demo.Demo): + def __init__(self, use_mock=False, skip_ui=False, disable_cnt_mgr=False): + + procs = [ + rift.vcs.RiftCli(), + rift.vcs.DtsRouterTasklet(), + rift.vcs.MsgBrokerTasklet(), + rift.vcs.RestconfTasklet(), + rift.vcs.Watchdog(), + rift.vcs.RestPortForwardTasklet(), + rift.vcs.CalProxy(), + ] + + if not use_mock: + procs.append(rift.vcs.MissionControl()) + if not disable_cnt_mgr: + procs.append(rift.vcs.ContainerManager()) + else: + procs.extend([rift.vcs.CrossbarServer(), rift.vcs.DtsMockServerTasklet()]) + + if not skip_ui: + procs.extend([MissionControlUI()]) + + super(Demo, self).__init__( + # Construct the system. This system consists of 1 cluster in 1 + # colony. The master cluster houses CLI and management VMs + sysinfo = rift.vcs.SystemInfo( + colonies=[ + rift.vcs.Colony( + clusters=[ + rift.vcs.Cluster( + name='master', + virtual_machines=[ + rift.vcs.VirtualMachine( + name='vm-mission-control', + ip='127.0.0.1', + tasklets=[ + rift.vcs.uAgentTasklet(), + ], + procs=procs, + ), + ] + ) + ] + ) + ] + ), + + # Define the generic portmap. + port_map = {}, + + # Define a mapping from the placeholder logical names to the real + # port names for each of the different modes supported by this demo. + port_names = { + 'ethsim': { + }, + 'pci': { + } + }, + + # Define the connectivity between logical port names. 
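# (Kept empty for this demo, matching the empty port_map and port_names mappings above.)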
+ port_groups = {}, + ) + + +def check_salt_master_running(): + cmd = "systemctl status salt-master.service | grep Active | awk '{print $2}'" + salt_master_status = subprocess.check_output(cmd, universal_newlines=True, shell=True).rstrip('\n') + if salt_master_status != 'active': + logger.error("Salt master is not running on the host.") + logger.error("Start the salt master (systemctl start salt-master.service) and re-run mission control.") + exit(1) + + +def clear_salt_keys(): + # clear all the previosly installed salt keys + logger.info("Removing all unconnected salt keys") + stdout = subprocess.check_output( + shlex.split('salt-run manage.down'), + universal_newlines=True, + ) + + down_minions = stdout.splitlines() + + for line in down_minions: + salt_id = line.strip().replace("- ", "") + logger.info("Removing old unconnected salt id: %s", salt_id) + minion_keys_stdout = subprocess.check_output( + shlex.split('salt-key -f {}'.format(salt_id)), + universal_newlines=True) + + minion_keys = minion_keys_stdout.splitlines() + for key_line in minion_keys: + if "Keys" in key_line: + continue + + key_split = key_line.split(":") + if len(key_split) < 2: + continue + + key = key_split[0] + + # Delete the minion key + logger.info("Deleting minion %s key: %s", salt_id, key) + subprocess.check_call(shlex.split('salt-key -d {} -y'.format(key))) + + +def is_node_connected(node_id): + try: + stdout = subprocess.check_output( + shlex.split('salt %s test.ping' % node_id), + universal_newlines=True, + ) + except subprocess.CalledProcessError: + msg = "test.ping command failed against node_id: %s" % node_id + logger.warning(msg) + raise MinionConnectionError(msg) + + up_minions = stdout.splitlines() + for line in up_minions: + if "True" in line: + return True + + return False + +def construct_lp_public_ip_env_var(lp_public_ip): + ipaddress.IPv4Address(lp_public_ip) + os.environ["RIFT_LP_PUBLIC_IP"] = lp_public_ip + +def construct_lp_node_env_var(lp_salt_node_ip_ids): + format_msg = "--lp-node-id parameter must be in the following format :" + env_node_ip_str = "" + for node_ip_id in lp_salt_node_ip_ids: + if ":" not in node_ip_id: + raise ValueError(format_msg) + + ip_id_list = node_ip_id.split(":") + if len(ip_id_list) != 2: + raise ValueError(format_msg) + + node_ip, node_id = ip_id_list + + # Validate the VM ip address provided + ipaddress.IPv4Address(node_ip) + + if not is_node_connected(node_id): + logger.warning("Salt minion id %s is not connected", node_id) + + env_node_ip_str += "{}|{}:".format(node_ip, node_id) + + env_node_ip_str = env_node_ip_str.rstrip(":") + + os.environ["RIFT_LP_NODES"] = env_node_ip_str + + +def main(argv=sys.argv[1:]): + logging.basicConfig( + level=logging.INFO, + format='%(asctime)-15s %(levelname)s %(message)s') + + # Create a parser which includes all generic demo arguments + parser = rift.vcs.demo.DemoArgParser(conflict_handler='resolve') + + parser.add_argument( + "--mock", + help="Start the DTS mock server", + action="store_true", + ) + + parser.add_argument( + "--no-cntr-mgr", + action="store_true", + help="Disable the container manager" + ) + + parser.add_argument( + "--skip-ui", + help="Do not start UI services (MissionControlUI and Composer)", + action="store_true", + ) + + parser.add_argument( + "--lp-node-id", + help="Use provided vm ip and salt node id's as launchpad VM's if " + "no static resources allocated. 
Pass in as :", + action='append', + ) + + parser.add_argument( + "--lp-public-ip", + help="Use provided vm public/floating ip as launchpad VM's public ip. " + "Pass in as ", + ) + + + args = parser.parse_args(argv) + + # Disable loading any kernel modules for the mission control VM + os.environ["NO_KERNEL_MODS"] = "1" + + if args.lp_node_id: + construct_lp_node_env_var(args.lp_node_id) + + if args.lp_public_ip: + construct_lp_public_ip_env_var(args.lp_public_ip) + + if not args.mock: + # Ensure that salt master is running. + check_salt_master_running() + + # Clear salt keys to clear out any old/duplicate keys + #clear_salt_keys() + + # Initialize the virsh ahead of time to ensure container NAT + # is functional. This really should go into cloudsim container + # initialization. + if not args.no_cntr_mgr: + rift.rwcal.cloudsim.net.virsh_initialize_default() + + # load demo info and create Demo object + demo = Demo(use_mock=args.mock, skip_ui=args.skip_ui, disable_cnt_mgr=args.no_cntr_mgr) + + # Create the prepared system from the demo + system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args, + northbound_listing="cli_rwmc_schema_listing.txt") + + confd_ip = socket.gethostbyname(socket.gethostname()) + rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip) + + # Start the prepared system + system.start() + + +if __name__ == "__main__": + try: + main() + except rift.vcs.demo.ReservationError: + print("ERROR: unable to retrieve a list of IP addresses from the reservation system") + sys.exit(1) + except rift.vcs.demo.MissingModeError: + print("ERROR: you need to provide a mode to run the script") + sys.exit(1) + finally: + os.system("stty sane") \ No newline at end of file diff --git a/modules/core/mano/rwmc/test/perf/dts-perf-nc.py b/modules/core/mano/rwmc/test/perf/dts-perf-nc.py new file mode 100755 index 0000000..5f97b9a --- /dev/null +++ b/modules/core/mano/rwmc/test/perf/dts-perf-nc.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python2 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
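+# dts-perf-nc.py: stress/throughput test of confd and DTS through a netconf
+# client. It boots the dts-perf-system demo on a remote host over ssh, waits
+# for confd to accept connections on port 8008, then times federation config
+# writes and reads, operational-data reads and RPC calls, and prints the
+# resulting rates.
+#
+# Example invocation (must be run from inside a rift shell; the address below
+# is only a placeholder for a reachable test host):
+#   ./dts-perf-nc.py --host 10.66.4.115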
+ + +# +# + +import subprocess +import contextlib +import rift.auto.proxy +import sys +import os +import time +import rw_peas +import requests +import argparse +import socket + +import gi +gi.require_version('RwMcYang', '1.0') +gi.require_version('YangModelPlugin', '1.0') + + + +from gi.repository import RwMcYang + +# stress the system using netconf + +yang = rw_peas.PeasPlugin('yangmodel_plugin-c', 'YangModelPlugin-1.0') +yang_model_api = yang.get_interface('Model') +yang_model = yang_model_api.alloc() +mc_module = yang_model_api.load_module(yang_model, 'rw-mc') + +@contextlib.contextmanager +def start_system(host, port): + print("Starting system") + + + # Retrieve the necessary rift paths + rift_root = os.environ["RIFT_ROOT"] + rift_install = os.environ["RIFT_INSTALL"] + rift_artifacts = os.environ["RIFT_ARTIFACTS"] + + cmd="{RIFT_INSTALL}/demos/dts-perf-system.py -m ethsim -c --ip-list {host} --skip-prepare-vm".format(RIFT_INSTALL=rift_install, host=host) + rift_shell_cmd="sudo {RIFT_ROOT}/rift-shell -e -- {cmd}".format(cmd=cmd, RIFT_ROOT=rift_root) + remote_cmd="shopt -s huponexit; cd {RIFT_ROOT}; {rift_shell_cmd}".format(RIFT_ROOT=rift_root, rift_shell_cmd=rift_shell_cmd) + ssh_opt="-o ConnectTimeout=5 -o StrictHostKeyChecking=no" + + + cmd='ssh {ssh_opt} {host} -t -t "{remote_cmd}"'.format( + ssh_opt=ssh_opt, + remote_cmd=remote_cmd, + host=host, + ) + + fout = open(os.path.join(rift_artifacts, "dts-perf.stdout"), "w") + ferr = open(os.path.join(rift_artifacts, "dts-perf.stderr"), "w") + + process = subprocess.Popen( + cmd, + shell=True, + stdout=fout, + stderr=ferr, + stdin=subprocess.PIPE, + ) + + # Wait for confd to become available + while True: + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.connect((host, 8008)) + sock.close() + break + + except socket.error: + time.sleep(1) + + print("System ready") + + try: + yield + finally: + print("Killing confd") + process.terminate() + process.wait() + +def run_rpc_perf_test(proxy, num_rpcs=1): + start_time = time.time() + + for i in range(1, num_rpcs + 1): + start = RwMcYang.StartLaunchpadInput() + start.federation_name = "lp_%s" % i + print(proxy.rpc(start.to_xml(yang_model))) + + stop_time = time.time() + + print("Retrieved %s rpc in %s seconds" % (num_rpcs, stop_time - start_time)) + return (stop_time - start_time) + + +def run_federation_config_http_perf_test(num_federations=1): + session = requests.Session() + + start_time = time.time() + for i in range(1, num_federations + 1): + req = session.post( + url="http://localhost:8008/api/config", + json={"federation": {"name": "foo_%s" % i}}, + headers={'Content-Type': 'application/vnd.yang.data+json'}, + auth=('admin', 'admin') + ) + req.raise_for_status() + stop_time = time.time() + + print("Configured %s federations using restconf in %s seconds" % (num_federations, stop_time - start_time)) + return (stop_time - start_time) + +def run_opdata_get_opdata_perf_test(proxy, num_gets=1): + start_time = time.time() + + for i in range(1, num_gets + 1): + print(proxy.get_from_xpath(filter_xpath="/opdata")) + pass + + stop_time = time.time() + print("Retrieved %s opdata in %s seconds" % (num_gets, stop_time - start_time)) + return (stop_time - start_time) + +def run_federation_config_perf_test(proxy, num_federations=1): + start_time = time.time() + + for i in range(1, num_federations + 1): + fed = RwMcYang.FederationConfig() + fed.name = "foobar_%s" % i + print(proxy.merge_config(fed.to_xml(yang_model))) + + stop_time = time.time() + + print("Configured %s federations 
using netconf in %s seconds" % (num_federations, stop_time - start_time)) + return (stop_time - start_time) + +def run_federation_get_config_perf_test(proxy, num_gets=1): + start_time = time.time() + + for i in range(1, num_gets + 1): + print(proxy.get_config(filter_xpath="/federation")) + + stop_time = time.time() + + print("Retrieved %s federations in %s seconds" % (num_gets, stop_time - start_time)) + return (stop_time - start_time) + +def main(argv=sys.argv[1:]): + + parser = argparse.ArgumentParser() + parser.add_argument('--host', required=True) + parser.add_argument('--port', type=int, default=8888) + parser.add_argument('--output', default='dts-perf-results.tsv') + parser.add_argument('--uri', default="/federation") + parser.add_argument('--num-conn', type=int, default=5000) + parser.add_argument('--timeout', type=int, default=5) + parser.add_argument('--low-rate', type=int, default=20) + parser.add_argument('--high-rate', type=int, default=200) + parser.add_argument('--rate-step', type=int, default=20) + + args = parser.parse_args(argv) + + with start_system(args.host, args.port): + nc_proxy = rift.auto.proxy.NetconfProxy() + nc_proxy.connect() + n_fed = 10; + n_fed_get = 100 + n_opdata_get = 100 + n_rpc = 100 + config_time = run_federation_config_perf_test(nc_proxy, num_federations=n_fed) + config_get_time = run_federation_get_config_perf_test(nc_proxy, num_gets=n_fed_get) + opdata_get_time = run_opdata_get_opdata_perf_test(nc_proxy, num_gets=n_opdata_get) + rpc_time = run_rpc_perf_test(nc_proxy, num_rpcs=n_rpc) + + print("") + print("..............................................") + print("CONFD Performance Results Using Netconf Client") + print("..............................................") + print("Rate of config writes: %d" % (n_fed/config_time)) + print("Rate of config reads : %d" % (n_fed_get/config_get_time)) + print("Rate of opdata reads : %d" % (n_opdata_get/opdata_get_time)) + print("Rate of rpc calls : %d" % (n_rpc/rpc_time)) + print("* Config read is reading a list with %d entries" % n_fed) + print("* Opdata read is reading a list with 5 entries") + print("..............................................") + +if __name__ == "__main__": + if "RIFT_ROOT" not in os.environ: + print("Must be in rift shell to run.") + sys.exit(1) + + os.chdir(os.environ["RIFT_INSTALL"]) + main() \ No newline at end of file diff --git a/modules/core/mano/rwmc/test/perf/dts-perf-system.py b/modules/core/mano/rwmc/test/perf/dts-perf-system.py new file mode 100755 index 0000000..741001c --- /dev/null +++ b/modules/core/mano/rwmc/test/perf/dts-perf-system.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
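+# dts-perf-system.py: brings up the system exercised by the perf tests. It
+# starts a single 'vm-mission-control' VM running the CLI, mission control,
+# DTS router and message broker tasklets plus the dts-perf-webserver.py
+# native process, which is bound to the first address supplied via --ip-list.
+# The mode is forced to ethsim.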
+ + +# +# + + +import logging +import os +import sys + +import rift.vcs +import rift.vcs.demo +import rift.vcs.vms + +logger = logging.getLogger(__name__) + +class Webserver(rift.vcs.NativeProcess): + def __init__(self, host, name="rw.perf.webserver"): + super(Webserver, self).__init__( + name=name, + exe="./usr/local/bin/dts-perf-webserver.py", + args="--host={}".format(host), + ) + + +def main(argv=sys.argv[1:]): + logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s') + + # @HACK over-ride the mode (if is not important for this system) + argv.extend(('--mode', 'ethsim')) + + # Create a parser which includes all generic demo arguments + parser = rift.vcs.demo.DemoArgParser(conflict_handler='resolve') + + args = parser.parse_args(argv) + + # @HACK There should be one host IP provided; Use it as the host of the + # webserver. + host = args.ip_list[0] + + # Construct the system. This system consists of 1 cluster in 1 + # colony. The master cluster houses CLI and management VMs + sysinfo = rift.vcs.SystemInfo( + colonies=[ + rift.vcs.Colony( + clusters=[ + rift.vcs.Cluster( + name='master', + virtual_machines=[ + rift.vcs.VirtualMachine( + name='vm-mission-control', + ip='127.0.0.1', + tasklets=[ + rift.vcs.uAgentTasklet(), + ], + procs=[ + rift.vcs.CliTasklet(), + rift.vcs.MissionControl(), + rift.vcs.DtsRouterTasklet(), + rift.vcs.MsgBrokerTasklet(), + Webserver(host), + ], + ), + ] + ) + ] + ) + ] + ) + + + # Define the generic portmap. + port_map = {} + + # Define a mapping from the placeholder logical names to the real + # port names for each of the different modes supported by this demo. + port_names = { + 'ethsim': { + }, + 'pci': { + } + } + + # Define the connectivity between logical port names. + port_groups = {} + + #load demo info and create Demo object + demo = rift.vcs.demo.Demo(sysinfo=sysinfo, + port_map=port_map, + port_names=port_names, + port_groups=port_groups) + + # Create the prepared system from the demo + system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args, + northbound_listing="cli_rwfpath_schema_listing.txt") + + # Start the prepared system + system.start() + + +if __name__ == "__main__": + try: + main() + except rift.vcs.demo.ReservationError: + print("ERROR: unable to retrieve a list of IP addresses from the reservation system") + sys.exit(1) + except rift.vcs.demo.MissingModeError: + print("ERROR: you need to provide a mode to run the script") + sys.exit(1) + finally: + os.system("stty sane") \ No newline at end of file diff --git a/modules/core/mano/rwmc/test/perf/dts-perf-test.py b/modules/core/mano/rwmc/test/perf/dts-perf-test.py new file mode 100755 index 0000000..71d989b --- /dev/null +++ b/modules/core/mano/rwmc/test/perf/dts-perf-test.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
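+# dts-perf-test.py: HTTP load test of the perf webserver. It requires the
+# external 'autobench' and 'httperf' tools, launches dts-perf-system.py on
+# the remote --host over ssh, waits for confd to come up on port 8008, and
+# then drives autobench against the perf webserver (default port 8888,
+# default URI /federation), writing results to the --output tsv file.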
+ + +# +# + + +import argparse +import collections +import os +import socket +import sys +import subprocess +import time + + +class ProcessError(Exception): + pass + + +def check_dependency(package): + requirement = subprocess.Popen( + 'which {}'.format(package), + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + _, stderr = requirement.communicate() + requirement.wait() + + if stderr: + print("'{}' is required to test the system".format(package)) + sys.exit(1) + + +class Autobench(collections.namedtuple( + "Autobench", [ + "host", + "port", + "uri", + "file", + "num_connections", + "timeout", + "low_rate", + "high_rate", + "rate_step", + ] + )): + def __repr__(self): + args = [ + "autobench --single_host", + "--host1 {}".format(self.host), + "--port1 {}".format(self.port), + "--uri1 {}".format(self.uri), + "--file {}".format(self.file), + "--num_conn {}".format(self.num_connections), + "--timeout {}".format(self.timeout), + "--low_rate {}".format(self.low_rate), + "--high_rate {}".format(self.high_rate), + "--rate_step {}".format(self.rate_step), + ] + + return ' '.join(args) + + +def launch_remote_system(host, port, autobench): + # Check dependencies + check_dependency('autobench') + check_dependency('httperf') + + # Retrieve the necessary rift paths + rift_root = os.environ["RIFT_ROOT"] + rift_install = os.environ["RIFT_INSTALL"] + rift_artifacts = os.environ["RIFT_ARTIFACTS"] + + cmd="{RIFT_INSTALL}/demos/dts-perf-system.py -m ethsim --ip-list {host} --skip-prepare-vm".format(RIFT_INSTALL=rift_install, host=host) + rift_shell_cmd="sudo {RIFT_ROOT}/rift-shell -e -- {cmd}".format(cmd=cmd, RIFT_ROOT=rift_root) + remote_cmd="shopt -s huponexit; cd {RIFT_ROOT}; {rift_shell_cmd}".format(RIFT_ROOT=rift_root, rift_shell_cmd=rift_shell_cmd) + ssh_opt="-o ConnectTimeout=5 -o StrictHostKeyChecking=no" + + + cmd='ssh {ssh_opt} {host} -t -t "{remote_cmd}"'.format( + ssh_opt=ssh_opt, + remote_cmd=remote_cmd, + host=host, + ) + + try: + print('starting system') + + fout = open(os.path.join(rift_artifacts, "dts-perf.stdout"), "w") + ferr = open(os.path.join(rift_artifacts, "dts-perf.stderr"), "w") + + process = subprocess.Popen( + cmd, + shell=True, + stdout=fout, + stderr=ferr, + stdin=subprocess.PIPE, + ) + + # Wait for confd to become available + while True: + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.connect((host, 8008)) + sock.close() + break + + except socket.error: + time.sleep(1) + + print('system ready') + + # Launch autobench on another process + print('testing started') + test = subprocess.Popen( + str(autobench), + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + (stdout, stderr) = test.communicate() + test.wait() + + if test.stderr is not None: + print(stderr) + + print('testing complete') + + except Exception as e: + print(str(e)) + + finally: + process.terminate() + process.wait() + + fout.close() + ferr.close() + + +def main(argv=sys.argv[1:]): + parser = argparse.ArgumentParser() + parser.add_argument('--host', required=True) + parser.add_argument('--port', type=int, default=8888) + parser.add_argument('--output', default='dts-perf-results.tsv') + parser.add_argument('--uri', default="/federation") + parser.add_argument('--num-conn', type=int, default=5000) + parser.add_argument('--timeout', type=int, default=5) + parser.add_argument('--low-rate', type=int, default=20) + parser.add_argument('--high-rate', type=int, default=200) + parser.add_argument('--rate-step', type=int, default=20) + + args = 
parser.parse_args(argv) + + autobench = Autobench( + host=args.host, + port=args.port, + uri=args.uri, + file=args.output, + num_connections=args.num_conn, + timeout=args.timeout, + low_rate=args.low_rate, + high_rate=args.high_rate, + rate_step=args.rate_step, + ) + + launch_remote_system(args.host, args.port, autobench) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/rwmc/test/perf/dts-perf-webserver.py b/modules/core/mano/rwmc/test/perf/dts-perf-webserver.py new file mode 100755 index 0000000..e178f4d --- /dev/null +++ b/modules/core/mano/rwmc/test/perf/dts-perf-webserver.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import uuid +import sys + +import json +import requests +import tornado.ioloop +import tornado.options +from tornado.options import options +import tornado.web +import tornado.escape + + + +class FederationHandler(tornado.web.RequestHandler): + def get(self): + headers = {'content-type': 'application/vnd.yang.data+json'} + name = str(uuid.uuid4().hex) + auth = ('admin', 'admin') + data = json.dumps({'federation': {'name': name}}) + url = "http://{host}:8008/api/config".format(host=options.host) + + response = requests.post(url, headers=headers, auth=auth, data=data) + if not response.ok: + print(response.status_code, response.reason) + print(response.text) + + +class OperationalHandler(tornado.web.RequestHandler): + def get(self): + headers = {'content-type': 'application/vnd.yang.operational+json'} + auth = ('admin', 'admin') + url = "http://{host}:8008/api/operational/federation".format(host=options.host) + + response = requests.get(url, headers=headers, auth=auth) + if not response.ok: + print(response.status_code, response.reason) + print(response.text) + + +class ConfigHandler(tornado.web.RequestHandler): + def get(self): + headers = {'content-type': 'application/vnd.yang.config+json'} + auth = ('admin', 'admin') + url = "http://{host}:8008/api/config/".format(host=options.host) + + response = requests.get(url, headers=headers, auth=auth) + if not response.ok: + print(response.status_code, response.reason) + print(response.text) + + + +def main(): + tornado.options.define("host") + + try: + tornado.options.parse_command_line() + + if options.host is None: + raise tornado.options.Error('A host must be specified') + + app = tornado.web.Application([ + (r"/federation", FederationHandler), + (r"/operational", OperationalHandler), + (r"/config", ConfigHandler), + ]) + + app.listen(8888) + tornado.ioloop.IOLoop.current().start() + + except tornado.options.Error as e: + print("{}\n\n".format(str(e))) + tornado.options.print_help() + sys.exit(1) + + except Exception as e: + print(str(e)) + sys.exit(1) + + except (KeyboardInterrupt, SystemExit): + pass + + finally: + tornado.ioloop.IOLoop.current().stop() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/rwmc/test/perf/test.sh 
b/modules/core/mano/rwmc/test/perf/test.sh new file mode 100755 index 0000000..938328c --- /dev/null +++ b/modules/core/mano/rwmc/test/perf/test.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# + +# This script tests the throughput of get operations. +# change iter and loop variables + +NETCONF_CONSOLE_DIR=${RIFT_ROOT}/.install/usr/local/confd/bin + +iter=100 +loop=30 + +for i in `seq 1 $loop`; +do + echo "Background script $i" + ${NETCONF_CONSOLE_DIR}/netconf-console-tcp -s all --iter=$iter --get -x /opdata& +done + +wait + +total=$(($iter * $loop)) +echo "Total number of netconf operations=$total" \ No newline at end of file diff --git a/modules/core/mano/rwso/plugins/cli/cli_so_schema_listing.txt b/modules/core/mano/rwso/plugins/cli/cli_so_schema_listing.txt new file mode 100644 index 0000000..3031b19 --- /dev/null +++ b/modules/core/mano/rwso/plugins/cli/cli_so_schema_listing.txt @@ -0,0 +1,31 @@ +rw-base +rw-mgmtagt +rw-manifest +rw-vcs +rwlog-mgmt +rw-dts +rwmsg-data +rw-dtsperf +rwshell-mgmt +rw-debug +rw-dtsperfmgr +rw-memlog +mano-base +rw-sorch +rw-restportforward +mano-types +rw-yang-types +rw-log +rwvcs-types +rw-netconf +rwcal +rw-pb-ext +rw-notify-ext +rw-mgmt-schema +rw-cli-ext +ietf-inet-types +ietf-yang-types +vnfr +nsr +ietf-restconf-monitoring +ietf-netconf-notifications diff --git a/modules/ui/composer/CMakeLists.txt b/modules/ui/composer/CMakeLists.txt new file mode 100644 index 0000000..2331bd0 --- /dev/null +++ b/modules/ui/composer/CMakeLists.txt @@ -0,0 +1,62 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Kiran Kashalkar +# Creation Date: 08/18/2015 +# + +## +# DEPENDENCY ALERT +# The submodule dependencies must be specified in the +# .gitmodules.dep file at the top level (supermodule) directory +# If this submodule depends other submodules remember to update +# the .gitmodules.dep +## + +cmake_minimum_required(VERSION 2.8) + +## +# Set the path to the top level cmake modules directory +## +set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../../cmake/modules") + +## +# DO NOT add any code before this and DO NOT +# include this file anywhere else +## +include(rift_submodule) + +## +# Submodule specific includes will go here, +# These are specified here, since these variables are accessed +# from multiple sub directories. If the variable is subdirectory +# specific it must be declared in the subdirectory. 
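+#
+# The rift_externalproject_add() call below wraps the composer webapp build:
+# it runs the webapp's scripts/build.sh as the BUILD_COMMAND and then
+# scripts/install.sh as the INSTALL_COMMAND to copy the generated dist/ and
+# scripts/ directories into usr/share/composer under both the install prefix
+# and the submodule bcache location.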
+## + +rift_externalproject_add( + webapp + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/webapp + CONFIGURE_COMMAND echo + BUILD_COMMAND + ${CMAKE_CURRENT_BINARY_DIR}/webapp/webapp-build/scripts/build.sh + INSTALL_COMMAND + ${CMAKE_CURRENT_SOURCE_DIR}/scripts/install.sh + ${CMAKE_CURRENT_BINARY_DIR}/webapp/webapp-build + ${CMAKE_INSTALL_PREFIX}/usr/share/composer + ${RIFT_SUBMODULE_INSTALL_PREFIX}/webapp/${CMAKE_INSTALL_PREFIX}/usr/share/composer + + BCACHE_COMMAND echo +) + +## +# Include the subdirs +## +#set(subdirs +# api +# ) +#rift_add_subdirs(SUBDIR_LIST ${subdirs}) + +## +# This macro adds targets for documentaion, unittests, code coverage and packaging +## +rift_add_submodule_targets(SUBMODULE_PACKAGE_NAME "composer") diff --git a/modules/ui/composer/foss.txt b/modules/ui/composer/foss.txt new file mode 100644 index 0000000..e69de29 diff --git a/modules/ui/composer/manifest/LICENSE b/modules/ui/composer/manifest/LICENSE new file mode 100644 index 0000000..e69de29 diff --git a/modules/ui/composer/scripts/.install.sh.swp b/modules/ui/composer/scripts/.install.sh.swp new file mode 100644 index 0000000000000000000000000000000000000000..96fed75253b19bee5e95b539c8e6e2c806a049dd GIT binary patch literal 12288 zcmeI&Jx;?g7zW_4%(Ni5KnhtHQuj}Es+f_0jY>?&and$QnnZR|#x6+QgG(?WcDMi> zfD^z0_)_%;P}PA6&}-?H72Ap9XN&T=_1^KR)v2|K+cwcb=dAZ|b4|}jL}4h+P)1=f zwXW%_B=t=uRp<c!mtUe(&&IbXdD zqY!`q1Rwwb2tWV=5P-nH6v)yNP55LRv(uGled$|2yF>>82tWV=5P$##AOHafKmY;| zfWRLVkRH)}g{Zv6FVFw~=imSDeEr6G<-Bm7IFFn=&Mjxcx!|-p6;5&gCI<}y5P$## zAOHafKmY;|fB*y_@cRW+YB?vmZ`mfvwel~$U{QT;*&f%p7bCQ46_>JSmhCF3hSS?V yFVguB9<|tQ6!M;ylYV*na&WPZmYa9&7R|@Ic9WK;>Dmph{jCZ&h=t45{qzB5Nt8?g literal 0 HcmV?d00001 diff --git a/modules/ui/composer/scripts/install.sh b/modules/ui/composer/scripts/install.sh new file mode 100755 index 0000000..a52a3fb --- /dev/null +++ b/modules/ui/composer/scripts/install.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
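+#
+# Usage: install.sh <source_dir> <dest_dir> <bcache_dir>
+# Copies the built webapp's dist/ and scripts/ directories from <source_dir>
+# into both <dest_dir> and <bcache_dir>, creating the target directories if
+# needed. Invoked as the INSTALL_COMMAND from the composer CMakeLists.txt.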
+# + +# +# + + +source_dir=$1 +dest_dir=$2 +bcache_dir=$3 + +mkdir -p $dest_dir +mkdir -p $bcache_dir +cp -Lrf $source_dir/dist $dest_dir +cp -Lrf $source_dir/scripts $dest_dir +cp -Lrf $source_dir/dist $bcache_dir +cp -Lrf $source_dir/scripts $bcache_dir \ No newline at end of file diff --git a/modules/ui/composer/webapp/.editorconfig b/modules/ui/composer/webapp/.editorconfig new file mode 100644 index 0000000..c308ed0 --- /dev/null +++ b/modules/ui/composer/webapp/.editorconfig @@ -0,0 +1,13 @@ +# http://editorconfig.org +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.md] +trim_trailing_whitespace = false diff --git a/modules/ui/composer/webapp/.eslintignore b/modules/ui/composer/webapp/.eslintignore new file mode 100644 index 0000000..f89c3b3 --- /dev/null +++ b/modules/ui/composer/webapp/.eslintignore @@ -0,0 +1 @@ +react/ \ No newline at end of file diff --git a/modules/ui/composer/webapp/.eslintrc b/modules/ui/composer/webapp/.eslintrc new file mode 100644 index 0000000..7c4493d --- /dev/null +++ b/modules/ui/composer/webapp/.eslintrc @@ -0,0 +1,34 @@ +{ + "plugins": [ + "react" + ], + "ecmaFeatures": { + "jsx": true, + "modules": true + }, + "env": { + "browser": true, + "amd": true, + "es6": true + }, + "globals": { + "module": 1 + }, + "rules": { + "quotes": [ + 1, + "single" + ], + "no-undef": 2, + "global-strict": 0, + "no-extra-semi": 1, + "no-underscore-dangle": 0, + "no-console": 0, + "no-alert": 0, + "no-debugger": 0, + "indent": [ + 2, + "tab" + ] + } +} diff --git a/modules/ui/composer/webapp/.gitattributes b/modules/ui/composer/webapp/.gitattributes new file mode 100644 index 0000000..176a458 --- /dev/null +++ b/modules/ui/composer/webapp/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/modules/ui/composer/webapp/.yo-rc.json b/modules/ui/composer/webapp/.yo-rc.json new file mode 100644 index 0000000..fc40714 --- /dev/null +++ b/modules/ui/composer/webapp/.yo-rc.json @@ -0,0 +1,8 @@ +{ + "generator-react-webpack": { + "app-name": "composer", + "architecture": "flux", + "styles-language": "scss", + "component-suffix": "js" + } +} \ No newline at end of file diff --git a/modules/ui/composer/webapp/Gruntfile.js b/modules/ui/composer/webapp/Gruntfile.js new file mode 100644 index 0000000..f253ae3 --- /dev/null +++ b/modules/ui/composer/webapp/Gruntfile.js @@ -0,0 +1,172 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
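+ *
+ * Grunt entry points defined below: 'grunt serve' runs the webpack dev
+ * server, 'grunt serve:dist' serves a production build, 'grunt build'
+ * cleans, copies static assets and runs webpack, 'grunt test' runs karma,
+ * and 'grunt patch' bumps the package version and rebuilds.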
+ */ + + +/* + * + * + */ +'use strict'; + +var mountFolder = function (connect, dir) { + return connect.static(require('path').resolve(dir)); +}; + +var webpackDistConfig = require('./webpack.dist.config.js'), + webpackDevConfig = require('./webpack.config.js'); + +module.exports = function (grunt) { + // Let *load-grunt-tasks* require everything + require('load-grunt-tasks')(grunt); + + // Read configuration from package.json + var pkgConfig = grunt.file.readJSON('package.json'); + + grunt.initConfig({ + pkg: pkgConfig, + + version: { + project: { + src: ['package.json'] + }, + src: { + options: { + prefix: 'semver ' + }, + src: ['src/**/*.js'] + } + }, + + webpack: { + options: webpackDistConfig, + dist: { + cache: false + } + }, + + 'webpack-dev-server': { + options: { + hot: true, + port: 9000, + webpack: webpackDevConfig, + publicPath: '/assets/', + contentBase: './<%= pkg.src %>/' + }, + + start: { + keepAlive: true + } + }, + + connect: { + options: { + port: 9000 + }, + + dist: { + options: { + keepalive: true, + middleware: function (connect) { + return [ + mountFolder(connect, pkgConfig.dist) + ]; + } + } + } + }, + + open: { + options: { + delay: 500 + }, + dev: { + path: 'http://localhost:<%= connect.options.port %>/webpack-dev-server/' + }, + dist: { + path: 'http://localhost:<%= connect.options.port %>/' + } + }, + + karma: { + unit: { + configFile: 'karma.conf.js' + } + }, + + copy: { + dist: { + files: [ + // includes files within path + { + flatten: true, + expand: true, + src: ['<%= pkg.src %>/*'], + dest: '<%= pkg.dist %>/', + filter: 'isFile' + }, + { + flatten: true, + expand: true, + src: ['<%= pkg.src %>/images/*'], + dest: '<%= pkg.dist %>/images/' + }, + { + flatten: true, + expand: true, + src: ['<%= pkg.src %>/images/logos/*'], + dest: '<%= pkg.dist %>/images/logos/' + }, + { + flatten: true, + expand: true, + src: ['<%= pkg.src %>/assets/*'], + dest: '<%= pkg.dist %>/assets/' + } + ] + } + }, + + clean: { + dist: { + files: [{ + dot: true, + src: [ + '<%= pkg.dist %>' + ] + }] + } + } + }); + + grunt.registerTask('serve', function (target) { + if (target === 'dist') { + return grunt.task.run(['build', 'open:dist', 'connect:dist']); + } + + grunt.task.run([ + 'open:dev', + 'webpack-dev-server' + ]); + }); + + grunt.registerTask('patch', ['version:project:patch', 'version:src', 'build:dist']); + + grunt.registerTask('test', ['karma']); + + grunt.registerTask('build', ['clean', 'copy', 'webpack']); + + grunt.registerTask('default', []); +}; diff --git a/modules/ui/composer/webapp/README.md b/modules/ui/composer/webapp/README.md new file mode 100644 index 0000000..94bbe81 --- /dev/null +++ b/modules/ui/composer/webapp/README.md @@ -0,0 +1,46 @@ +RIFT.io UI +=== +Currently this repo only contains one module. + +# Development Setup + +## Requirements + +``` +npm install -g babel +npm install -g grunt +``` + +## Helpful + +``` +npm install -g yo +npm install -g generator-react-webpack # https://github.com/newtriks/generator-react-webpack +``` + +# Build Steps + +``` +npm install +grunt build:dist # production build +grunt build # dev build +grunt test # run tests +``` + +# Development Steps + +``` +grunt serve # start webpack dev server source-maps +grunt serve:dist # start dev server with dist runtime +``` + +## Known Issues +`grunt serve:dist` fails for unknown reason. 
workaround use python -m SimpleHTTPServer 8099 + +# Useful Libs + +• [http://numeraljs.com/](http://numeraljs.com) + +• [http://momentjs.com/docs/](http://momentjs.com/docs/) + +# How the code works see ./src/README.md diff --git a/modules/ui/composer/webapp/codeStyleSettings.xml b/modules/ui/composer/webapp/codeStyleSettings.xml new file mode 100644 index 0000000..9d12ff8 --- /dev/null +++ b/modules/ui/composer/webapp/codeStyleSettings.xml @@ -0,0 +1,95 @@ + + + + + + + \ No newline at end of file diff --git a/modules/ui/composer/webapp/karma.conf.js b/modules/ui/composer/webapp/karma.conf.js new file mode 100644 index 0000000..db495be --- /dev/null +++ b/modules/ui/composer/webapp/karma.conf.js @@ -0,0 +1,62 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * + * + */ +'use strict'; + +var path = require('path'); + +module.exports = function (config) { + config.set({ + basePath: '', + frameworks: ['jasmine', 'es6-shim'], + files: [ + 'test/spec/**/*.js' + ], + preprocessors: { + 'test/spec/**/*.js': ['webpack'] + }, + webpack: require('./webpack.config.js'), + webpackMiddleware: { + noInfo: true, + stats: { + colors: true + } + }, + webpackServer: { + noInfo: true //please don't spam the console when running in karma! 
+ }, + exclude: [], + port: 8080, + logLevel: config.LOG_INFO, + colors: true, + autoWatch: true, + browsers: ['Chrome'], + reporters: ['dots'], + captureTimeout: 60000, + singleRun: false, + plugins: [ + require('karma-webpack'), + require('karma-jasmine'), + require('karma-chrome-launcher'), + require('karma-phantomjs-launcher'), + require('karma-es6-shim') + ] + }); +}; diff --git a/modules/ui/composer/webapp/license-flat-icon.pdf b/modules/ui/composer/webapp/license-flat-icon.pdf new file mode 100755 index 0000000000000000000000000000000000000000..4969d07aadc015615d3536f07c1d6589e9b4cbea GIT binary patch literal 38247 zcmW(+cOca7A1`}`?7b4DoRz%^l|sr$oKXlF*_@M;y(1&Dl#y{ZS%)*TQaCe*jGS@C zan9ZOeZIfHp68yA`*=Rj=lwkI*ZcK)orIZz;cYn?1qO+}^}}^c*-`P&z5#|iBC;ah zPVNjE8X_Q*mtL;^ZX!y+&|?vh;VV!7mv4a9)6xH>!AocF7cWJ$v>5#S-@J5u%@BOK zDg0ro^u0HKNhF3sZnGQWhf&u=wu&QbyZ_L*>ufIBC)4yMUD_o^IE*cc-#cMEdTjNb zP0a+s@rv{K*rx>x$HPa2K^FlwGA-3$a_HRtg3&TjI70QJ4&sUWlfAGG!q0k>9hc8W z)+e$fNW$3{jTgdQbqhfeL_|b8QCf&Zx**!N6VI1rA}(l&`|yZ%#R~+2MEF+Nq{_l&%?pk% z7sK$-xrpowuZ!2GF%QE#$*q4rK~L9c9KBE1otH&T!vYs(&R@1KX%l|!(371#1Wjv% z7k+t9NFr9}4(h#Vp&Po%7Ir?(;tcU-i8)U`9}U2KB{y z<@~(MGDeuxHE^00Xg7FDxIE(5sqYQPE3l5RWAJX)%!Qu9=b@T!10bH&bMQIs`2gF4 z&b2kSmZiE&k(%DAUg6$&R_N$P1Qy@AAKsrK4LuLVyY39L*tX>|fC+0B%5LuMgtD65 zmR@x&!sR@N<@Si3({_5Ah!!|X_~dVMn}cV-^_4+N>(%hcB}jNPbSE)a*mj!X-{n`N z?)G^T9A-yagZi^SxR?#J4i5X}h!X6&?4evmga7LMH1YNA^E-@W zu*~=oJ(LhWDBS!~v(QfaWMRo*Q7cp%tI56ghVp&x72C^Pv=8ZS9H(Nm3U4p4fymA9 zV3uHy!7+PeiZIKKLKZ0Xugm9`Y<;v2xqp9peu8dZQAeeQ9kJ+Sp5Ms|j_N+(-kIrs}D>#`Z4JXwXrw7u>Ggp&vGL5X-WaqiJiI z^LbDq&g$5`zfwCu%|bknNp+4*0k`aIdt<3=yZF8GYJ!yXIxWw`j@EcBG(X z?cy|R9;k=MNn&lpX|t1!m1i0f%*k=mt<++)G|hAo)^a(&>R=DcEg;*|J^!Dj4%~5XPt^0xWr19U!dOL>glW&*%Aw3i$Z_Q@n zxe6Wp7P4!{gw!GiF@qj_6PKucK^{C=gMk8YkvKa4f6lwLaj90RR(}2PcKv;!33M*j zvmZI~dT#VXZY&g%tr-SI(X%v*XL)J!P&8|vAenF^%0Pv}o*btOG(9x?#?G#@`p#>u zqDAfb@F3(_FpWph(uuSuO%;8T)}J^Vov;{Or$+nQ?WLVLb`}wB?e^P({9dnIqKbbG zt{{{P&w_TNbLtl!JKWoK{+Xx#*5UUXNg|1)xADpbaIIx=#6CvA26yl&pV@pFy@VQ zQCTqf@#aCJUp%y*jXTK|1ThxXOQNVZW(tq5u(&UWX&^0ovU*K@t1ws9n~-&cj4r2u zE@Z70ma1mn@|~dTh0?Vxa-^p7L>7PM&qB|{cy?2!!H8Y&W13|Jiq6Pxrwqj{@vJDs zi|gxeKd4`Ny?+qc@H$yCFQPnLU?Z)Fr8tEVdNc4RKB@C*aA44V+CXR&cDK6O`Je>{ zIWbea_?G^b`)in%f-cx`pqP#J*20E%qDGXvz-ivU#px{Opv@H0-ZDfB7TyC2p+>Hj z8qKbsPS=iWhbRQt1hsN%sB&31=e-(9Iel%qXUMffujTse@;pl{wfdRRgY4(eVzFpd;fWpkor!JJ8D^T0cBjOXu8Z1u;M z_s8n-m~3??>{7f*w3y)EB0;aaFi8h}Vw6jf|yDTkOxX|sM1MH&Y+LvRkmQ#euC{KlRb+_^1S{G{RE6X!TAv%>tulQQxngWw220xor*kZ&y@h4&QU-d>^JzZ`n&H&iQMz zr^H$>?Vl{1Ey7{3SwTTBWIr3eC>-f?CEn}cj+^Ym;yI27$+e%>!WTuq4Q7=6(JY)x zts{ATX82-3Hv#NC^l9!_T0E^r8Sho84bH$prLsAbmgj4^nvwBwrnqT)F{SG%Qq3fi z=3L02a;4z%&YW*+5zGvowWF(T@*Mw6<1K01hxVZQ$UN%a@dSE-e{@1N9~3c~!~1sE z!}@*5o%1KlIxdhov={ls<);1&yl={1)19zcMOsL8xHp8skU}!q>e`x-3e&u+Py|ao zbzRcaNV7}B#y9N0**wp=T0V%$7@P=$Fvwq;3{nVkIQ^dK(^pHo29K5^p&TCu<~O2# z@mDp#&#yF}1)Fce!!%UE|6J6^RS3VYcm>;*S~;(wi)0f^8s~jzr_=)N3Dv;OK$(=Wft}sC(AL+qygdryY5a`=xTvk)f8ahh z1{^)vHn^~LN9nwcf*@2^JmRg6DUYg#U*pl7tO8sZd5=3%*VE1RRxlU#vJ0di`jf_d z-la{c^}cN-uU1K4^V5gu6RIrf!bOv6k#Vn)BfEIoliOP#4w{bAHM?yFQn+1Yd>i|F zI0xR;CR2}3A!z2>czY?(c_jT){*%hvY;w9CFCnirnFb zy&!B~(1gW@d_z5cU$Fh)kKQwtrt>TN&DuNHSnk5~=5GAZYH3|OIIQax4vS*(&1Mwr z?QSZPK%*JM57CNx&^JiBjr?TmbIItL)ae!Fmsdz!Rc*O-RLM=kzqvfA z%N6p|(nI|oDDU91CGkGUM-2$>BG^b&jnJ&c36%>d=LFZRcksdDOS=RhZo&w8M6|`)mkFpkJQ|!UX+UBY#M_72U>4~R+ z&My79=802rIXVv506mP+J5f(Gv^k)Yb@0SkiP*L_GSM(nzm6?VNcW{wV<#l%Nfp1g zCOJ~!s>x}~A&>SZXuOZ+sw7shxN)|YD}vOHiqJZwMUZiarU>HJ$z=%DMZ5S??HV)j z(CUlvt?`SIBAJWYTMy0$M?e?8XPaO#iKAul3!lFujE9c=zkrvFKpl(??gf5y#Cz~A 
zuXc_wNR;Ajd6!!S(7vUPnB@_?tZuE5*_(ujsRQ4=tqmb5sGSxw$Dg%66AcvMaHWkc z+&C+ua&145E=mrP9n7kAnZ?w8zq1Rvr9EC2Pq^4u(;_OC2Ye<6;0z2nc9b$A>>|`4 znDmb=7ZzD8k8}*>S_xCt`m}3(3lFgo)u3hA(*E3cy5zSmE-__Zc=h-9g8t^?2^ZdW ziUZ3QgO4McJdgwnBsii;n!;$-I{sz3dSG9VuPo(ADZ!#;R651&`#N`(uxrJlJPPM} zSAakAeP09JF>Soq3yP~}2ik?rRB61$(P85WT`>*t8TSQ+D0s?D!t1~`_-7)PlQstCb~$TSH9fjg@!XKGfsW< zdw~tWdB#2Id3zlXL!F}wJxKwKlWAGHYMN{piP88{M>m$e;Tcd`YX86@Qi0e;D0xip zNUyGTm1v)BY6MYjl|+KKnW(pm@>&QdZ@k)HY<${VVf$vpB{?}E6O1U&JQk4-LnK}j z7Sd9lfHg#i;QI)4Us8*oVNE|5JP?eCK1%4miiE45PCp9F^{tVp0d}}u^bA?kdx0KGEmH1UA?90nr>=UR9yUL?_sDZd| z;Sd(_?+qBsFU2o*sKO?922qeYO7IQ=O{ddBWD5(gIAXZbC55suFJM=& zWuS0i(FoHP;CZ3Xh6{vw>B&Jh3mAfV{Z*sKu!|+}5ymdTkSLri(fj@3GP=Yc`TI5# zLMCELd$PI-lEK;~v-IOeZHo%j4yS(qviQe?>yyPk3Urc4?v7`PgRX@wUb&OsGp`nX z-D@?YR;=eVQKRVBFtb~mgvMNy?F({)o7e|GRoe>XuP`T+05t}9SOCb@-YLnNq^bbo-cQ0C5)K{P~Z%PMDL7O zL%-Sgxc|18e5|a!b@%-56H!@~nUjwNH>t)2lj~G57hY8-{o$V~+%xNCkDf;$+%k>s zD4%wb^6or`kI*(ebfyT{UB1PbcN@%lG^gX~w`tv?mDj}iWxURds4Tm%s5-+U!><+~ zC0*tHoMShYNzeik>Bf2`Zq*?M#LE1!#sVQtr###GE{I^~Av^K8=fM)_^OYrJ#q;oO z^^vv@ehAxB+nJNb;--&qgx6eEk@)?sxKIYSya4Mjlg7G@gQ3j7-Tnq)D(wXr2fjjj z`Kk6%w;DEOJ4mxt{qehX?DfQA(q zG)5f1I?lygeIh9xi;wxyHwuq&?vIuT&WPlNp0pikm@RVkAG`><4ddhTe$w*GJO&(O zZ^bjAXe#MY$OFq(5RN44-&8$f+tS`}3V-7QSCGCYeRV^e&MM>|7hiY8&84ex)Rtom z?hkLfLUdC&AV;^BLRqxL?yYs-#Ztp@OwR1bp*$?&S>vnUcoEZU!E{Rb{ZAFT&?>R3 zx1W3=_xr4C2EZD0ht zq@o4_{!+k9Z;`k2~7Wbw#cRE{u|6-ePiP1lJ*`^)yl#FCnASP!^ZJbt)*)f`zlPsl z+DE|Q5&d)27RJ?GegoWn0+#&0MvF0}J$!N&?qWlq&qUb0DP1w}_CWp@F!}-d{VL%^ zS;^B|qlMsVk@V*IxLc-;qgaM9-LoIY?^ubULa+Pl#9S?51uyUYa{DKS;V#!s;Vd}} z8(5~=j^fMJxM_CUVfkDofpr5XeN3AXXT(O)124NNt9Q=6VbMaI8Ok%fLscP_w4!Zs z*FY;^qHC^W(wLKtsMavwO*Goo1QtVbZ)Uhe9=DsQi*PkUbwG4~1|riE&7JkIm<{Hj zBZ%H2)nU)BO^Mq3DnwVOEG3?O)#7w7u(DS@YCI79AnB9yjzMjw7UL-0?DWYi2SY4$ za{j8bVH||2e@tmx^QAY%sfR^j^?H8&rhNn=i>c9ZRNK_5lAYFLJ^6)5k9WYTOZLaU z^u!g^9a`?l|Ax5VF-`tRgx*^tQ6Z++y(Q?(A){E#AuTB0GJ*ocI-!tfhz-*eI~gry zgGA!_Q@*A@fU;{D-ZqG5>EY&Zb?!Tli(?~RPrDedq=Rr@yTyu4dK3a%-VS6(#U{T6 z65+o6^I+LvdL-PcxQS9Cuh0Vek^ke*Jyj;T|KWs7d}ZpSuz^bl&k2~PBT9Lw3Nhs;sR}lu^Q^vxfz(v5iq|7IUCD5b5Wpjj|RD(ty1I01}|S9 z-Jj*^;)mu_St8xS0j^YF)=0&hA9im?27dmoEsuGD+g+!~gPws~7dN~~hx-7hs<}6D zb`TqJ@G5p>9ZbfayJUlX+8^?mA1AMzs6FABD@1Dh6igzBf7IVFtrtPRcQoE)&Tnu! z9b6+6z_+!cnlXdU1-pZ$oIQ4LS!%LYBfh8QR|yB)W3xvtkU~gcnQN~2&A&{ri*+n_ zPm67LhfMrR#D_{bhzVD`2=Z1DXd#Ao*~NrKMtv>f`&l7cJd71Z%XS6{akz$T8L>jv znQk9V|D}zL{g?Vz%!Zu(P_k!myN&h1QS2J*t@&2r zLr$T&DjB#EH@A1QLj>x5ZogMzckchaSP$f_hpJ>BO&vA_cP+kCLc-A`xerOV5#&&- zq|W${vN0FaQxZ1X?)KtM07&6mxrPs{SP!{b@b}`W%L6RitGJJ{&Cp1qZ%y1e8&8)G zS;~Y$vsvZZoIS-E`mjFzQtkl?K9C+q+$D3sAx;?XA=+L48$s+UgPb4k@t&T&*?1pE z8#QADbK|-=obF|UrIz&5v0sHm5@w&&AI)mLUWN-T3=mdiGDGJYbcdGnf4s^MV7joQ4>6o5)sw=rZD>Kk?^Y zSl~OzbB@%N{YZ{0#+q zXgFxDZ8^YI%JxF_O*)#fsx0~SNQkXn#Fa{!dm)aRrM@E9GX|@+&oQ$R9w)vcI$@ejmSCtq>8@R#r-y38^v&c(bWlwLDNGt zM^njn6&M4BB=7+lNi_Kl_#`w+pZVu`g9gyC%fito9V)=U*@z`E?KfyDgb<6{R(E{f zo8fcL*|8a+WDX#$WiM3e%3txP0*e$~p-!e`1CIUF4=u@*+&w_x-MJ$E%LI$DIf>zm z4VtILaPXwN)RWaDyrqE1aDjn-3O_%TO}Ryd*bC<0xfrTvC#u2SeJ)MD9o>o2xu~Z6 z&3^mnkeV%$#$Nu>b1JY!q>|XWLf2a*Tk84pt2qaVMG@5ocJH)~haKA^xD$o??1b^5 z&O(U%fWq_DBoO*zc{{J4y*Cx2>uZ$|R8|iLUViM>U6=9s0|`jl4|FFEq?kmoIMl*V zKLO;<>>)K5Z?tlW!iTDhO5`_}{!hB_b8h_V$n0kV%~MEyvMLbO5$O- zC@hoST`cE2+C$TmELaM<86lv)#l`M{QrG*@$8oCTc! 
zlKV<1Sgdz;nB2O44M^@Rzo^a#n7xa zNRjKeq@l8L&F~zzC#Pe0*wvh|826X1+?SOXeGL0uv2^#32h^_cv6|q6 z8yBM_hCiTD*8wP%ge?EYmO<&Otd#CEFFM<#8)^WQbx0;8HUuW4BXHyfRnN{(?;RCs zSc&lQ&W{JXY6o&7Ff^*5opUCYLKOy^eeO;8c7zfEmB`55Ll>A6gH)6uf*|ggmCJ*p}U!% z17C?l)R1X==R>wWkwbOWwdpmbW>QN6{BK=7n_AnT)#JbS_=$)B>rtkfu{%aP(PQfM zh2tI1Q%V$z-0Jg|*MY*J+o9Ch)+^ytr4w!9DpmrrVU$vWQlDMvwP9Oh0MdQ z_<%FTBnD<7!ed<2SSFTCD~RSdd2vW#@QO zb(hy1-*YRn7Y5Sc$Pm%zK1ZN#qa0_?wT}U%X%DEsOu4I5;W-qMSy#8t*q6$vTY`yS zK?yzp&ksF8FbZ!+xd|z22K=6xe9Ad^w;rl13tv;2e@tS_92Vi>O`d&0g8Aqe$;$K9 z(RaO7Ip(xcq_~Y{T3XGwzchr!WDVZz6B{4JQs0p;XHC4Of`t2RTp20yn{Qh;pi#Ke z{^AT4h)$tE=gR}TWZb}9$7lxRMr)V^85MBBX+P_)N;bbhu?ud;Vk_FYXl z426N*PV6V!ubO+tfKi26nf2z=AQ%7?tTJL6X?O_5%)z?pAjdF_*B5&h_2akGdAdYvP6@FAVMnYp*@9m0q= z-Gu2(u+;hPO-z}wk-X5QgBqp#qKuaG#+C-I*D_2C z@ZRNr#=e`{k(E@GKGMz-Fw;Hi<$i@D6$`O~Q>y|Qc;?8zA&Ep_x!hWc|_$L`X6ZvwSbT5F2!#u;5W125#<6Vb|%FIY{PE$@K9 z=o#?#rVH__ebKSN-j6;DQ;P*3M7qZ{3|yM**D7rjaWUSQS5%_~WeSqOS9{GqQ&+bH zB0yGqpA2x0gsB++l;4GCyUlI3o~e`NFL0egb)hN1KCCVri|p5aHGNAs`uO%|REJu? zfH%di>bYia@OIS~n-)mz)p}yv!K_)ei(_gXJ;UX?Y*q=-T4 zZ{o#@YmE6;-j=EaA-69TCEQsnsMHcmGDnme4|M6!|8Ip+^29CS{xtc%-ar3>)V<8_ zzcO9I2Jwuif6MC^_i4BMtoYcD3IR~MUWG!FS7G2X&CRqMrTGEv|MZK+*zQ__(#A<_ zVOae>i#RW%Vlw;xg1pe)I{;wXpo}2lau`sMlLf zdv6Y~+4*Iwy{L?wV5$J+O#>GOkOM;;xlR2O9)~xkw7~vgNdSp_G#<-Ucf@|gQoPT$ zxF~Yh9~*)-OiU+rC*3NpPW|_f1I%g>?De#s+$KNIbPL4x5X8|cY=59*QdvDuSMi~1 zkt$&hv8yQDASGxIk*jfnVR* z_9Wf>hUiS*o*1%Sl2Ds2N~fk&Sb0#K1a$}ShO@U(yh;ljzD*A5NC5C5<5)J+%WHU? z^KHnzn+2gsZF7yb80A^+dO|4Aw(r3;q?K&J(dxYz#8gw64dUHH%vU_cSqZq;HP#Fg zNvsC<#&LwS!={~Qc2$g6iEXdkCj9!6KvFxkXPke=bk@bKwG9K4{rWnfy8N|cgIL2y z&Cve6ULpg_V+&&2?|Q#?cizYcU!H(GM;aVpG#XaK7~n&n@d3Y68B-n=noN~-g{yld zU=wb&k=qChw+)Ev4E_yk_Z{{Xixc6tj}TE(#`s&rYJ1KpIqn;q%mM2=3&s)hXHtI= zsp=^_UENAS9(QWh<9NE9NN!XoPC+qtv z>lt&32^r>@%lr!K6dnVjdml`p>{R}jG_#w^{A%d?Jrb!VE^Tx13mIzl1QA>&k%16F zI-$^C7L5Q}ve};72n9~bP6CXvN}IPfqqUf%WdTUVK`?06lp?)8KRhZyN!R2%^2xu zDJQia$O%7D$IQG&I*jB)W=vC*Hbf+gD0bfkoXi_t7mf}DPTEIW)bsI#R7KhAL$$>N z|4eETceR5WtG@iTa3BF~M+Rg*Ckps7rImVb?+sOsv!WHxYp^* z3q_CFqoJRL<-cBsbq`0mgh+*$kDjDZqKoc&}Y?ue8>iZuHC4 zH=&@~8|)#A)k0idVy@W(OH>PKyJ952#^{-BQ@>(N2i2_goGveEh()93Uh2;e zZ#v75v8f!!AS3WL^~BJ{d3e2_@*_0HtHaT$zkEglPw~LJ`IZKK51_Z*EAwpzs@KDL zpXIr78&N#KzAR3TaMFhsL|dcgu-6p2dMBPWx#j1(Lhe>KK20pmt9fVs%cR(%qa!)CnuP^ck*kPgSnSUlfP-GkIs$D%%1yw8*dQF#4 z#wx(U-C>X%)|9O$WZ0v3+3r?AA?&xM1X23DN(FX+=y)nj*q231!vg80ISSD83x*`t z7v}9^x>?QA^2E05pJn9g!t^c(siv~dS`1T9$<{6F1?Ur%yLiFIiIkp1g6%GZZRCZ++#(}yYcGLXGPz<^ss`|Zl{zG1h8!H z9py^4qq`YRuKyAgN8QI#$C`7@_k6OJ5Jtj<4fk^-cVes0)@5*Ub`6Pz;aZn$!?^8YUqWdB(&Q8 zQ6VRRRM28QYNcF&EX)3`KT@|=MWu&(Yji15eQ}lbOv;nu@ThQI1jC)i3fia31NLAx z0u}UbOcWn*^{{pL-@|>B$4c+@4p-8q-6KyDOy2-$5?}7+L=dMFag=pAmXU=-k~KDf z+Y>~;ijJsJ1RtGqjv5?>xM)3rRfEyLtM#bo_}!N|2+l{Nfq6E4GoV?7>`%Ey*b2&a zpKt!N(IrnPg5rE`vROc1dX~T`AzLnw{d5%&MJ@!nc~}OtM~v9?>lcden1Nbm%SoOo}2}E!^xy8fkMf?00Zq{?~5=i zCBoc>6M}UV2h!IWlNJ)vc_w*)qM_$oQ!)n58^n5?UKOX@r*pF&IvFC!&E9cDST8^` z!oeaqn&{wWA*6+Nb3L44_EO9In`BS0f$`^Q)LPVibsKlCOAv^B4gIrAmDX zPPsz{DD%c+uH9|D3j?*e)=zj^aFo6rg2~?IW2dioXwh1r<%M>_20gonqlne*$d$qkag$73c8!} za4_2JH|f@5GC?aI~cWcbpNZ>DMAe@DZayIvyYThAEUfMa~L#ic3uB(@5%C!ij4 zKlE*699ULm2$Vk*XjKrm5Y8_3vtQ?RJtT6!{z|_tRs1n#e!1Fz*-siMhk0cMzn;rw z0F49Q%!j+17Fos0V9sD)5`4`&Q_- z8Ol$j+YJ*q=OqTG!uFFk5}0kq*sfi~y}nm#P{ma&B05}u7n<1w=s_3H9EMat}q$~NCw*0;b2u@#UAug_h^)s{ftQ4%v5Kb zqSZ~#TwqAC%bRlnRY<_C%y4x*N`uC-F<|ZHHHJeufebWbjqW19*Rz#@kP6x-L9&}$ z_r)r?^E(20E;4x;8eL-#Qd4ls)xvX=8ZVw|H)`*#IE=vAk(-(VOb8+01vZ8FP^%bl z3rXGSx~g0ie~Tdd!DFX#GgJTKrH)Ve!)1lub8xHs3mjQ1z9Y1@Z92#0N{Nr{VnAl$ zObh%0(nq=G=s=by4PP4#bktcg|7%%yw9!_7a(`%;d2#J-?~rC*ZARvpWNMDVt$<5D 
zmw)ryOh-C|jDOo6i>77r6Oyaim)Th~y;_Getf6E}c9Xd_FD^oY-ZSj|w(WAJGYn>W zBT>oSS9!(f(A?r~CPFAx!-vwM(5=dEgED?pQRsSbyA@N&iHxBbH1Z!}6;`HbA?d(u z^(6O8m651W()VE-(tBrcq=5-8J-c|v7L9oT8bf%0P zTHSiP6d`vo75>2J>TSswiUVuvyHi}_tK4RXUYs1oh50u0e>7WI?1kc`b#D8FGFhq0x3dPyEXRnk-Aqu1t zSglf<8jQ@tny+HD_8OdS8jOc;cllELj(@+mq+HY5eoY%UGab>tBvGQ_0vvs2#Doe> zF6sOp&Nh~jeD+GhA3Jj`HtxYR94Nr4ebp&5ofbHY%9-2uidc&X-(TL1_#4-k0eApX zE0{~VBGD+#{e&ASs($5GbX7woUq^L)WR`SBjlL8OmC0N%n`fA>06O}*t()5LcYTsW zTV&4W;wLX+HSrd17nZX^vzE)|8cpbir2Miv48?3XBKsQHqygFo*E2LUISml7wD8@w z^)5Sfm(r7>uU&RJOG;*&i!F?!l#gGPG(``AYr}O5jB+Uf(PJ}3TJW&d&`zt}PFZ+5 z5zx!@lLMbC7Yz_GQ|z7oMZrOlc=f}nijd0K@W4#bq?k<`B1K@6_t0^d++J;opYs#& zIEFi?NU(t4_l~M#3&l}`c+JXm_R%)AdGgXXf%?|BPJW{0n%_WxPl7jFa3JJ(ofXov zrw3jt@5(>mJk)*d(-wkhzK~srk5boEyXl_!vsrGN)#ZS$bvkl&DJSc`QnnK*hbw*X z&UP0@XVC9QHPyh1&U5k9ys7UCk;TAgjzgoLA^sZdqzd7OEDGf3XP83L+x3aH>$u`y zk#q4q5b+ab0V3s`(NBBA`3G>6Ol($HnM7<4#!2xOWXp)W65K5j zCp^GnNH*DJCNfV%Y=x2YBNS09s&WJ04YnUOE34AB?W<5W6P~|5tnAQHM zzKlm%Z7osnQmciy%=4}6z_WGJ>Ja7ST7;rn`(ukmjI?Yu>IDZ zH@ZL$_i*@OxX*=z!x1a)@GTX9R0Wee_5kL7qe=rF25|8q3=nhw?h= zi)mi@$Ao}Beq~%{Ad|kr*vE2lHP#=tOm!F;5&vAQhuh3|fBA6SIR;MmUMKu9k#LS9 zS6416jR7qBq?^hYg{`*r#+ln+ba`8KWTbQw6s3?>20X>5(30zbXG6OEjohzdDg+Am z3vO^sxC4W$d@j+N66c|^_ti5B2sgL{=nLUycEZ!ZB*q!kzhTy>#;};G+U1{t|1Rsn z;K7}(tf*}SPpGEf@7rByNq+#YC7mx~laA=VtXYYLqNISDAk0#6+K|MJgnL{tjO@fK z(12TBOW!{&fhM4(DKv^lCmR5{+sNH*ygb|$Fmp2g5YWU>fz67d>sI_XWdd5Z*pZGo zH@a4*#SZh@m)e%z&7}5Gv32Tc)sCd+IN)IgjO_yP^e}irwcvi|sDHu1hwR*|!T(;C zQh|%{TrPgG>WoGXlV+~YayR|96uOS(EnuL|(aaT8%k|90V+5Tp)ZGzS^iso zXF66STp?!jWhoPw^^0E1BAwV60VI+?R(c#UQa=(}*`m0uDZVtoZUAMBq^@l!t2GcKu5FU1=E(qTnL75jNC2g= z)bu61e_0Rh`C!8Qdf%MS0Lxj(+NsWF?F`U=YdZo~&2j+!mwqMTmk11`lBaBPlKQJS zXr%~wi@s}>w7PLg-AL9SFw{F*Qqk~2H4j%MzC*{I@BuBeRo76oYI2Ys!mh}s1cC`% zOp+Q+@DxMBZ8uZKf5t7!L8wjD%O1)Dp|%_Nsl;`Su1lmgsWR|v=4zZ!0F(LQNR|w1 z=ZC+cFMuavG96@7+)$MxsvZ10x2Xp-n$qe8foCi|vXGwhXvgvR4`|Y}hc%KMfDfm1 zP~N5DC088n+Gi(LN!uKBNCPJeN7(9IK9f^MSVp28(O)}=uK)Lr* zxc0DI&k3K*bi?N~k! z`7fB4v^;Cca@2WfM!x$ypO<*tfi%l3!M|Fj8dARC+Bli=MihASr%SQ}>IofyS^2_C zWK%V)e4&N;ZuhGv`m7PlKS9~`w2={W4i_bh@xuVbVR}0JnGd&-%6zALa<$tMRC{Ge z>#jIpkMUbzPW~10q{N=mvw=fAY+wf5LgxQ`-F~kRjrrT>)tfw`Lr=C;zM_;M``(rc z{N(5|C7~306V@@6%KkH0n!TQ~zvGQ=uMp+mp9oKGOF1GTtG`<6|Kc8f{L4>H5NRYF z^Cg*v=UwCwV$Y|u^5$n|Kri@JRq6hY5;3#g?d(4-pUC*0xgy~fLfG6j5;YjG`YNh+NfV8P)%)vZ<#C2w^Ys$e? 
zheMGb4Bq}Jjs|#u(|u697!2G}lrAOq6KAzh6#tnQoOXFX?rw_pL=q_OPRF$bz!n)$ z82?#$MciVg!7pc<@yTzs7DzKQ5}sF>GE5e?yffljokvb#sLKpxh!GFsuX+D?Lha7Z z%(G|{5$$JF>lg2Dd|Zd~>^mOe#eU<2AR~1EoqxWzW-BHj3kS&;Us?=V{_Z^N0G zLpo@hvp!7S&Tg*JgjFg-_aNld%X7rw|QR~!a%`@ z)|KF?N_M~>lIFsDcNy>)+;Xq$;GymL1J}`-u>J$65+A?r$GtSDN=Bo`%2=FIycB`+ zdZJ+~Y=GOR#mhNIe((CMJ=aZ>?|C{JTsI-&K@F<+#5t4_!Ji71VpZ3@0&vHw_t?Gx zE>9{2&qt46gwO+K+3MCc6;~_3o>Bbe$pt3bRl463*I&2R~AyBOLigf5xBgL3o z2rnVaCwEian0nxOv7ArSRn;ppker?0N^TZb0c=ls!sy(+&LPLUoK`;`QSCT(J|l6l zJuH~5QL*~cXHe&#d24_g@V~+cg`7d~B(FvrEf~&L_sm#+91puTMYCE2p#GV80JOhP z2V9^{N{U1S1(8U&dKRh2z7L#)NwJ;ye*cU|)k<-d3M-&(JW2UQvGpoAxtg4=A zewCmA*6)ejo9BS{Zrvzahws?@<&BMz+jqHDJQeDd9_V5*>L(KaZ?^y6U?Og)020%; zcopl}k<()F1hqxc9{7HFOy$skZVDQ;TW4pWx&v5aJWTZ;#?TJC9$U6#YgtIXIRV)3 z&DS^5%y$5O#+&=pF?Db06z!cVa+Cxr4?c84|HUzURL@(N5~~1+FrT8yqXcRf0`-qn z=N~EqV;HL!mxQfu)#4bZj(qzmkZ{_S1?N^_BzI2ic}PD+5BK8RqYkG?G~-8cU(;(j z*Ubyswgx#FnL27t1t%-a%hf^lvR6Obz7d11{Rd?$f!(1X&;wVu11Y9#$pdz)WtmS& zdd4tts-wSvh2cga8l`|)in(cUoDDqFGoy+{sVCR)0qwtUpE<@Fz+`?3$&%S@uLD)w z#u!T4C;;h>94-9{V zK|2#VB);t$fw4)T;sSm?lY!^yyx`1dJ%S+Zi8ucOkZ+$|5|>b6VS`-uP+=8K{Q|(3 z4x7acwRtHX;P**0KZUDe(TscM&0+Nl|4^zddchmNeFdBR6|^@T0bp@xQp*bb z_9hi9jSegV(&t1st&xNf5}wiQnoWa*FRaJZU9e#s@wig+_Lpi-ngD|ycANUmDF+9V zdrN!Vpd!FxD|Jl&K#LhLfs#dx*d}kr0K9Fg7BdXq>M~EWslE&0CaZ#C(5Hk?y?b=? zr*m*rI@^~f2518jh+l?_{3*bW={w}=hz!F{^isyyr}j6Nfv^4Ip1mUw=!})LMReDE zx&pd_VFIrNIRVe!hhadF z5(EV)g@c4N2RM{;hjgb%cY}b^CEZB3K{oX+8q8Y&Hg^iyOn^SnM*;<_fW z9w2OQLC_ltULP2m-)bo6_&BTf#t4S=ux4GWny=4m9^Nf)a_}IR4+IQKlfA@D>2QMQ zE7jE`IM@x@z;@gH5?yqC??)3Q&QZQXbRINXR-;LKBErZB^*9xiAy$Us((qYdLNXO0 zOGSIAop;lf!cmD2QT9ZB;w^y3kEYWJA1u9CRH_hFs_PGZK;3F9E(JelD5PjECAHk_ zB$RLT@YUjY)gY>~6-jGr8OBk8{dQfx?%V!^se1F-K78QV$=wR`c1if=wUIMmClAgl(occuyD>1NJ{S}iQSoopL zHD*iQS!<5LO&2Q_=}D?-sL)S%_JzZt#C#%5;4QuA{6+2e+5%A!Z>41c0V}Srv~$(e+Dw14q2O}o(U&Z)Z+=Lziyrd!Z)GsX zLaacO$#RGuyRrl+3%0;Wo>|xEXAhCMH%bz_@H+L3@guptfuqlwgf2Rqfg|&1vyVKF zf=2I7WXr?lHePz2$}S-qxU$miSUlA)@53gAVjJWAfz2kvx;$vv!f&-cKBXUE0JfoK zbG5jFxB9cNniY~1%YcZFLeGkD5@j9YM7!<_`pW(OtQQSprdXEYd#Iay4|FV$ooYP6 zat(>umHQSTi9_^AGi5DSmLE>-M=hJE`4AT8sZHLPSqOjN+Vk>`$rtQ^QFToU zb~F}~;q%lQk_wt<%MbPb1T-E|<2i=rB40oiw&P-Wq1lZ6bp8NZHm=>>l5c;#k=M ziHZC#c8_^uzu&80FEh=pFvqZRt$G1g3-jL{dit|8Ynq)l>sgDiZ6mfR&}~@1q&8^z z07QykpTK>|93lIm>JUUNdgQkSoJjcvqbnpF3J$MJ{b1{3PRLxfp<4HiIUrsWIz8^$ z#tr75Bk?QpxKG}worn14#IrV2JV&k8=F!(u70eiT^9@ko4>NqUbfbyeGA{5;p(VHw;=Im9Lno6_02e!fCiz1r_#yr=!e9;~5XBUGK> zTi&7xVCXzWVdx6@=5uUivmno!%#p87F!JPa3UDAs`y?hRu0QT*B8^*>QBcOJt{^rf zA{>!#->fCd(;?!`BY70L?No}~uTh?{&ds}8sVPP+{$HbkjS8o590t{MBVn($9Yuz< z?T&!~)G_8!qZ9D$&@+DF(Sj{jKJd*o0Qh#N7uZ^BFO67X``~3}OD?)MLSLa5?=KOr zmj+{esa%M~E0hix3bceq%nyC2cSf-v=G?{Yrcoz0!sY<0&_W}k$wPa3KT-IKTUTBc z+xY;|!p#WakWb?YZ;tl*=nAmip*Up6wLS|=`GvvM9?7i7tB%Czes#WT1El$7&KP;a zd1c%#0HV5x?aM?bO`JPt|6!cyoQJY=u>(clLhmx|5AY3|EeM(-2zq5;gD@~cA46}L zuB8%EaI(?|(aUHXUi@#PZ(#?5{7x$*r*CU%Z=dNcefz$zHg2Etr1${?5kS2&; zz|zdpM&3$WS04n;Y$RaI0Ajmno|_v)FXU(^B5wy|HM)2vV#|oa!0;Ql2vClJ@t=ai z0BOI=FoGDM)Bxm(Fo4*v3j9mcMFUqv{eJm>vCzx&f$VJTFVZJnzQYBQ|6X1a1f~Ns zg08iHow=8JniwuQDg|N!(epu*HCaI4iC$3urLnHQoCyE_oWm*Ut%HL6$b&cCHLNu~ z_a1}DcJW^!gZel`vEgO?#oT4!e7(tE3L*$&VNxpTDSUwydGlCT2n*HAQ)dWy8$+hK zURF?Oemuw&c_WW`XCnTzMRjsyv3zL6aKmNjGmOlMC;V4C6MGghw`cG=O;^Hzzn@jfB>D=|IMrrC~$ZZGD-NGNU3XtB0K@l2Y z;dCX(nTLy#FKNPKJcn=JjizMuWng@ehMb=Jc($>?Z2#i|VD7Oo1kmR5$ znTdF^>5L%wG>urYDZ>nBMEx2Cv3QvI1B&n84-+L|z*uowor~mYX8w#qSa|FH+i);Q zQH|`?Ht2yz>_*8jjvox?0^t3{bUt7X#Tw5qwHo}|Dv>|ZVW5s`kev2&l<3|qy%Pe) zaWAlZD+EiXY9414zLxiJZB2D>!RV-NWRlQT{*sUWl;`o5W<3S>Z&7?0h|Ka0@XM&xbchFNe)sdzIrMddnBQnTJ-c6}TFV 
z$luL%ud1&aE;_&pU66G53BroBG$o%;J(G;jnZIoZ`sc>CEbj}P=oz0j`cReGG$)h^ zwJguTq$@ehxfaGh{843BqvJPX)Y}8Ev(-7Gp9OoCwN(1;HJvo#!dTydR-1bN8LI8( ziH|kGtM=`?>KU;64Mwprg~hQOWGmS-y2@QioMf-41tIvkuGYGEwi~2XmBuxQ}dh7fI> zv}lP{MmlW5%XLB*;RN9n{_#wa|4)8PMW$8dDOrYjuTZ6S?1R0L*zbm-AJN7!6QUt6 z?jBd#NO>T?(I2!92GX|vpU zr@`{9=;gQaNzT$K6Q(V}{vqcXuR^NFwT<+%_vs?HmI8z4345I%fXT;(kVVpmX7=H6 z6l|1_CBqY?)R^wd$JFl(lI9uFH28Hqsbvdboj*5sFVCMwBziD0pHNqp-{jIEL&%^1 zka-k)?T&%-d4^f#Allx^xpZ8M=_FsraSCZyTafm#_|B`8sSyg3kh#4~6U7ofFV^9A z9tD*^)*h0!8mZC-JOm(Cx28WN4eVsa}f~2-xv&})aqn|QR?O*aH zFeZd4h4=*6^s2D=oy!wTP>w#M!blLbGh~fR7?k{o?fUHFNBmE?avuXfZ|tOajk+Au zixY>gAw+|99i1n3?HXDsRzF7QOqg-lx@A2(9J-^}pW;k0zZC4|F)OpsK$O@}+cw#< zrS6;kCYwxw$@N4G)c>)f8{P_tcWJ!9-*Rn2F^>*HAH7#Z=&^Cz8s~e!hMluo(PX3@ z1MT5{6T9SL?VB}3>m=6AB(ip$Z6Tc4=gx_R+;>wPooNmmXKc}ib~Eg)DRjR&;D)@0 zUCi8c!5{h@G>=Y0q{|g&H|9|OE)MxC`@paRT+2$~YKSe)R4iw%AzJwxow)pRcw==> zOOg=zX%1u}ae-UhdBX;zX#$~E7C7s(9^I4=Cw({K4nOSNFFLucf%{p~g7H1B;T$rJ zt!{dp*#d?uPZR0lV@`vXwZ4GTpQER0WKdZl|B-I~L8oKdCAWJ-K;ypQ0$S;-ju4rfc zBCF7}+iisx`?1i#@<}Hy(nC7iVQ$?D8Uu2E^7~CHvDgx{B@vX`0kJwC0=9G>q|xU$ z7}7;?gU@l@D+@FF> zz%F4rb~%_U?99#VEUokmfJ7t=jC2hD7P{L0 zP9gY|5e#g6Wdt)bFfu?OY-(We6X4%JC9fY<_4^vV*D4+Ko*xUMg~@PVEG98_YaJLjTu<7_!Gv+zydv;@<%z~23S)0 z69&`;-)xTsSYGifcC#D< z^pNOZI2eF5IDf;Kq04@Mm1BhNm;M_DJy`Y^j2U`-@-G+@+s(Sb()W#WkpJ+$5U5iB z)gJ4Az`+h|g#Wu78?cZ5FBpW0{YE(^F!b=-pE#Jn(Br>8hu@;B_J zd?8@)&ARNs;@aPJS=n#Gn4pJb{=&fooD}#Q#tH$J@BbevYcBY%MWM{caGy6@R0_t}I2cV`m^oogv z@dodiSeS0~2LS9wKMBBY^btTme!btdv(W|)PuN_Xq>(p%1&kFeAbL4VOW>S7AYEv- zF6coU;JC~0?}ETmIHv&gquZxKaM+BEXV6 bF#Oos0VC --certfile-path=]" +} + +start_servers() { + cd $THIS_DIR + echo "Killing any previous instance of server_composer_ui.py" + ps -ef | awk '/[s]cripts\/server_composer_ui.py/{print $2}' | xargs kill -9 + + echo "Running Python webserver. HTTPS Enabled: ${ENABLE_HTTPS}" + cd ../dist + if [ ! -z "${ENABLE_HTTPS}" ]; then + ../scripts/server_composer_ui.py --enable-https --keyfile-path="${KEYFILE_PATH}" --certfile-path="${CERTFILE_PATH}"& + else + ../scripts/server_composer_ui.py + fi +} + +# Begin work +for i in "$@" +do +case $i in + -k=*|--keyfile-path=*) + KEYFILE_PATH="${i#*=}" + shift # past argument=value + ;; + -c=*|--certfile-path=*) + CERTFILE_PATH="${i#*=}" + shift # past argument=value + ;; + -h|--help) + usage + exit + ;; + -e|--enable-https) + ENABLE_HTTPS=YES + shift # past argument=value + ;; + *) + # unknown option + ;; +esac +done + +if [[ ! -z "${ENABLE_HTTPS}" ]]; then + if [ -z "${KEYFILE_PATH}" ] || [ -z "{CERTFILE_PATH}" ]; then + usage + exit + fi +fi + +# change to the directory of this script +THIS_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + +# Call function to start web and API servers +start_servers + +while true; do + sleep 5 +done \ No newline at end of file diff --git a/modules/ui/composer/webapp/scripts/server_composer_ui.py b/modules/ui/composer/webapp/scripts/server_composer_ui.py new file mode 100755 index 0000000..e0dd7b8 --- /dev/null +++ b/modules/ui/composer/webapp/scripts/server_composer_ui.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +from http.server import BaseHTTPRequestHandler, HTTPServer, SimpleHTTPRequestHandler +import socketserver +import mimetypes +import argparse +import sys +import os +import ssl + +PORT = 9000 + +enable_https = False +keyfile_path = None +certfile_path = None + +DEFAULT_ENABLE_HTTPS = False +DEFAULT_KEYFILE_PATH = None +DEFAULT_CERTFILE_PATH = None + +def start_server( + enable_https=DEFAULT_ENABLE_HTTPS, + keyfile_path=DEFAULT_KEYFILE_PATH, + certfile_path=DEFAULT_CERTFILE_PATH): + Handler = SimpleHTTPRequestHandler + Handler.extensions_map['.svg'] = 'image/svg+xml' + httpd = socketserver.TCPServer(('', PORT), Handler) + + if enable_https: + + httpd.socket = ssl.wrap_socket(httpd.socket, + server_side=True, + certfile=certfile_path, + keyfile=keyfile_path) + + print("Serving at port: {}. HTTPS Enabled: {}".format(PORT, enable_https)) + httpd.serve_forever() + + +def main(argv=sys.argv[1:]): + parser = argparse.ArgumentParser() + parser.add_argument("-p", "--port", + default=PORT, + help="Run on the given port") + parser.add_argument("-e", "--enable-https", + action="store_true", + default=False, + help="Enable HTTPS. Make sure certfile-path and keyfile-path are also specified") + parser.add_argument("-k", "--keyfile-path", + default=DEFAULT_KEYFILE_PATH, + help="Path to the key file") + parser.add_argument("-c", "--certfile-path", + default=DEFAULT_CERTFILE_PATH, + help="Path to the cert file") + + args = parser.parse_args() + + # When you want to use the debugger, unremark this before the line you want + #import pdb; pdb.set_trace() + + if args.enable_https: + if not (args.keyfile_path and args.certfile_path): + parser.print_help() + sys.exit(2) + + start_server(args.enable_https, args.keyfile_path, args.certfile_path) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/ui/composer/webapp/scripts/update-node-modules.sh b/modules/ui/composer/webapp/scripts/update-node-modules.sh new file mode 100755 index 0000000..11b7789 --- /dev/null +++ b/modules/ui/composer/webapp/scripts/update-node-modules.sh @@ -0,0 +1,93 @@ +#!/bin/sh +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# +# + + +# the order of the install is important + +#npm install -g grunt-cli + +npm cache clean + +rm -R node_modules + +# dependencies +npm install --save alt +npm install --save change-case +npm install --save classnames +npm install --save d3 +npm install --save dropzone +npm install --save es5-shim +npm install --save events +npm install --save flux +npm install --save highlight.js +npm install --save jquery +npm install --save lodash +npm install --save moment +npm install --save normalize.css +npm install --save numeral +npm install --save object-assign +npm install --save react +npm install --save react-dom +npm install --save react-addons-pure-render-mixin +npm install --save react-highlight +npm install --save react-tooltip +npm install --save babel-polyfill + +# dev-dependencies +npm install --save-dev imagemin +npm install --save-dev jasmine-core +npm install --save-dev babel +npm install --save-dev babel-core +npm install --save-dev eslint +npm install --save-dev karma +npm install --save-dev grunt +npm install --save-dev webpack +npm install --save-dev node-sass +npm install --save-dev phantomjs + +npm install --save-dev grunt-contrib-clean +npm install --save-dev grunt-contrib-connect +npm install --save-dev grunt-contrib-copy +npm install --save-dev grunt-karma +npm install --save-dev grunt-open +npm install --save-dev load-grunt-tasks + +npm install --save-dev karma-jasmine +npm install --save-dev karma-phantomjs-launcher +npm install --save-dev karma-script-launcher +npm install --save-dev karma-webpack + +npm install --save-dev webpack-dev-server +npm install --save-dev grunt-webpack +npm install --save-dev react-hot-loader +npm install --save-dev image-webpack-loader +npm install --save-dev sass-loader +npm install --save-dev style-loader +npm install --save-dev url-loader +npm install --save-dev babel-preset-es2015 +npm install --save-dev babel-preset-react +npm install --save-dev json-loader +npm install --save-dev babel-loader +npm install --save-dev css-loader +npm install --save-dev eslint-loader +npm install --save-dev eslint-plugin-react + +grunt build +grunt serve \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/README.md b/modules/ui/composer/webapp/src/README.md new file mode 100644 index 0000000..9dec53b --- /dev/null +++ b/modules/ui/composer/webapp/src/README.md @@ -0,0 +1,69 @@ + +The application enables editing of CONFD YANG instances. + +Catalog Panel - loads the NSD and VNFD and PNFD catalogs from the server and updates the internal indexes used throughout + the UI +Canvas Panel - graphical editor of the relations and networks of NSD and VNFD descriptors +Details Panel - schema driven editor of every property in the YANG models +Forwarding Graphs Tray - editing FG RSP, Classifier and MatchAttribute properties + +# Details Panel + + - To get an object to show up in the Details Panel it must be defined in the DescriptorModelMeta.json schema file. + + - only needs the DescriptorModelMeta.json file to define the JSON to create / edited. + + - To make an object appear in the Details Panel you need to add it to the "containersList" in the DescriptorModelFactor.js class. + +# Canvas Panel + + - is coded specifically to enable graphical editing of certain descriptor model elements and is the least flexible + + - To make an object "selectable" it must have a data-uid field. + + The canvas panel uses D3 to render the graphical editing experience. + +# State Management + +There are two layers of state: 1) model state, 2) UI state. 
+ +The YANG models are wrapped in Class objects to facilitate contextual operations that may change either state, like +adding and removing property values, accessing the underlying uiState object, etc. These classes are defined in the +`src/libraries/model/` directory. + +## Model State + +The UI uses Alt.js implementation of Flux. Alt.js provides for the actions and state of the application. Model state is +managed by the CatalogDataStore. Any change made to the model must notify the CatalogDataStore. Upon notification of a +change the Alt DataStore will setState() with a deep clone of the model causing a UI update. + +You will see `CatalogItemsActions.catalogItemDescriptorChanged(catalogItemModel)` everywhere a change is made to the +model. In essence the UI treats the model as immutable. While the object is technically mutable the UI is modifying a copy +of the model and so for changes to 'stick' the UI must notify the CatalogDataStore. + +## UI State + +UI state is managed in a couple different ways depending on the specific need of the UI. The three ways are: 1) a field +named 'uiState' added to each YANG model instance when the catalog is loaded from the server; 2) React Component state not +saved in the Alt DataStore; and 3) module variables. + +Ideally, all uiState would us the later two methods. The 'uiState' field poses a potential name collision with the YANG +model (not likely, but if it happens could be really bad for the application!). + +## ReactJS and d3 + +The components built using d3 own the management of the DOM under the SVGSVGElement. ReactJS manages the content DOM element +above the SVG element. This is a clean separation of concerns. Any model or UI state changes are handled by the model +classes and therefore d3 is agnostic and ignorant of managing state. ReactJS is not responsible for the DOM below the +SVG content DIV and so does not care about any of the DOM manipulations that d3 makes. + +All of the UI is driven by the model which is always passed down through the props of the parent ReactJS Component. The +d3 components provide a way to pass the model and UI state into them. For an example of this look at the +`CatalogItemCanvasEditor::componentDidMount()` method. You will see the parent content div and the model data are given +to the `DescriptorGraph()` d3 component. + +The d3 graphing components are located in the `src/libraries/graph/` directory. + + + + diff --git a/modules/ui/composer/webapp/src/actions/CanvasEditorActions.js b/modules/ui/composer/webapp/src/actions/CanvasEditorActions.js new file mode 100644 index 0000000..792b8c9 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CanvasEditorActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 10/14/15. 
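A minimal sketch of the "clone, edit, notify" flow described in the Model State section of src/README.md above. The edited property (`description`) and the helper name are illustrative assumptions; the real editors and the CatalogDataStore internals are defined elsewhere in this package.

```js
// Sketch only: shows why every model edit ends with catalogItemDescriptorChanged().
// The property being edited and the import path are assumptions for illustration.
import _ from 'lodash';
import CatalogItemsActions from '../actions/CatalogItemsActions';

function updateDescription(catalogItemModel, newDescription) {
    // work on a copy: the UI treats the current model as immutable
    const updated = _.cloneDeep(catalogItemModel);
    updated.description = newDescription;
    // for the change to 'stick', notify the CatalogDataStore through the Alt action;
    // the store then re-publishes a deep clone of the model, which re-renders the UI
    CatalogItemsActions.catalogItemDescriptorChanged(updated);
    return updated;
}
```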
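The React/d3 handoff called out in the ReactJS and d3 section can be sketched the same way. This is not the real CatalogItemCanvasEditor; the `DescriptorGraph` constructor signature, the import path, and the `uid` field are assumptions. It only illustrates the split: React owns the wrapper element, d3 owns everything it draws beneath it, and a drawn node becomes selectable once it carries the `data-uid` attribute mentioned in the Canvas Panel notes.

```js
// Sketch of the componentDidMount() handoff described in src/README.md
// (constructor signature and import path are assumptions).
import React from 'react';
import DescriptorGraph from '../libraries/graph/DescriptorGraph';

export default class CanvasSketch extends React.Component {
    componentDidMount() {
        // hand the content DIV and the model data to the d3 component;
        // from here on d3 manages the DOM below this element
        this.graph = new DescriptorGraph(this.refs.canvas, this.props.containerModel);
    }
    render() {
        // React manages only this wrapper element
        return <div ref="canvas" className="canvas-sketch" />;
    }
}
```

Inside the d3 layer, selectability comes from putting the model's id on the drawn node, for example `selection.attr('data-uid', d => d.uid)` (the `uid` field name is an assumption).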
+ */ +import alt from '../alt'; + +class CanvasEditorActions { + + constructor() { + this.generateActions('showMoreInfo', 'showLessInfo', 'toggleShowMoreInfo', 'applyDefaultLayout', 'setCanvasZoom', 'addVirtualLinkDescriptor', 'addForwardingGraphDescriptor', 'addVirtualDeploymentDescriptor'); + } + +} + +export default alt.createActions(CanvasEditorActions); diff --git a/modules/ui/composer/webapp/src/actions/CanvasPanelTrayActions.js b/modules/ui/composer/webapp/src/actions/CanvasPanelTrayActions.js new file mode 100644 index 0000000..7c0634c --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CanvasPanelTrayActions.js @@ -0,0 +1,34 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * + * + */ +/** + * Created by onvelocity on 2/4/16. + */ +import alt from '../alt'; + +class CanvasPanelTrayActions { + + constructor() { + this.generateActions('open', 'close', 'toggleOpenClose'); + } + +} + +export default alt.createActions(CanvasPanelTrayActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogDataSourceActions.js b/modules/ui/composer/webapp/src/actions/CatalogDataSourceActions.js new file mode 100644 index 0000000..2230d5c --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogDataSourceActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; + +class CatalogDataSourceActions { + + constructor() { + this.generateActions('loadCatalogsSuccess', 'loadCatalogsError', 'deleteCatalogItemSuccess', 'deleteCatalogItemError', 'saveCatalogItemSuccess', 'saveCatalogItemError'); + } + +} + +export default alt.createActions(CatalogDataSourceActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogFilterActions.js b/modules/ui/composer/webapp/src/actions/CatalogFilterActions.js new file mode 100644 index 0000000..51a9556 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogFilterActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; + +class CatalogFilterActions { + + constructor() { + this.generateActions('filterByType'); + } + +} + +export default alt.createActions(CatalogFilterActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogItemsActions.js b/modules/ui/composer/webapp/src/actions/CatalogItemsActions.js new file mode 100644 index 0000000..13835af --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogItemsActions.js @@ -0,0 +1,39 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; + +/* + This class manages Catalog Data State + */ + +class CatalogItemsActions { + + constructor() { + this.generateActions('catalogItemMetaDataChanged', 'catalogItemDescriptorChanged', 'createCatalogItem', 'editCatalogItem', 'duplicateSelectedCatalogItem', 'selectCatalogItem', 'deleteSelectedCatalogItem', 'cancelCatalogItemChanges', 'saveCatalogItem', 'exportSelectedCatalogItems'); + } + +} + +export default alt.createActions(CatalogItemsActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogPackageManagerActions.js b/modules/ui/composer/webapp/src/actions/CatalogPackageManagerActions.js new file mode 100644 index 0000000..d27c59f --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogPackageManagerActions.js @@ -0,0 +1,32 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +/* + * + * + */ +import alt from '../alt'; + +class CatalogPackageManagerActions { + + constructor() { + this.generateActions('downloadCatalogPackage', 'downloadCatalogPackageStatusUpdated', 'downloadCatalogPackageError', 'uploadCatalogPackage', 'uploadCatalogPackageStatusUpdated', 'uploadCatalogPackageError', 'removeCatalogPackage'); + } + +} + +export default alt.createActions(CatalogPackageManagerActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogPanelTrayActions.js b/modules/ui/composer/webapp/src/actions/CatalogPanelTrayActions.js new file mode 100644 index 0000000..ec08422 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogPanelTrayActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; + +class CatalogPanelTrayActions { + + constructor() { + this.generateActions('open', 'close', 'toggleOpenClose'); + } + +} + +export default alt.createActions(CatalogPanelTrayActions); diff --git a/modules/ui/composer/webapp/src/actions/ComposerAppActions.js b/modules/ui/composer/webapp/src/actions/ComposerAppActions.js new file mode 100644 index 0000000..0387f7a --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/ComposerAppActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by kkashalk on 11/30/15. + */ +import alt from '../alt'; + +class ComposerAppActions { + + constructor() { + this.generateActions('showError', 'clearError', 'setDragState', 'propertySelected', 'showJsonViewer', 'closeJsonViewer', 'selectModel', 'outlineModel', 'clearSelection', 'enterFullScreenMode', 'exitFullScreenMode'); + } + +} + +export default alt.createActions(ComposerAppActions); \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/actions/ModalOverlayActions.js b/modules/ui/composer/webapp/src/actions/ModalOverlayActions.js new file mode 100644 index 0000000..c308e65 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/ModalOverlayActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 10/14/15. + */ +import alt from '../alt'; + +class ModalOverlayActions { + + constructor() { + this.generateActions('showModalOverlay', 'hideModalOverlay'); + } + +} + +export default alt.createActions(ModalOverlayActions); diff --git a/modules/ui/composer/webapp/src/actions/PanelResizeAction.js b/modules/ui/composer/webapp/src/actions/PanelResizeAction.js new file mode 100644 index 0000000..bdf0344 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/PanelResizeAction.js @@ -0,0 +1,73 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; +import changeCase from 'change-case' + +/* + This class manages Composer Layout State + */ + +const cleanNameRegExp = /(-is-tray-open|panel-)/i; + +class PanelResizeAction { + + resize(e) { + + /* we expect two types of resize events: + * window resize - invoked by window + * resize-manager resize - invoked by ResizeManager + * + * normalize the data needed by the Composer Layout or ignore invalid ones + * + * */ + + if (!e) { + return false; + } + + if (e.detail && e.detail.side) { + // a ResizeManager event + this.dispatch(PanelResizeAction.buildResizeManagerInfo(e)) + } else { + // a window event + this.dispatch(PanelResizeAction.buildWindowResizeInfo(e)); + } + + } + + static buildWindowResizeInfo(e) { + return e; + } + + static buildResizeManagerInfo(e) { + const info = Object.assign({originalEvent: e}, e.detail); + const name = changeCase.paramCase(info.target.className.replace(cleanNameRegExp, '')); + info.type = 'resize-manager.resize.' + name; + return info; + } + +} + +export default alt.createActions(PanelResizeAction); diff --git a/modules/ui/composer/webapp/src/actions/RiftHeaderActions.js b/modules/ui/composer/webapp/src/actions/RiftHeaderActions.js new file mode 100644 index 0000000..c046a12 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/RiftHeaderActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
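PanelResizeAction above accepts two event shapes and normalizes them. A short sketch of how each shape would reach the action is shown below; the CustomEvent payload and the panel selector are assumptions based on the `e.detail.side` check and the className handling in `buildResizeManagerInfo`.

```js
// Sketch of the two resize paths handled by PanelResizeAction (payload shape assumed).
import PanelResizeAction from '../actions/PanelResizeAction';

// 1. plain window resizes are forwarded as-is
window.addEventListener('resize', e => PanelResizeAction.resize(e));

// 2. a ResizeManager-style notification carries a detail object with the side being
//    dragged and the panel element; the action derives the event name from its className
const panel = document.querySelector('.catalog-panel'); // selector is illustrative
PanelResizeAction.resize(new CustomEvent('resize', {
    detail: { side: 'right', target: panel }
}));
```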
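The action classes in this directory only declare action names; state lives in Alt stores such as the CatalogDataStore described in src/README.md. A minimal store sketch is shown below to make the wiring concrete. The store name, state shape, and handler body are assumptions, not the real CatalogDataStore.

```js
// Minimal Alt store sketch (state shape and handler body are assumptions).
import alt from '../alt';
import _ from 'lodash';
import CatalogItemsActions from '../actions/CatalogItemsActions';

class CatalogDataStoreSketch {
    constructor() {
        this.item = null;
        // maps each generated action to an onXxx handler on this store
        this.bindActions(CatalogItemsActions);
    }
    onCatalogItemDescriptorChanged(catalogItemModel) {
        // publish a deep clone so every change hands React a fresh object to render
        this.setState({ item: _.cloneDeep(catalogItemModel) });
    }
}

export default alt.createStore(CatalogDataStoreSketch, 'CatalogDataStoreSketch');
```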
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by kkashalk on 11/10/15. + */ +import alt from '../alt'; + +class RiftHeaderActions { + + constructor() { + this.generateActions('requestLaunchpadConfigSuccess'); + } + +} + +export default alt.createActions(RiftHeaderActions); diff --git a/modules/ui/composer/webapp/src/alt.js b/modules/ui/composer/webapp/src/alt.js new file mode 100644 index 0000000..b87da11 --- /dev/null +++ b/modules/ui/composer/webapp/src/alt.js @@ -0,0 +1,30 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +'use strict'; + +var Alt = require('alt'); +var alt = new Alt(); + +export default alt; diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Black-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Black-webfont.woff new file mode 100755 index 0000000000000000000000000000000000000000..0229086571f72b196e17df03936018906289d968 GIT binary patch literal 24484 zcmY&<18^r#)a@^}?QCq@Ha51kv2AB#+qP}nPByk}zWl%MRlRyWb*k?@b5Gyy>Z!V2 zGc~UAVqyRwz)#aF1fcxaS6ltB|DXN;H!(3~SpWd2^oPa$4`!-y(BdMZVn5vAk5Bmn z8sIyidGX)!No?I*7m z&5w@oKLA4j)!GCo>CM&fBOH~rAglWnI9W4=_*e8Uq78!g66hvq;IUR zzXM{329Bt2sBfxo>;^qJ;}`pPmvcaxFawNc002-z1ReT`cUvz?cDF)PK^^W%TM;KH z@|G@>hqlNk$_*g;QwKRrvAidgC<@v~O2-(688w$$>PwhmNz{s{5MHmGYyk@f6;`Ae zNkLRPY8c$gzpj9o(jybo(^h5O*9n6ZxfA}JzNoy zPo?S)UXc<)QN^SV!h5JCgOp`zYhH;r>-KM?NBdnNW%ar}7+t>M&pqpg&#pC|6bZJE z0`ya(?3vjoYnXlLnr@PMSG8Z!jT8rRbchk=B{R4a6K$aOEP9iC$+%*w#CKANM-Kvd_ysGcS}dJ%*QmuK2R zR#p2ci`6nYY>~{Nr!H`<++R;k~KE=*BRsa5M7F+F=8%AEXkC2@!WCX~i zoWg14H9cWhWJXY>P>9ypI!Cm(Cxg{$wcUS0@LY|gYeOFQsqIwQ@zL}ilXS4H9fE64uw=`z_rp7kWyiNvVRYyC|s13J$%0Zv8^w1I2 znfPgN=+djYZj&*1ue!`>k}-DZ{cTW4pbu`tm;c#Ppot&&d~r*BTp7rn{^h&yVH&cu z_T{Vby&+K@aMTQvNsOoWYV9s9#;!}5t(Zr?dyjO#bn2^}r$^UstJ!o(9`o(4yZXVB z=T6QN>rLC`TkoG-2h?0^(1TZhDxPKwYWby=L7klYsH=W;lxmfuPWvsDL3^wY#$B?p z)hx9rF`qa{X0Gy7ytF#<;5LbGzZLuhQ5A#Nm^AH>3N@YVsldyG^d+Xgu zqY;AhP4$3dFFgKprl2@9YxMi|+$Lu{Y}IBOA};B%cvBogPrJ6cE1XqHM~vmnaWW5I z@gD^I7}ZQ*_J5dmC7aggWsENuH&qj4)V7C(nX)lKhw*8mbo}zW-!2)0-UCSciG2tL zq7zUH!(|uU0Iz|q`p1KB4<;|)hHTlMCnRl*Be4PDD_?sY=%$W(YfCc(*WBLnr#s+n z@9qnzowy0gD_;K^W`Q6s!VjruaD_8}zC{GH3i`805XSOA$Hg@0U2V0A9PxJmVv#PU z1Rf*g!I=qN-lbxTpY#SgRdRph2&k2(T4Y-aD0cXlXzrNP-(;pM?Bq$pKmT51 z;Z0Z#dMnO&fJ=%xr9pJGaB*E9Ds7xT)&=$80ORHcbIc6T$qZqE&3aF^?|k?VsT zPeQ&=BAh5gcP@8!C>LeLHa_);t-=x5piH%c&OShu;Q&i$$3ShKXFPfcKQ%#gEI;H} zy34dcVyVzS_+3-FP?4~F$~%mpBF)i->_ITth-A!pdDkec$0V(e;2}5CSOJ5xB*V?R zFu6TdR;Ft4CN}~}^<+@lux6PxXHHSU4hsgoxy3nlnU$bmkSJ&We#nAxc{<32G{Mlk zCBlYrc#Jgm8)K}9E?#j@y}(kti);MVgI9N1@iK%@_5-IZ*lgr~YSI7JEkg$VS|fc2 z`+F#Zoe4>rF{4;ReV}mWkM9{kytpazk#R9OlbxTs{v>d+31~P26QrOydWadU3@o1j zI&mO5z|jFUM$ns!2xm;? 
zeQ42^*&C54E!==@Uxs#AgIO&n*2puW-#!UP8d`@^>a-2EPvnRl#6)9K3Ol6WAyI3v z(BXecxQT<NRH5RM;p9`B@GU3}p<1y6dZ{^VDdaB@mg+fbr?(7+@?YVP7`flVC<+U!C8SI-xn>wAaf}W(>-%TGZPWaXF^zrsr4@LkWr~L=Sp= zRIHP0sikBEMr)5@-Dkd#Q!bNV48$sJO45f64gr$2WXl4Y%lZ>ceZn1Wg_g4m`;u=c zNg$75V>J3?wo>BxVUxjH*PNKCSN;h!%YSXgC9#n=FMjaR+EDMaoNdC&N$V$7jQ{BP zWA2QPee(2<`^nMf#!(e($_v>G#~jH@+_!?GJU(VF8GYBSk+I`iYns#f;A zBMXaH1WR9xkDWAb#Egfp8`4xCyo?j|(}+*Vn=|Kdz;7)|E%zGG4CBP-_KOpfY@9e9 zARZRk`?QL03bFZ)Y<>u0` zuKrPDwZY&&o(*xzi+#%xwbp{K{yZRLOj`|p{-lsCO?>b_q^Z|7AXv;XlVNXuA-oUB z7JZLHv@YqEbcdHx0JVlfH$Dv!6D9Jaa=@7JMl!O&l6gM1lkp8;3s!|VX+D@NR*~XQ zo_;!no$-X3>Be|(d_>$Ts#(Q-Em$hk&GAT-C_lb03v<`dws<7MKL&87cbkwmclMFm z*m;ZaA&k8kkX@qx*b8y&{`AVSg5&3f9y>;>B7MoPO~}l&|`v#L|XM znJ#axVA1Pj`s-h-TYxrtds2-T!+5x9KwFF#&l@nFLkbr68OWf)VzulNIGm1RK_&qo z#=$?GwSjRgV};LYta#}lV>yIa@xh5=Mxvxd^&uwev#c|qm<~&MJL@AybolX=0#{S^cH>?7ZKc;wzohlq_^q>sl$nPrmwx$H4>`>{W6^>egF4?~ z!!+Cn`JdAExY5BvQC@?HT{RwW7GuqQUXY%xNFC-AwTM~<$N@?M#P?R9Sm)E zNbrbRHadeyvus3!E0(@+B?XtY<_a`?;}2x}Dv$wJGL;4NX|l9=o2J~^oazD_(HghC zQK7bNdy|Y(rP@<l+j9PpEu1KH4)&C}1l0??fnXn!=-5z=+mVy8$a$(t1`D*_nD?ufn6&6ooH21n0?Y6 z8F1wwOr^>F-(W05Rw3N;Ys|&Hthqd{8pvNQBIaBK!Crq!qgh#+W;GlIyI6wa)*9ay z^^G_8Cu0-{7Re!F*e+H6PGk&MMxuSmn0rMvGV(D{9ZX7Y2dymYN7TWD+pp6%G>DWt z`d4?cGuJb6ewNxirS&l3c?KgO7@K}bqqaLu?R*YPLoqgO8xWiBPv>HwXi*NGeb_(q z_o1`TUsT+e&be0(CIkCX60Z#8R>T8O_8}iRg+{ddaqD|KclKP6-Q!>FMD#b0xtX8C zr}0gYp@qbH^{E(oTDoe`XWLb&)$G*Y13&ZXxfknuLk(nd!##Ll<3IeMto`vvF0^+q z?t{_;$MNI&$F(^3klZ*oQJo3tICpOBx!57+_lrH#Zcp?P$kcRI>4SY(d_?QKXeB*jII9!jmcS>-FXiNs|oaUz*?;v}QrhIHda@yZV;bYG4H zbJ(x_Y|@7pAn8tseD9 zWiKk=z+kaAt#e)bLwn}^O6R)V%2C?6&d@*hriI=dZjTmvvskHPKr%TcUv6<|{vgGn zROQm%b?(Z3t?RBJ3$<7@?O?ZjcBX&b+q|OM6Dlos0xbf}UB6C?-v~hdZbS%Z@plXM zYC;;MyjrULuhW5AEC(aRcNs%`ASsxfTy2#ep94J#n7elCq4?7yXC;Nt4#&7@u*8&KN4w=`hh7gGKUQnnR7x87-%#8GTHdUFNYw z99FAe4Qb|Sj?^Psq~?d2v{p};$j{Ovhe!CD4$-TnC7Z1#jA!E!ueEVnb< zaEtbV+tcr(bdxx&e6{BVRj&i*O5|&g6L6;cF6%UYv_x-(r z_FXxA#AFF-7``HeZf4OjVY#oo>ekXM3*Q zny6Rn{ZwAMA9%?RQV8^~m!%K7kUg-q0?$^QU-u=K% z&q!SL-J%264k`G)7plRbBU$(qS4&4K4m67x6V7g?I)$1oEth~ebu7KBawME z>pDtkT~;1xH5;;}Cq@&QL3rsFQO9i;_4nQavUKTz-QpkVGimCXBtAE=gi+VGoWq82 z6#q)bI6j_6O2j&Cu@XN0gqoJ9+}fXDr4$3#1HqzQ@TP+)rD(U5MkGoHMkJ#3>4=1S zrWy<7B>?1ng-KfG>`lsit;4DH&vr7u9^>g}%$BE}vP=0!(){J3YZ}Mgv-+Qp*M)jt z;Pg}G&p35n`J|6dTeP`x;`X&~zCxCT7ffH9&lliJ9?z!OuLWmCd&^9K!PrKiEs<Y{o~Q<6zFI!n9ko2l|OxpN+x1 zUOx=x-}5&*<+)*Z$`5tvCj|u zjl_-=-ulhIk!JC)&&vGGX6|QpH0Vu%DWN?|SRUKc=jzy3OH_Zd+f; z{rosttlaf19qX;|9n%i&J1EC+x2_KpL--@^XR`F4k14leuawQok7P1vZUN+Lzwcmo zVFyOFV~+YHM$}d?71RIg?dn3~sG{)Pnc2mxp&T-?;}TNA0mN$SK!rvsJ zg5Kq~c!w03scoT%5?O6>?cX5Xx{RW*1bx1}lRlY*7R}f!F*z@86qgGvuD!oDEWSZ% zw9m1QeS++q91*kD3MsZwE<#)xj_hx61}Zy%K26yU|CZ{RBcngouU~s%1llj}kev7Z z-JeMkTD03G^0~My?}iZs;zl)^vWy^Z{EM>BfcbhD4PqE6*cl_t7KoKgiNw3JM`rsH z+S1(fbK<8na|@@B&YXDl6nJ6{#;Hj%z}N1>yc!Czy(yHA4`H+zz?Ry+gr}hH{8MX? 
z@y2V|oWusvby^=5D+|I|@vQVQ{D*`H!lw77bw2V zZh@w^nAi#j%%ftm#Qo%e zzL;DrosQ^>)tu3@3Cji8UX6_fAbmBi?V}2?g%&YHTYQaU$?XVFq2; zdJp`{Pt#p#j2%tEdN#4qaDoAf3Smd0&aOCvEvh3GYd#|)D!e7->oB|3^F1V{kyo_i zlOIgpJ@#SD|IWo@XG;G0iHyJEdvJc)Lg-KAM0h>kypZq%}Xwx#Qpv?*#tQd|JqVMZ%qfLGkvvt|4(2rf%v8%#4 zZYr5YbZ>0oWVdJ(V7H7d;=IPP1zzgQtdJVW7K)pfx=vD~40^T^MSG}N{oq&?@Nzv@ z+Dnb2qj84lf&e@`7jx8nV>?rAtq^rd@^-Oj-*2!8=yC~+kK z0C?JCU}Rum0AiQ09UJ2LZN4(db1;CwxrNKcVf4Q*|8zJM+4~rn88{f2K%xLiqznuI z0C?JCU}Rw6X#TqvNHYBU@^2Q0B0~y;C<8Kh1puEx2UP$70C?JMlS@cdQ51&%v-UdY zHn4}@o8lnJBoH+NA3eN5S^0>J9->Bt55&MMgveflQZpQ7jl5Vyrig=56w(|NMJ1&n zk|joBCgLS~45B+p&D}cJgiAU2`1e_7t-a4#|Jw8oyO#klybDki!l@j!oQHLsj~w#` z#?3G!81cAF4^c}G^dnV*n@VFB1?fBL0$M2-anz*G@e!1Bz7Z^S%Y_X>vXChh32nlD z;kx9=f@QNjagv6|7&gSwB0Fu-Ygmg2Nmx$#7_UKOKI z<>^aw3*Xfog4hjKBXWit$?CWK=L@8BivG!NJws~{LM!KeydG??ty3^1J}p z8kG7`IO#d^<$1fBM5$Q@kDAm^g-+osRUpy$g+!{*uelqC{k_l8UOtPHd=>l5P}#Q; z$BY?0#eHyays^q0L#4__znVrWN21y8y~L{2j1jFMeP4lS6@x|I^=sX+3A|z%}zYs;w~GRulS*zwb-n2-c6qi&DQ20oXnSVloo``cVEa zY8El#L$Po=5Wg3#2<2(SO5a^eVP(T9UZjW*S%J_dLS%)&OAXEzQ6 zVtc_`b%(8vLpX2spuqknHEM*Z`|zokfTIprRH~6XGAZm_06#>lMav{dyjaAH#WQOy zI^>;4XfkTFhx*~6Av930zDsY>V@@H-uB8*Gq%1h?eK`gh+-rpJ7FiEtFHa!d(IB)S z{SUOX^?LvS0C?JCU|`UJ!VZQvjBbp7m>QVbn4Oq2m?tprVUc5TU`b$UV41-x!0N*~ zgN=o)i=B_Xi2WFc9>*?DAxyizStouTHYc1`VC_>}?SG`~H5d;E3$H~8-fhza-+_#x;40QSb2 zrT_r|008O$MgRo>000000ssL30ss~O00962NB{r;0C?JUk;_U0F%*XXOsi6?q9Q6@ zU{-Zg$9h9h5Ur>b#6>N3p&-+7ykOgrX;fVG5yYobm*UzdaOE5LCzG}_f(c2!bIr*K zfJre80X5>grO${{nj%hPrpx_2XK5Ab45cm185Z<9HEhvxZa5X=G-rqVkcVfupApZ7 z2e6G7!x?1pVR#VZ_%fVD4nKy6a3&@V52GSlhDVSUuZBl4B|Z(0At$~KzhS}}HT;f( zHQz1=wLk`TvF>`ec1gPR8_%x#wacI>+t^2dCM0ZJz~$+ZJnHW7gb+8l!nL*@P!~|D z!>6>20+zTtXzO)CZHN~ARW_S8hpV8yR=GC2F)Q>g)2qfMxopO?xT||5bGWR-M}udG zTj~N_{<+;rNJ)#Tyn$WT9O}H1yQ?dT?o?Gu>Q$A8w0YDtDf_(D#y$B$`>0y3^-Fiy z373A6l#Iz!^Y`VdCk5Rz$;b^Xaz5(mFi$U+L;sLVpZHmaBDOszG+-9 zYTPp#^+An$#S!(vXLo!+U$x}_^UX^xpWG#Pxg1gOr!A7TRPn#~0um+BL}MWaD>h<@ z!%jR2B$7ljDWsA{IvHe=MK(F)l1DxT6jDSn9q33WI@5)&bfY^x=t(bn(}%wFqdyut z1`eFKD4~=x${D~w1~Hf+3}qN@JPfA-FFr;vl2MFi3}YF`cqTBBNla!6Q<=teW-yak z%w`UAna6wZs>14J@OP5KV+x&I(pWzPyUnG_!_vtmPU<*uZ)=vWai};5z3x&kf%3UhEPt36dyD zk}N5bDru4~8ImbklFbD!a)qnh!UBz6+9#H&oL#LIEQHFoozoXpHayv`&pMo!MUsD!wdRc5Io zA-~K?ZZI6nDOqY;7-YsNnQBS(IK7+Wn);vt)PJym##X}ytc8XH*h<3%oJ6AuxRFK+ zXx=p1Kudlh$kW$5NWDo-Rc5vayI?0``e66O?1BEA8GyYRGmO)x5s1X#M4}Mxn0-+G kmk+@HfjI;RE9M9s@?#}Q^#(nWV9)>n0003}d-j(A0AOnLQvd(} literal 0 HcmV?d00001 diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Bold-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Bold-webfont.woff new file mode 100755 index 0000000000000000000000000000000000000000..0c6994871e33563d2dbef857ce8bc1520e9cf3c0 GIT binary patch literal 24808 zcmY&;b8scy7wwI$$xO_NZQHhO+cqbjWP*up+qP|IVw*Sj<$J$+f4r_*efD0x);?Wz zy6T+j+8zqx;s7wf*JXGDK>u&7nf+h>U;O`H;^L}u003C&7mN2F3}$rDBt*r;zqrA# zoazg-;CEp25{e2cU)&7<0F4CzAniBc2FOXMs0jlAu&`em`7aof4-hRWs4y^pahhMb z`hQ&8&yN5TJ0r(0uJ+3x`Gx(QSz&4ucUK|+0G{(p^Hl>NjD>|}XzplX_r>{sY3cv~ za5I%gh7t=SmoG0OY&@JF)ex`Yp|jOaTCmm#^ncUtou@ zOFOjuA^`wR+OM4O3#2G+&`g$guAX0<>{oqD005$NNm!WA*1_aUr``B9@8lPjb-!_q z+8KF%&8r*u<8}5E_(Es5nS=r}n{J*O-$(KL#6$1uu(CGggr}IkC zJ=aYPO$`lqA&h@OqZ%3;nj4yW!Vk^_Cj8pt8jvB%fTA4$08~&RhraZl>s6^%wrXnM z!&dz8Ezb)s51L}w$wQUEv7o7vm>_MyLX-u~A>i&rpy0j@pv;9T3C0Ma_N$uGhYU7O zj>b@eA;GESeHQ}Pg-niw>uCpGSp~r~f1hoszvAON&h~rKw^&c6pb}#^Aq#6i?p#(V zyN<;Y5P$rk&^7tJaU`Y7V~2o4TuNw+NuPL|xM(o;59ZsJ--i4_FQi}fkal&`+c56A zU+Lkt#5SwL-uVDx;CS5!|B2dn>bon>cQ9Tg zbxpXlQtq?R6+Kmj*Bhc36%m?6Ku2Dm`edq6$KbsR59&jG#bBm-vkF=o6z&?Mn__tT 
z2L0ixQWw07kwTh<#A3Wh04-wSF;Pclp5r$^@&PJG>P+a8fx}fzFpYjSM>bsugxWgf z>ImFjam~=nDMy4&_?Et`m^uW17>@zIm?jj0-FIVOx}YM)NRwzEdP8Vv%B+)cN+tZr zmI6IRiu4KHTtt>FL@HZGoH;7lmaRVdA!hnrZm`WKErxyncbkx~ zBW&_pd{Wx@3ZVR!#ZZ>Ss-P>gpgm_rtUMFUH@*jH)ncXeDr3DI9c08i@OQud7bwiQ zd#3iE#4d`;Ey+$TNmnh2+gVx7Sf7oiU*uO`R1ut2ec}v8iae%}Th$j^mKR&Kn_Cu} zpR!CX<4rB``W3xu_u_09_*L#N40@$tiHsqHWT_RnE5~K4V07FT9 z3N&Rs#v^vQehx0wf0I=%65kD)YmdwVMm~TgvBzGp`psIPdY)PP4Z7_X;|sZZVVY*8 z?<7nH{y@c)H7G<{ zR?3yop^qlgtg|;nkVD%fHpvR?(>AHF=jYpKzrx4pg*{j5qnNkdafYb9jdFgL)*l{Q zdvC|-VFnd-G+!*L)T}lvHSUzxA4ZfA-1i>Pm~w<@p=4iSRz18kJx9&(uUBu-)Gdz) z*qdg)UG$bjJ(+Xm)M{`nyHeoVceN!CAXC53H_6s}(6-nH{YZv7XDGWU$3GM=jU0?3 zsAb-8ZdORx?`e6`NH>ohFpX$}_uk;_CUCy3YLta3GFP9#Jp^pnGthB86KvL)K}Dpl?=)>QIMswLWZ)`#D86i~1K zNIOcCaTk4CL>WAnQxt&^YFDA#SFJc!jpkfBJNHwJ=C$VbhlUr=vR0ylR4UOcUZ5Yc z!i;dnt-~u#*;SQUl)oJI&(&+QM!SSj!Ux>S_kGV=q!Nd7W}GR*c5l;cD>f>zO6$@0 z&@#lqsMqv5{A)Pj^Vblj5$>*cnjZaPx*U%67R%8V)+2g01_kWu(7aQm>1BoIR$AM&A}#^z{RkzLvvXX23VlUp$Uni zfA^sSjHlpN{@Z6v49(|E)(3jWd;7Vgm(qfQln8`b3``|Wj13I|MzEf}y$}3>Lc}6;+U*@$TPx5!78a*l?-9&qIXo(1JiAQ+rTnI=xtsYE!vHxkPzH5NdXC8dI>UQ!k)%&ep5Fs%^S7C8MkfE`` z)#34hk)W`^)Zp+yQDAT&G!Os<_U7&;%+}sSj+UOJt+u{|iiS5oO<#?%9MS1Fny$?s z(rDCZH@E}ak#IR2w(9Lhp7=VRuS_VVrKUP{>S~GJrL-_aMrgy0p~D}XYtNmnvD7cD zWv3zPTx_n%*qY=8Twl(bbXsb5R>vG~U+|Trmf?OG4PVDXGv+a-3WO6C7FT-> z$C>G(h5o1fJ|p-i@bET1APctfYE_gswaE>cNgdYPe`E7*BC12ZZ8}=m!=c=|be?MJ zU4OcJ1R|RNVL-^FFJtEYQ#$C4m$sVK5*bp&o5LHF-*gsP(TT}lPqENbr~Ti76ASoZWn^Q2U~)lTt+xT#l_Xf@G(bOf*F zXqvOz_?)3XmJu~u^hI@vWp^Q9ce`9=t=YowzJKmqqbFr$ZB1^vT%FoQ3WlTpoNSRL z|5=xqjYC#fT&PC8c#jM##AAee2~soA=a=! zov&58uTnIeuHDHQmV;nb_6bH;^p7a7=NE{V5wRLSwLbK(bExE?{8^;5aXH!V{BLzq zYK~0h+&5=bOtQ|qp(kO{>P5B}Sx2E6i;Hi`p~iB%H7}_MIUr|Z-I=0*cZzqYl^SBv zcEn`@;xJcYsiXe&q&`LYpT8xwjb5cP7NAC`gk7qx%FTtwCK!+?Sof{0kV6|tiuOFm zf7Zo|ItUT_n+C<3b_^g9Dm76SA6-cuhWrpm^pUd1r*_ zRuEzg_)omx+Hcs-)b$CCZd|}}0X>_{J!YW{Zt8d7-|>5QUa}b9kw4Vf-)UOS2Rr4n zy|R2ie zbQmP$L@AV@{OecSv8dCXt(rs=xy}d8BZ?Qe)~Pu@5?8v;QtbU?877!eXc?am7lhF? zTR|1L57-I2a^zl$C4}irM`FxIUm4#;>35I1b|IvZ0ip0=G`?tHoqGyTx!h%8vj0Uey`B(4s&F%jv3=%JP+;%}jv~At;RB zuI=U@7y2;61=D9(#Ff_u=&7SkfX`0LWx5m2}F1^?(?%frBBOV2~k<&tj((e{kR&~T?40zMRz zy_tO;?OE65a_go~ARBw#NAWtyF9TYVL?Xv$=jpLvrt5jz0=bE0Cyzbfs|) z4u)^>6RE0q)VM*#;88eWjmctI>xq)@Qn@Zs)n|M~FPO-eW{J;IaXu=?xExouARwKWtV-?bq!DqZ+t3^a6?!c~6BE ztJ6EGM~dY$hc|RHm-6zsOFbvq9if2}F`MR@G=JdgyMfTk2FYQ!OVpI)%%Pg zg$fDDpp)^iP%>2BV?`F7T~*HJM31|ioBxP&IK$}{CY{bAgttb}%0AM$T$}k{>uvs+ z6&QW+1x5fW^<&N>1800r*)!%%nDe(3-rjUEHSuHC3xVWtjuVqG8WqoyIg^p4B* zE??;`jZ#a=P*~k;O&RzZIlj*7)DOhV5`p)a${yZSep!m6d8dauFWix)?&=zp)8poD zrZ_#GpNJT#A>FsVOG1bF!IM!|`u>N|(P^j?{TMwM$H5pzh%5t>*S`9L) zJ`w8mz3S?;v(*B4r>QF&u*=Lwo*R~qAKgc_2YDf@6|5~m@`^xd*2o^81$3)G`uUm_ z5X~Z9Zt95*@`=kShVB3W=+++l0X1J7b6I70QH7^+N+1W{XsMp-1h>>(ghr2>_c)i! 
zX^1to0UyBi@803@7>l-|lzx-S7gARvW@WB)HCRA3*%${b9IeY!PxaB)u1`%Ee@WmK z66B8lA+7Xp)gC(~(kn1kP#R1n6nG<(3#|LKf$s?l5y0nFpf!6!yg)Paf%G`qX_}{e zge6X2d7IL5e#~*m1+tv)@|U#`>%1ovqB|5v7OM`09oKbu3sO4nm96a_y@tCkW-6PI zkhn5P4N4)~T1heG=_*l%yB0jn@E6GgpA`gF0t(0|CUZI*=Qh0%!l1b$G3Km+NI(-H z6_5l-r!7WzR@GAe5cA%$^#&-3y{sMG0V6=rw)i$q2c{to}a_y=MLumVQ2J!wIz{+m%905(NhHzlke{vNdwkcseb zuT8MIi~7)A?I_6Gbpy;tjcKVlLJI~S-nW(}QJUCa#R*K2Z&tS!3~vaKFaeG$KQ5Tw z@q;rD+E-iN2xcJ{9+HE{P*CkpD$Y&56;TioA^GfR*#c&!^ZYo^l_qPzXC)lXBLn2$ zH}?SPtrB{EyR7c#hOIY!?>t_swed9yIb2LQ4at_= z)xtGFOyoo{@uWf|Du^+@L&x<}+{x3>BUGhu#ieM}a(eTwkXgy~O4w|qGEpI&?Nh*u zWP$XLrI6m)QgCs6c%&N~>dM%DS8e~}theoDm{Q4+aiewx->cp`g~xz}Vg;zw+1R;L z=hcYr+H7IwaMJrJznh$KaSjOqGY=?rcLNu+TRdKZ7Y~ zFx@OPl^_sX%*gG*ik6uLW@A@#*dOkoAi#-S+XP)w6tp-k=K7^nKHmDIjN_LrXlOK0 z{D2k9*Y2k|^9Xd=Tocv2hZpDXD>TMS9PQeQLSn$)m74RNP882fm~;M=TVW_?5%%iN z-hTll4P=KjQ{pP)$AQTj%u1+2N~jtn`put2y>m$V1!(3puk}usLShYCXod6Oy>_UT zl2D|usBsG=>o{ssN>#DCmZArFwM`VIo^7aU#Aq<3%%rdpUIh3;$nveOnp;W~h;4=$ zw9EQD_iJ5m&)5D%0g^H^+N^GCcc%v$nDDWz@ffpkVNgE(dzigTX9d?j?-0F$B#)b* z86E|$%k_3^dX@A*cj!L-wq6>W0=(GVMwf;uFli<|EJen|D0l>fwOgh-)rgkwXTSPML~tFI};FFw~gpui+ z_qnwl|KRZTSz<4!S7)>PiMg)7)Ge6D;BzF~>)tE!x}(_g(2AoL zDLux1fBuB*b@fRrTf0ykv^6iL8Qr)NXT$Zkks+Alb+NWPKi$#{V(t_u0nLE8Kmwo# zu&Fg{m}VbxN+-DcQ8GpEQ=JO3vKj8%oW1$0OlO zI(>ctX~qjv2wSR-usm{El8`kZyfgHs;$M?vPy7oI=MS^u zA#1BT*4$fdt0m>*hlGDv`(p)vh|dqOq^@zx(m>kX?bwuKcDDA0E^&sKvfq5=Jk>t{ z#=1^TL{%$un2Yc~OZ$>h-)Q~C+*r%&6`CN(!tQn;>GQ2lMvk zsk@(;xICxE{SR{|hjZ9{?=s6smivvgmVT`2D&G z@K+gXjANOJUV~<(Xii>dHNUVdZ%cul)*yUs(qjcS4;ZWI*oX7@TcmV;Wtb!7D)6;m z!RN)|$wBTNrGMP7wRCo14x=s>Du*aHFSmmH%W<2+!3h*#k%L$8MC0Q{qa!I1Mzuzj z4s)x&QJ$jbPijU72mx+)Du@wxD1ZJ5m=(qSkj2M4EVy2{KlOSruK{*Aj8slvIc?vj z8eopTJ`qwF@Jlt4R&L2Cv@ael87$>2g9;2Xie`;i`Cs#d4nqvGN^&X{zBhB@z&vHE zOZ$ZS9944u^_OfxP>nxX*jWJieXPQvcfYsDAGmcDE?Zb2`CQv`N9Y$5R_v7Sq%^rMz>_LF%aduke;qkQoP|2)w{aTK@ruFEq?r2f)S$*n> zf@G-ZwCis=?;k+TX96nEV6=y}2wp|>K2 z`n^FujUjh?#`aAhyQF@Uo-3J`HedFp_f}KG z=O@GLHL-x9wo?syEBK7|2aPz`gI)<8@;3+H=|kSR#~ei_R`;qWhkpmM(`KsD8KA>DuyW@|KfK1FZ&r$veWN19eILm$@a=mc2^=9w5=4GjzYF5ncTQKLzs}f*$A9YC}%jo+5Wpah)q5XthDH#xZqWf z<_A%FT|`72-=?IDl3*PlHo#{;yyuZ*%WJ;7l;SdaeXS_)#Lq`_Q`>N2P>f{Gh8;}t zTxbY9F5zs#m3B|w@~`@zl@%mZ%k4Yt1C74B9zWnc!gSuoza)&DCfUt^-n-#znv_mc z7OT1WQQHr32#03&&R_E39CK4h{(#A$n_(osIjalCewg|-JR9ZERuOeE@aMwNG7ftQ>H)V>_u9&t(&FY_>_x6 z@2D8%H;g{eDS}|invp%hDT)<@y?oKxqzh~hw!jcxEdk)p+h1n_+H18kj7247&iJbU zrKp8vmq#WmUl(rt5-)*@!t=hvAC5!@RGm*r*z{Ml@?{2V8GB<}r|3Rt60@hpHxJp(e` z4{%rp_935`wl=;1QrisaCYeg7GImdB!Ca3-vAA7Ovo?iC#ZsB7kDP1j7^h#h`ePwA z0Co^iL1V<5`@a~YOA3*e!iSPjc0)smuC?}|6>}sDx-S&c8_XZuD)N)xaFf?;QqOkq zI1d^ZmQP^l&MZ1KwlEkJvRevwpv3y}ZD4Y0FB+EvqJBhUOdAZit*IttfP(qT?4$R% z)`xDshti@);2{~4yp|$6qzo+yM%`|Ax?HoVnL|10nOk=VXuYBq0vQjSmSnoz z@(FzA^}H{5touDkc>%3lC}g!Y`Rbt1i-jM!HG?Y#U`)uJ94|E(3=Pq5PBe^CH{H(tjbWCsAI=O% zF|0O`??=pH<&7k2-#V~8qvjp+!Lp!estWx4>Vrc|Y2Gr3(sr zkLUAs^5hzh=Q{Mh8h`hq*J}d#H*0kL#8l>}gy`5$9zA;n@pU|Ulv>XE)`Zx%fn4iv z00c`loT*|c5SZppvHvSudd&{Z{G$eTD@X$M9*x@Hi%8?+n<#Jr2HH`d3Q|}5Z#I>~ z!ntrpw$X4Lc`m_kJlXiwzt9#S%=wW=Cs*)MA}G>V&UTrd@_?cMGGEHhZEwK@ZJ%Zg zgsbJ8TYiiKe)n%2#-X+|q}>+0qND8NQMWouhPY-X<1u#M-&_SLFm6cSCK?6sOFJIA z44EgcuuX(JXO9$Zz`HE*P4sfiQ@Jpglk|Mm6s?XW{QRsgsGle zQf+$2)ZTlS?Il$_37D+z+qkKnkncb?!aPjVQ?2%K0yt*u#nYA587k-i9WYoEz7)f9 z=HLZ|qEe76^ZyQ0&&%C*cmnvJkvp0lZ+LHcSw=+HkJB6tpk4x+KYxo2eMk4*L1O-- zYEB8_btskcr5n_Yc0<`@C%cCl)sHnd%7l-b6w{jWRN~D0V<b-hD5D zdOauNsll7F$DH-dJR5-qgrq~Gk*+jG(S9VadOMTPu3FLpzn43rMXG%H#1^H);ErGPDP^Dkr*kJ;SLlj6> z0HqDuY)Qed|HN%Uz!7}1kTMZWdL%8u`s;P{soq-!{ElqfobqRt|LfnJey%r|?b@QZv#@7= 
zX_>eyfw#roOBVBm^(qtso}<+~V5x0WHjB%r3(9$jxSpr#T4;G)G{NTbd8yJZt~uk2 z2KWcWq3vpu@WQ-YJK85vZE(!Z@gCTO-q`m?eYB9B5q$g{jS1*dd~xQXh|K+=gfMcJ zd(u$_OXqB2k-1fKFX*r$g>jlnf0Db20}mQ2sf*6Bn$G)a85V{bB)8~EoObK8m}C)i z@qZV~Ffd399;a1(IS(i!aqS7@*5}RoKQ`|)P55SoKq2pg*Q@*l)c!&nBMiC`F$R>E z2bhW2J&yl4?zqttBO^qX8?C2H>~5n2)tWJqJ{Rv6suxz2g+O+1FG&_t+-8%q)|fNA z$G>tOdLTXeo*V5S??_AcrD&9K5vmu9V-x98GE(VFSf`&%883d-*eT3rA6Y{_i!wip z8vFcQTfc_eL0!U8xRz>0F=HYRz=K~UuBDJCp#O6xtI)nZksngaM)u<`m}i*Ja>(-) zPEU7A%H@o_F85XFI2s~@chO?xA2D4cgj)4M@Jh9BD)+rl$bor7)bMw+jg(?< z!9~`e0Oda%h!x1J<@92Y4NcF&{$Ro5Fin$^zo1y7;gBS_G-8X7b3kA}wn1f&u6@n+ zYnd+Mv3%AuUOOiS3)w&QJU4F-ER?(MMlu}cK`*8io&OS}uFCWWtWdn8$UKIlShTjF z0Cxs$>7iq{q}ayIXqEbE-_k(pK&KlcuU#gd;&4qpQB)GV+ZgUHQ_4|FG_Y}@to>rz zm(uYlYYRhC&)3-BF%u=cP&jK(Z$0DU&y!=-^jQ9r8xH4$krVp-k9)_NaUNt|lT0Pu zzPr{=*d^G0h2H_Z$4S)n^O1cLU^DE}i0d_wD##c(j(x&DjNW~)zCjuY5M6(qy~~DS zdC%-?f|X^gQoyew_ZqdqUeRio)QXfUTnB8}qw3rt7ZVJXf~y<2$eH{aZPv+}_8{K6 zcPMzdi&u7KC^Fm2&4GoO{P{%=J`b}FP2+3M9_u&fl{46fvWRt)O}Y<*bFwZSd-wCXyn`zn$f^-MlBUfq2qYf< z8j*H+c6eQ>V1Mno?w)wOlkUBhZEcyDOwXH&K-^S&815H_jT2|_Kj7Hsps_lgdq0H) z(tC$hpE|@QHK6E9*DQ!sQTxeXPh2g8ckm{vzw1SG0VJHCatumVD01K$4Z`a@%{e^p z8P!7y;|p`!zl|d7CMY#!>Y;A3$N{(vj?9!`F@h}EGO;oisdt-X4MTGe^S~Q8%A@kz zf++(mSy@ECnX`Vo5~K=}E!UE2&~ z49jNoAGEgl2-xY7*>an>i&P2hTiJbQ&*H9W-w5)9-TBrVK;L^j5*Z|QV0Dr(%TlU? z?n{S1wlNc=lm$=XoG$baKed`RPv4`4&gI%uH30NNsg{>vXd4JGhPzX>L{P=Tf7INELMPCfGBVAjiz&B6&N|K-b==P zm><8E&l!MtA7TN2$^F+hGuydaPxmCMcb{VTGpZ2Wj#=l;#~bqyTuY>li>Fnwk_-V1 z3vo)Y5?HHD_J7xy?CnPs7vgSlOJz2gxEe$7P1LN}Ww##pA{b=(1u*_H{TuW`z$9cN z$)9QN^gz~hm?0$XBYZ%Cey{elF42?_wB^T89u7et+r7)+zVYXvul_?KZJ0yXRO*7{Y+I4)ruH&Cctj0Yi`9Q<-b9?`B#M65GJf9~_whYYi^uy3oZ3 zxo{%q`w%YF4(%u9BPVx5$2wr{?|pv#YAJn&C6nY*0{vIc&f_r0nQPR`ty&{g#H&_5 zL$c6e^KO~MK0ZD}hsP;=N{h7iHI$vQ(JF{6ZR6)Z-Z8Y_ZU)@=`sWno zybvps{b{}_#4We=>82BrV6W@Og8b8Pc3`-%fCX}>h6~~|h1!5zMX{Gu1wV~Z2LAU% zU#`c~ozHri?MIeMqrw}-bEbZh_Z-Y=2eT)dgkaMUx7K+kT%Vs*X}du`_N=1UIH%HL zb8PG>9v5!kOdd`){&k$d0&{MT(B#miLu>s}__~{l+!*+TdV&UQz;IQA-XWPbq&e4! 
zpT0N7iZCtJOC)vCSvoZTfCl3$Qi3c-XGwXv_C}E-;5p3#}^4tP-jTy835!Gu_N>L6)~!b0#nbdb_85*)R?~G(}HwltsX< zjr38K(2WlLyB1k1DjK!{%&W~~7Q|ywe3^GDNPEUL)n{z&PKuDr;d0>)yq(k=kM#4_uepu+)D|+Zn@U4wOzUM>SzJY20903(*tos{gFK*1WD3QY^|E3iE%ZJ zCE3~#i}Ca|k`9)18>tVE_)yTk!*W^1c>Pvl)T;cQIV{dNy*qlyZcJ0k*UE{^GSCuy z-aPbdzjOF1yWIY;PLhzUMk(=YYar-fld{^xN(l1#7v$dCzfn1ou(K z^4kf#o%b%$QM+^m1J*q*lnHJdU5&v{es{yx2cjbG8gvQ?Lf5GFB0P5oHn7c-;Le#* z6L&l&i&nW7%z9hr@<0+{b2F8bW1a(T`qjExyAyKE@8JIk%!{FK8GB$ClO?3M>s^$b zv;upo#1RKZIwFGcMX-pM1yt$rx}KM^G6-jTWJVY=gG=@5?w7iPJ9hcBlN+9+_gh*D z4=_`Z7{q$s!Lf0TGLY|%@FrXU8*Ze3BYe|Po}}e@sZ4yQA$U)Xq#)xL^pk(GD!6Nu zbwj!sgF8XD_5u{(`9IPHYdM8R8#Ky-HDZnH$^InAT|(XrC_?f-V2eBCh7k{7nk^}& zqnmKl!~EXQn@qDfs|fxr-tGfG(HA6Gsre)OMB$vKc*pepcBrK%84`GZIuH@-GVArW zrL5v$v=YQCK}!x-z`u6?hC;~qb0*t;e4Z~ymY|{R_8?x*17861#B8&VfmT9Fpx%1H zEtKO;RQelSRsVdx`Uq=Fn8sgeALXJ>xfqFWD#X)`mD#G`Nx|BeWR~Wx8tM4@%jCu3 zzEe>G6#ejN)Q0gHE^we+?Vy@>mz&v3n`W*~e)AZ_+)jkoPPCfa*SqYC@mv1 zvSNM$TI`lQ_PGxKg+p8kOBZtr3H$X{t~nmG4%dU%0>Z4G-MZh$aqm~%K0Tn%?e}gS zm)p{#HiS>Eqcb@TcGk!9uGjJNVP58dIQF|c8ior2OU0i1um!?}u|5gHIX7{1;@n>v zg8E@&IMoPKp8nFeW@sGc;&Q*0qKbow-G${Xg|J1OCgsQ03*HVUGipbY2#%J0blnkM z$r?0+L{fsg;+z`QcWvyrf)w8~1%F~4g3e+Ch`A%pCs-qp7gs>}0zwfd3MYgo&kMh9 z4jN0VTHqUWd@gmn@6T)YFk5C;f-bsxPqd&;H|o^bCe4Ie3urYZ%)VMjU!`L3UW7NP zK9aSTic0e;#wqI;ITBgvUAQSNN^WWSa zkH}TJKxXElu?>_ga|t{4-uN~=w0RcPIdny8;rAh$F#3CSFIY#p$*HDojg^!ANr^$U z0Qa{qj&mp#dHW!Hi8rH-<7=I{D?8j7g!`_qK=6; zyTcNs+RyCd+MK7#T+Q@U!^DE`>@UP?hcu+(5JVMtyN?k5%Tk54;y(+@n4?n~RnXxS z(TTX?igYZ=s&aK6*WP^Ic-}S_PMvx_h2s}Mw-XL;T{*m7=XB|$w%OcnM<1^&CGu;u zl4o=+{8XJX@5*X~B?2WN9YT?$Mk$a;PaJ(QyS12#a)9$C(5G3SWG2&htXxH*-E-ge zwOwcXpKrag~|vdT6Rv-^ZRfV&W_+Bj;7dvuYgyQwIbic(x!Y3h($p(L+8 z>sh^}#rG=x)Cjd>imYw$Wtv}ga@#z4VI+?ExHni}6aCOniu0#;9^)H>mR{UeD#qk? zIO~b*G$5yqAwS%B?l*br9iO$ZmR8>MDPnPr7_E-aK7K!RndE43+3z(5PS*LL-1an` z><<5fu($hP2OTgbZFT-73^nEU)LceHBYnEgQJ&eYe@6$W%6rlCdj>uK$}|vy_98}n zZZX?;6c`pY&MHpAhQX2sEEd@I7mTsbkB!|cONK7)Oz4Sij)c$AE@GW<~DM>w43Q+eGir@>kvu?tfIlqWb#Fl3-<4$3yWFB)`Bjx*YK(>McC zBP*O3p47^aoK%2EBXa)Kb|XBiA)6$~(kZvdlB3FEbl~HqLz2=`1?h^)y`qJ%0%zwc z_b3BWxd2TEyY{G}(2QBN#Y*F{{D@-;ZE=@spIn7&{zvr@Id`j@Qaar58C(_%;BTyQ zw|36G{DyZt4b56x(QEsV;O*U)tFCaAIl@8*jz|7>l#3nnXc=82uy+Jra1lgr?Rl8L zV`i%zQ~mE4D_I^t>9Xmzi8moXB7Hc*O|O!z0|9EtCjrKB}S?3_MPSUgEfKkhl z+lBiu7HDL+tgmgT)9=)qy_W0L5GbW$vC}GX2wUrWZP?Lh*sax<`@huccZtXB`Z7Q6 z>g;=@fNSbt{1w(T3T_y(wRa<^Zd=N*w?`r@Zc{s5+>FY5FLPxjb8Y=x70SQHi2vpu zAeVZw;#-v?v(m)P55jZoQf-xbS-k@vBP)Q@d{19k=-(tzDCw2}5s0FnKgM~SxJ+Ky zv7)JG+i#m^#hRT?4<{DYH+wClkXg{7pDZp{So*0*RW|26)1QW+*XZ?__iIs1COEX8 z3i>*>M7XK*dBP->HPC z0#m;R>6*jV^!`zUk2ssmBYx>2^0_^?*}^MdBCm5drfCAV_|S9)r~YoT!O@|RA{1SV zPKYB3rM;YNR&)^?9K}$=l2tafTksspFXsW$ZS)N+d_d-LZQc_D;jd%l3kHI4L|Pfz zKdCZ9oj)`&bGe2kl&MF11w_mTx=I5v8uUeEDkX85IWSj8D9-U}A?PE?hK+4*_iQpa zXQJ((*FLqSzFL1o9RmSIZF{rF1SIh;Ob1F2WEt{5(E7Wyz9mxWHt)mFae3{yYSgL;Au#-vBxI(ao^d?o&bAWvK# z`gX0U9TK6ZispdX98R8ACG(j{#Eo^XDtn7gX5Am-z(kAbF~{bNq#)<|=*lr-3$02o z+vSDt7ehJ?X{}29QB+#R zt*c=(YvRfjGGBuB<<-DcWsN{2{AQrsXjUL5F~!SfkD383jE>cXoU$l^`%OCR(SEg& zmjAvWX#0LR7|P)U!N6C}U5D3~5y-F0Q2| zepRalNt}?>V`<#dgOTO=0&Iy1XJCo4(tDkwmYf~QxPb`EYnr)oB3IZ_EU3V~-b2WR zCN8(;#LE}r&FovD=hNOFO&ga~Q}|APWdo8=s5e+2*5e@c5iIk;E%0S*XziqJ+rjh| z2Up8rQ}>Wv51-##jOjr|Kh+7oyW_UaMdnMd&Lilnu%FNV^3;kjPuMHYtHxwE6X)Y( zCLc{!W5w$~LuLIVZm++jDkKwnYZIT%Yq8FRAclhm+4Y#L;MSgyA~Ef4Vft7(mYT!S zH4_e}f{D+g7FW9mM2dgR?MA~i%{g9`k@Vx+= zpG>i2W55S|YqHw<98R^?TJd5PYqT8p*uot~``#4T0->H)w)K`-+f5a5q00O5R;zVu zWUKDlwnj_%yXA&3-vP}(% z>ACkj#(l}d!F0vm=;>SQ`P5D+s!f-3nbI;1OKV`hC72O#R{k%tcRom~T3+9KvkPhA z%DiKxzWEy{*S2CGsHFZ5u-n+TMD@S~4!IkqC`zCUH%D>L_ab?p76A#twftY_Wvp7O 
z0j(dl`UA~zj!^TCRCTJM=aH7Qhpd&Q^Rm)SedgvaAyG4Sql`7O)Wh5g8Ohgn?9C?} z`HD8`!bTKVwGwloM2RhR3z`5->whBnyv@V1^plQS*b?TgRhJ2lx7aH20>DNpl^IHL z)yea%Djh$+mMbKy(2ML=>!m>{nbfNu3CnNl>TIm4k0)D^%y@{_=S-C~z44o8!kr1C z;x1)hZH%|=r~(9-Pk-Cz{faw}72v=6_ES`To7NIL+60Z(>1!F`QsrSD-Qy}k75E>( zx$-3f4u4`^UjticPlC>#(>KaY_ zq`wv`Mlo;wW^Pj7ef>UJ{m<{M#Wi@lq${V-XFeq;Tab?F^d_r+_av=+pK+?cl>K6G z)Zg6_x43ka**gNocDLxp9Dzy2(5}n*s9hY-NUyr5tvaqk$St`pec7rwpE-rZBBQ&p zExW*w4~+R!5`U?WgpS;mlk{&p6}r-Kx^MpPd#J#JUfxO`jsqG(YEwKM#_Q+2cHLL_Ziq{ ziP+H7wV>Z7v#cC!s-4wq*s%B}t~pRhqHvG|hBJ{_Mc}RXVMNrImNN4B^J}9QSs>TL zuWP%yHZiZQk~k{~Y`ZpXi>TnWujeH36_$E3Vn|;e*1iLN-3Bya1C7z$sMv7`MoFrj zm87zUewQ71Uz7zKN(ESW+W3GQ3^X^^4!1D2^v|8 zTSB8&Oh>ZzT8Sy0X3;94PF0vr6+w~O*a=X#Mi;n{W-%r)O1b!`gKkuz>0t-xs}yc%n|PqRXel3h@XIg4We*7}(vuH_%k)WX z_<_FqA%kBnQLxXP0L9&%x{=kKnC1hkEYFt%n8`A%vUGd0T8QIQ&2VunRmhLp`j(qR zh*Dz|F0i|qHAf?@kOh;0MllAEmD7m%a!s@~X0|q~If`1?sOGU+$Sfbz=k42_#V#zi zb}i6jJ6LP&TcX>UYiO^s{%GI@H}R1oa*VN?A#*qL!ItsG%;5-a%_wY-a1MeWm(Gl? z93>@roj!#VbNhyA*54!^zH9n-#;%_ou65e-20AkR+V@YJFf=fFEW&!WF1oy6}jA59sg9d=VBE}ah8y55)d z08^C2$a({&G83jAKwFU*hTobC0uh@rlDEd3Gu!MbCcsNZinQ3PRdR@%vu%}dKFrUD zV}=#YDpSHy#rKe;I3ujw6)Y_YSSZRS%z7F-p`i;_Is!(%?!a7g4vU!HnXZwZi5)3} z@34}(1ZPt@(`{6&r5mv0iK#^iEej-hG;X>NKE#ZVAi5N@rHY}Y#Fh;wi{NBYOe;~i z_fP~LZnNYJ=5@)HDDT?=j7qu_&>vIB3nrXYqmC|Nb%UpLFoU%JWV(Wqf#g;3cj1K(|A(GL`_jI5=r7V$b6(euN)4lw;EQ5-yt|f3X0pU{ydw_TfD23(BapIMO-EclZ zAPj6%3~XP^JSolUXv`Qt$SrPsgT+WC4XkvekWyZ#=QsLR>dIqW9L(27OuSY59G4wVh7^|A%>f~SDT`1L zAj^^u2?aOYI{~qaS?^szFe+`w^ImzN2|MxNw4KaudDRiaA<<^Tyw_-$Pl4fK+oZ@ECu2VuLF&H4$NbJ=*X#>qpmPB2%+ARqZ7wwOsz+;hPl zk7G@MVvM9=rQxjCCO!WLXMaeURTAgZ(!Dy((?8eu?mYwk+iOwvpZ?gn>ks`SL0C6y z)_NurHyqq2?Uu_(1%64TQxD}V|s*c^HzYNR^Q<;sWS{1`UVH0N52 zU_af)oXh+Ql=cBXE6j8#nm5eGun-pIR7;ek)g3Tq`e3;n284nE!oxyFVS9ZYsjoAW z18*cp*^^Hc(_>|fedB!AT{dhGc4<9ELF#k>b>k?4JIa=y9fHh*Q&UAJqSMOTL@n!4UsD=qm?QcAXdvza2xrIGCrT9 zDg)PQ4St>gg>tB_mgBfM!I?=v_dc)@mPyn`6i(CXamz@U_8|~QVv_8_DX1dEqu1tV21n{ z5NOPck`@)8yiYh%)*Lf_)Q~9;-OzVvzwYYsRI%jETyX}sb^Gge zY|cwQdqPwDoDyxHQ?ef_r(`CQ6ig&Fmu6$PSyi}2t%_l)sdHv2q&y_K{E?#QkupoH znLXW8l-X2;6~G-bOE@HE=dV)x88OnqCx6oaqM7Asd|=Gy|IC;cbH`p9*Qd5mCjmXO zSu2*0!8442M4d>I0i;2<*#lV&W_Z3mfWuhxEj-j!0B%8`C0W3p$WTCxx*}_VF#&{V zZa(k>+%cn-v5tg0k6Ph&UE91B`$wIJtt?~q+?b4sE3`LlKXOHS7A(2ehmE-_neY?> zJjZr~Cm-i5b9k~1cyc&AISihhG(0)5OJ1e8gzewo11U}A%r5I1r1bn4!#nB{C}WNc z{i4klV{plo!ByEAu5y4Y%HfJ4T-{=7F^8*|!BvcK;ovE2F@r0T_dmeZW?3D6yG}4I zs=UMPnD{ZbrCP7yn>0n$WUAotMR#OBUY4 zIvR7mtckAz4)alMBl!6Y{Cor-J+a&{II?!9gTb$sDoJ+$hiz#U`Mg!#ssK8OW_PFi za1VhI(Ag$3P-AzEkYlMMZlXc~Rox{zT3&jo^)iMEAq$nXa1eY?`T)={=E1s~c`!CQ zNZ-6JN~&gUfiH7%8%W|X;q0KYV>`9Aoy z(eiEk+#3Dga%+=J`f2L1 zt}URBB3hbD9nv>1y|h1sx5guUZX?j>N`Nm4ntKvSKZs530T;EUC%o~;u(61Eo-y35 zvKT`rE^81UNT=p0S|}TZWREib02@)!WZqxP^gy@)Y!@qd^zZ_XKXzV*d>tm8uo!0z zb$>N}Q=Q})*YxX{9kg&n<@DpX4Sjy}Z3|cF&+q$Ae}k4@bkfNemz;Cl>F3ioKcz~d zTAFxy!$qr3l)ZVb_xyun%T`T2QUB(SfB&UFOLJaZdBfA!uBoWof^@=W$ADks2VT6f zZF)>DOL%}kQ<@tE42C@|9S6!(W)75TuJEH-XguIBU|N0|b7q?J9ZvX00k;UdwGExo zoER{X!?|308UvQJD7NBOG^)A$DLY+FqUkCP*Ks<{a~<4~PSa{E-7}MMBA;6W{K#{0 z!!mPmhEDmri8!ZMN+;slO|i*{a&$S$=32(m>kl!{MtTC3U{l*vo6I$L%Dtth?HoS$ z=9D_;-VQT)he?S;jNch<@)p@mKL#FOADu^lca&tQ%0U`L1v!Ku+jfkp6wwkFhn}hW;;Tnt=`Z_ z3Eq9k9=LRfL>nSC#&|zIqsOpqC(K$1IITIpa`?6r7A&SEichAJzgu|K;;XaDf~yvL zJu=6x*K@@iBlc}V40~Kxy)pdzYx^%Mrh?yQf8+{XwORn#y=E6v=EJesKR`!*l1+{~ zO45VLtRt)woFgwhEr>fUesMY>*BRIOnfW-r{)brwa0KOTdkBJPB<3WvfEDYW+5pPS07wn9 z!3ZE^0x>PzEQ@?;}}-SH3sOhXBFTy9zFs%Y>dv63l22`7@KF? 
z4bMA`md4gkHfHDPdud7A{5(0kRU0@ob9^4Xm3GSr4KuYkVaS5G_qOoi8TmjZg&2Sf;WQS-f&EorvTCCA_xHshm_u&O?|hU* z7dz9!C`TZ34(_ua#hzbSMo#Aab!@t6`Z0$1w0ZVqBVMZk9=6jHlz8UV9C?}D_VB_i zrniz=1#E5#hRM8fIKZY{%zPDK$5;>`RI?P-mo8}m1=3b&8qIjRggGv74jn^1LpP2U z=cJ2IZ<&7XVErd7+q!#m<=Beq(dC=(WY65O(@`$FQIhI#(vj5h@V_LBBukbjA0md$ zWpznoNHM7)jhMqLLeC{Pfzc7wA7|PUXU0^++lF{5o>heK#x3cmXqNzxB|LuZ4z#eO zO9Ir_K)d9~^5+(LyVbH$ISMZ=(8`N&*h4-CI@G91)#)LLTzMWkINHH@E;C~Zs6^N8 z96adBwH=}f>s}l(c;~uyR<&4Y#1^Jvd3C(H6=1caF1zrcx?6zX-heKh{sd0lN?O`XtC}R&@usQgEG!ORE3SwiUrdW+h z*j-iZAVx0k9K9A?dD*Rc2h3?FP~j5Op$hp{ou2(`4mU|=`fVxa;Zs} z!sZK{C`_^3dc^OaXz68qKQ8`G=Yh=c;x#NqGBVZg?E1jAw6(*Oq}ma_SPC=*2qzlo@)GmvGV4B$LiDBMfC zvwI#**tR87HhxdRyk|-ZlN{d9Z{Wb-|fuF&(f8WSgHokKL%hXT23TJCltOu-N1AjHk1^GVNKZ?P*-*6VBS6!e;(# zqNUf-(sTIpT+JVx13|762C%;PD50vAfnj0m$55@!Boc`e2AjRJln>nz@-t`cjDzcx zw!aSSiE*j6>rlLo(SAR!Q@=f2XZXQ=;47k zSxQ+Xb|Rd>*|%)!b&r_ZeUB!{@i-PXX@P9OTz+>JO~=W3p?I=;KECUY9RgAuCnJHV zFJe<}G{s`t!}W>!5*$=dG6&Tg^VEVFgJ4NNI{d1uuAj` zIJM7;M6mJn{=e$4(-(H0ywkm#7JIY4@cOqc&VskLc~>gRXIWnAf7*t8JX^e4n#Z0? z6ed?0v(czKVbXZglI4#BFm^Tzil1V~gy*&F+N48UGNv)-BY*rc-B zhScCg2Q-Jq zvSs+<#W`rCxEVG6ya-lzj=fi&@W|4x8E0O#khXAeZs z3c{xksM&n_x2EJI+C756#iwo``4I#-X#^uL5o02EHWnwk4JXLK9(wdQpHG1LKd|l3 zPsdz8zxy$3M=x0L)?4}q!bP-HT=w#am-ZIse;LZ2K4k=ZPQihzS@>VT=P5|*446)8 z4^zfN73?t}>1K~z_Eh2Qp8pGIawbTJ2d2{l7lf@0n2rx!IB>N&e_lj*to;}~SjF-j zhe)fgF=o&=D>-OvGQg7l>?)vF410p{THtCiW_dCI>;5i)IPxtwj`bR9WjtBk6F;l2 zg0J1XCqo3ix2?>d`vC)LUl(HVOAe^XC$I6?pQ2FDqt$*w*alJRsexa{a8RDZXO1IBTTm<+C^w6-M;Z|~fy8&_hskYHN2*DH z>>}-?glr|7q#AMYW3OhSNUtEK1?Vxqknb!)(E5;O3lOt;Xt8Bn8zv(hH=*6$LQI-) zQBR@VF6VqBV&0!zBLo8hBhh1MspMU zxPOOzg&W@a{Mk;wTQd>Wrez zDMqFHPv!>(%^UW|7~q@)QYw9XBNHSz1-?;_P&=7JFH+b(40-#p({mPn67Zbk+xbX7 zBfwtr%Qyz?7sTDY5iZg5Z194ty;`@91Apo9On^vi=00^fVWcHSB8aaqMq6k~sP} z4sqPz_{GV`S;FPS)yDOKJBoW6_az<2jic1u~DHSQrPyh&@a(HVjyOa zXK>Dt&oIL9oKcw31LFb{Et6BGU1lz3m&~oqmsyBd%&|CO@y$}hGR^XiRf^RBH$19F+oCKUqoFbei zIh}Fla<+0V1HxI(`<(Z=*tl$P+2g9_+UI8Db`Jo~rIPLd0RR91>HtOn1poj500062 z0RRF3761SN00CwI0001Z+Fer1PQpMGJp-sAq9!V?yK;r4AjSj}qZl+Mgar~==%$4t zhR2vvG_F`0zsjwAfFEJ}3C~Q2XD4&#-1nI~1&|go2&ivSMz#pYHHvTodo#SwbDv(3 zNK!jT+F-$`JA-5NGz^X}-~`tcuOWj6gV&kyVeken@M&-oDNGFBWY-^qQ#i!0!CQC~ zI|gr~F1iNq;y`>E{7pQ~oxl-)3Y{m!Voh9?dnnPMDxaA$n zsPgA3-c%0Gn6>~ba&K@IbRoY*9S~%6d6Eezk75OTK`J4O$e8Oyf z0001Z+HKG`Y*TR%$MNqaj-5EY_k`Zdd(UT@1hq4)4##Z~5p>y3^^1;%|P}QpNxB2_#CQiN-<g4bfgoV=|We!(VZUjq!+#ELtiv>3>-Ld zQ9>zYl+%y?3}7IG7|amdco<3rUVIE=I3pOzC`L1ev5aFp6PU;(CNqVpOk+ATn8_?= zGl#j%V?LE);Rr3Xa-DsAU=zF8&VG(@RASi1J2tXctYTxQ#Il83yq7o*aGam~;1`EE z$$jo}iUllWcjQwKspcLJc*-Ll^MsGo@PcPN=QMu4vxnEbu$n6zVjXK)&j!BmjjNpDEZ2C$Td_;LBuJtpNwTC! 
zs-#J}WJsoDNjB#=&m}H%n+sgz4x2e4Io#l;Q73>aS~V4CuCSeNfZ1 z3RAbHRJF%YI+QMDd7DOLP^l{or9y(Z`nhpMY8n*wIb z@R_=N7H>Gz&{lZW9X^kFM?^#EP&)qr3&ftu0001Z+C|Pk62VXy1n@kMBqGQ^Q5i9) z-i(eJ(MlSPQ6?Okino~v#a3%~F zXpI?dz#TCy W{a8|@-gUZL3;+NC0aAPRlmGysvh&yg literal 0 HcmV?d00001 diff --git a/modules/ui/composer/webapp/src/assets/Roboto-BoldItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-BoldItalic-webfont.woff new file mode 100755 index 0000000000000000000000000000000000000000..99de61af52d09e4dc1775878f331e6027a78b6f2 GIT binary patch literal 28824 zcmY(JV{oTU(69eVHnweRV>=t$HaE6y+s?+eZQHhOIVaY8?s7SMLNbGZRE#S{Xwsl z@k7MWO5YX$fF$_I5B*^;XGW09(An|Fhx%__^Z&q~#RxaCHMIf&pizEeY5@RXW5ou# zVpDyGpIn&#=7adZAedUZnfz!o0Dwj-08p8Du&loS*I3^O05FaGiDCMI6~sJE|L>3Z zqxJmwgg=nL2Z0v+wQ_X*(GGs*4+H=}N^%4EZY*sKf8xyl{;WIx!xG;KTB?=4>(9ED z%s+Vq{{ajR6mP9>W&EQl{lw(`yEcU?2oGt$%B z0Wm-YL(ntOGto10g&LUljSbu7=$9nO0Hx^%02C2^4gSQtt`#P`S)wYV40We1i{cl0 zNtVh&ndcGZ`V;-Fg&3k(+7nC^{?$wRn;{es>x&<+?2hO}75;Jndv7>s9ngaPLf(@m2csV1Upi6up!1J7|pP{2=b271zyPfjw3tBRA4pJkYEeM z?_fzi4y`ea@g4gXV3i(;7O1M=>mC+cT!#+m<^E?qR0rk$sE+lh?nYZWA^39VXW9W4 z6}w6El~Nfjq0GUjPB6_}!l%YDJ=n-Q6KWgpA}8$1^}m}%mV0Fek(X`5Wasi3{xT`2 zu$uoGpRmd^BdAg+gsW|wBHG%Lf6=x!EX*T;XStz2jn~V%CBzIlb!-kwEO2CaTu6(2 z1;(M%+p`ec%q%~(-hYB~Uk#^gK^*t0?v&f|()1jYw6m@r2=iJb=vy&XNC!8L*8Ij~ z>sRnj={>Xst*~`BZa`kwwuS$Ia@G&@MFbXsz#!BM1f2iB?u*Nu={8)3%|#cKLmXC* zROMg3%Vi7uaFP;I4Vh7dvH@$ctAinej-bu&2s8^S^fT~PMr7r9NvzJkINmzTXL5?3 zS_Hr@ytet`s!zz^=lwIfw2hj<@Wc$ELv~CkI#_oY#Yw_w3*vq@M9q#1 z_^E&B+@rE)oiT8)vczGWF?#6rtzUqz2WHKi_t~7Uffw+6aZ7w$5x|xHk2EWR`|sFQOab=0noQY~}TXul;gXphxExr#To znj{v)<`M_U%#@#s7FUKJTqp4Cwt}9(D`RjQlBVpFyi9a&-dA_;*Pfq-^}Di8ubLap zSM}cojnfyxnsxU3X|5ff(^(rT;Ze(Y%D2O6Ngdw9j{ypQJj zay%i~T{PZM?Y~B@>OcopTcGdLkIHQ_+XvT+vgWtaaZYwNtA{g`E9&bT|5!!2wcHIi z7{WW1sCv)=_ z{e{PiQON{mTgR|1-n2R|Wq7%`sT?PxwmB@wl#U5Jj87B(%_qD2?VK^-)sMKJ*bA>O zJPtWORC>_`@a*5Jdp!7dXY}-`&zA0fLexS(66ybQ@FDRGCV%F~y8v%mPIvYQ!cZ3Au#g6|tED=gBl-?NDAd6a z!)1UtI1@z3x2HIQ!GPIogCHp(4z2+S@lOo)^VEiGEBl2~f|8kA&abh2#(72D8-@ki zrr;L&4+;w$%8B?y2A0w9I4l>uaYX9F2iQnHj5}a@`i#Rf12!)mq^?x*=#E?V{d4IadywU$rdNJlld_q1~@oW5;n`jcF=@2bd;t!hNFk@Q)2{&vP1U8 zyG(OL=5oCQx$2Vn@`R;Ro*{T;N%l@8cl?0{L?e#NyM{lyjFM{b?lQv-<*N<~i(`*u#8pWk4(6NnlvGGM|Bp75t$8tUoknfU5q zf{B13w16Bi_`lM?fcrMU1pvnmf*9R0E--wU!kAR(aSl0(`bi212}lUY2#5kTA8sE;VmU`bT&IVtX@7uR0cPDnH<|cHcaVuA30IDpuIsDLPdKmj>`86XHCG{7Vv z4B!FS2ABdA0V)7vfDkC+e|sct`F0si0fbFhco4sV6#|Yml=`&TA&-BckOPZR21Z;# zhY)6L_Hhw*K=}NALa=Mqn7RK7#4^AL(ZxIkc?pr2EJE{`xwz^brqY-s$KuLdPA#St zk4a+tHo+!YKV`%k>jBRN=jTz84FTX9mL?BlPVZmPU>UQCT;_m79vez{OldAZjfUc{ z*$=4T_YeGgE{q1|`G*Bg$Wy{!M;UgK0oRz5ZR7B7{)@W?^z;R(MSH*biWI(@nUBrK zYZ~KbI|7&xUq_kw6)miOrGG6DF5cUrNXCvA)v~6h)_sv6_XdqRDl0ZlysY(V*VUis z1yD`;fv@zdxWVAT!IPoPHIt0dz#={wln+24Cg4DE9Bzz$74W}`Aq{niOK|N6H-Zht zE%)TI%GQ5#XGmMw;P$W|AqVY$DPgnJFoF9w7dhIw~l8R3XnNfVNf^Kn0R&7EnTYsQq>nrXl?V&Em4Ng2k0wkY zzu#P);EFyPTOn-0KiDwcuj7qpW^Tc?@0jgl(evYCYSHU0hv}DWyBLgQ%yIe)yFTt< zLq{ngGN}fp!u|d20VDaB8m7To+wl<`IB53Fxv)uaF2!HN`%k`|RC@4{#^Xs6_8D~& zsn7j}hOc~#H*r!RT1yZ}pBolwD3ObjT3TeDIRirqz|eGq>~RnB;wyv^MO_UtC=ZW} zHfIShB^6AMqhJbjjc?7)R_h)>`&D$YzcL0MB=V&9+)0O$$s%_GV;Tz8WgM68*#}I6 z9oYxQI({P&ng!Qr{rXA}d4rr^L2&_#9KhVc+AARWny4qaq0xt9IPkDO@4cnJ*1b`2 zjyW;>H8ip@A|{1WwAz0cnO!Yw=l+KVjX0i?s+P;09esc2)~+sl;ThwEdbL8PjjQh3 zII}_6zzXR!h%f$S>!PHfS-eeWR4)O_J&k$S36z~LzOz4V49CJnOi*ENos@wR6DOVl zvfhO((@qAxcv)9TSjUWg-(_)E6>I9Cay!<*c>2q008~{?G*$m)8}J#C%4ou!a7X;P zd7B6aX%qQOKTyrIJpQ!js$~-CRX2|v@d&|<+mPIs3kU#Xs%qk<0vF@)a^AoU%wZSx zaTIt%cHD#SLakcy+FM8UU@f%q`m5+ZPmQXZ^->(Z$@aW)w=>DZ=JURD7BX@f6B|A} 
z!ZXJB;U@L^f@n3w+7(-I`obi10U;?Bmk}tKfVCA&49A{9gur!# z%tO5M0w~+gwd3s#ubtC6=863z3ulS-pZE5>n6*w>m{@eEl3~K1?9cdqvUQ&KW@nG5 z#@==j44HNF3z?6d2BrhsrQ-72@cl;iY=4W_#Y0ta;fT$+M9Wc>ju@9IlKY0qD_fNi zb93S~9}+7Em1PsQ+Tazt{eoESBgq8u(JL|3m+=JK%CMy~xj>RO zF}ARCiLl=@8n_h*PI<%7bTk7J{$UUt>4_fb6B+IugWUGF&@ssg z?c`B>YnE3!Tkp1Zr>Y98S;p|ZvQ4_}M=Rl>N3M45?~*$Y)9|0tB}h6(6Op(rq!W(xKE?`iXtGk=Vz2)^MiHdVBBmSFXfq5wO{NBTHM-H zwLjEhJlEgD>ByT_nOS98)};&5Mkz)O=+nMkI;H63eT^P+O~w^1F_@H>7PXp{#pN(w zmc8e+=&dPtdw%R#dtWAkV7JDIN@XpwR$8oG9P~mLYrB0TV;5Qekc=@;ap;)7PNI+>d!K~b-SlFVUq@_FNi@h|~yF`*7Zy_3^sVsd-z-*0fn>(ut zfip6axZvt+BP`I_&)Veq{-_{$+5Dbsp~ePCX;3vJ%?6@nFzpMC|;*EDlxrS~Hz!`V3mq zlqBC>yQ9zvtMzW0~C;emUS!kTe|qm(r9uhdmdCGeuy3Jr3f9y496FbTJot2;_0;7nS5i9 zk>VL!eWTYyesUklIKwo9I(CEdB1*9<%92<#rK}Mi8fpZ|%9pnR5xy34ItYGtxBLMYc zq#iJ!3`SWf7G(qtb{|EMLn}TMS>jKl%pufqDcyt}KBaU`3Q<=eLF)J#E^reJ9*%{^_7a^M_Xu>q|lv?E! zU1bWf;ufn?nc^(Lk-RE$>{4}0m@s$(?oK?ug*HY5OC6j*DVYGqpXen&Zz)6%g2Z;i z|5{y%=~B=UTGJ6kc>;Yxj_m^!g6zKCgaFKc&Tx}2ado+~oVo2J6P*1*uMdhZ&677k zDTs+XSg44EIAdD}UP9_m#DGfMNjn8B(O7?rmX8_I4Pg}3-YulIX5W+rzNI*Jj*xP< z^Bs@lh}};cc^qf7lkiogW^@Kq21;Vyi6JwM%?M<==rPh&E48HR?ud$aQofUel;e4scGN?o@3zU`UXI=L~ zE2WgW{mv9!(%tAFCe~lYDou2$ZUTB#83g%aOq5DGs$ELi$~ww#V*4$cS00&}?-`m` zK1!{TcU+JFaeT0JhzS_7!E!OC!9HLQi2nK`cX5CU>idbvJ(m$&Phk;KTlTgGnKqV9 z!ml#K6&*M=&VF4Id0}7~q!RrUDtyWu?@8U;xmv!7gKD|M322OR?7!!x@oV01zIChx z6?oPUtSBIzqW}*jl%xp#7$ba;wIG7Bgvl+ElXX{rBj%Fd2Z1kbcGkdB;w6KWkOOi#RJ~?x`V-pzhF5}rykZ8GG=WVv^RCBx=$4j3 zY$Z(ew(G2{7ZpvFCabbDX{^ z+>Tdqm}1)JO*D`RBB-!?FVJcN9ow-+%=e=H*9LjaEV^f;lv4u_@U=J_Giq1&4Sn(vI zr}Vpj^|^Fbl(j$-Pgs(c{6N)J@PwU4n{lifn36o3y8!KOCD{3THDVttzdz*u(Iq;4NI6z-k*~#^8~Nxlx(8f0rcZjx-`T<}7Mx9d@~BF0 zyk6$;ty(oWK6}#VzgtV&SEm-gmE$DLA&PN6-gpkzV=&6zRe-dQFdf|R0CU8?hBMM)9*(-R||#d8u}Lkc9z-WU+>U7Zm97jB526d-%!JsB&@YzX*> zFCxoDr!I9Ep8V0dJ}@SNiI-&~L0OcV~sXIQV`zEgr)oE#r2*pEP1Ve@`^nYOP5T?bmTnBN%bd6mPX- zi(s^6n|OQ-1hxjbFo8JU#7N4J+YScqaa8%8_NVDjqC77(=l#|3+m$+>OL}qgimvzB z^kSEVDy{Z~nH;aT$GWLey6Ifz=y97?f{6^rwYlkfVB9cQrq#Y^B?&g&Nft2q| z*6W2j{Nk{O%=YV92z~|c;e?@TR#P^uruPS>C~o~7$yf$o$YX;=hg-l<26x3!ba|=& zB8H@wvs#oqTP$1ud$yvRqG*L^(ciLcWZ@ldIQfKwGoZ!wr>Lju)NYwTFARvNokvL{UDj zhXKJtYH?jy4(C9!%NJ9~)Sq8=_vG4Y_bUIMZ`;e(5(e%bRRPqx@?G+H!8y<4Ef2~W z8rZCS4X@$_lGIvQFCYqK4uT>y5CcDH0tQ+N!iQJ>R8!m30i{;=V5Dl06$U_#5&5?QbFy@% z&Gs%t>IpvPLKw>w@ODGdMey$rD}}Z~@Non%y&^BjioQzG@3*kI`2o#3IwSW%TKh>R zuC*v4eNHEARu&u|VH!o%9n0>OkKq+;n;B8%95~Cry($Nq=Mu8?&V)P&Qkl&p_Dq{0 zT5A<-BOVO}jV{o2lgN6%7QSt`o1i?KX7`k(2G*Aqj)^-8XnRp}$ul(8=_VtcQy94Y z@ytq1!nR3+o9+aE2+g^3pmZQipz8CYjv5~X^->UzM{=CV{kUqQncv~-QTx;O!T#r0c8**G4( zhKSYmnGLOqo{Ic9>Q*>h)_}jm7MFnZSW{BafUD}bzytjgBnC@QcL*a&TYlEB=m!>n zd@Ll}cWwq6gZ%C9^o#%1ZC7>-6SW9rGqX%Ny&;~KynGa9uB}7()pNN;`0c6OB-_K& z9_eAY8n+hg=}NC_cfvj;=r0^ zPQj}}w#N9FQ9~N}i?OliB&!IMsjPeLYC(fA_U-S!xm!*mPe*)pnJQ%yif1YpN@@!U zo3e;_kajvVJnB&HSD6K?bK%!_+?!3X8ePsCDXzLVe~lVE`Dp~Xe?<0Ol+d5O$B7(x zfBXHjh}^MqEi9ps&cy8r;{P#RxD>o09_||yUOFrA4>5=NKlDq@Rau&juRsZ2%Hwjo zkaI7Cb{T;<;3nAbCZw?kEuc8Xw`>61x#g||4>0+^iE)y`okvs@b5N+Z^;3jRb8tpE ziO!V@QRA?PO4>G>J{1C_$!^B2Bvab4<=Xd?*831#+;R!qKkHb8(0fPx?5$YrkV4&f zbkOg*htFqQXBL7=%q+~{l)*v5*u#Hqh4?#>h;6-mWDXYeIP8`&orQ<;R(Re_qd(Df zPZ-;RMytXtbl6T5%4oY8Rnmx27i%k^^PEPX#Sh}{|AEEtt~(DrjMKAl8>hagg{BVY z9K&wF7%;D~c^AN6VB|AZNcy`O?nghEMDaCfyyGK>AQGpltyH#3`0h)^i=79nZgZV* zUhns?e5b7dAD^Eh4g$$--ofG^+FJxVkvfq=I@>FOIS#U*ZRcwsb{h3Uv>)ltW1W)p zOUWP?pXy9YM;7=5`Nls}2}C`8IiM1&{WwEXC4#0!IjL4s#=WGYUvNy5Ez?;z)O8hc ztoZxIeIGI~ONvSi(`>s%rVJXoF-f#h+Gii=upbf^x?LsC!W!k73Vak9otIT%uwaC3 zTcJaFM60(z3Trv_zNH8uY;Slb>mXF$T4-+gfGA2}qI~D^= 
zBQ-pJtWNjItX@K`-JWy`l)~HHLg?%^RePeUOXtVS*3(K^7-}qyr4%uZeqx%>5_-7o+`U)3{bwOe1(qzC3tlVimGB_#rzME;0=W72oK^p zhbycS^7%TmUk4GbKsE`RPdWKA&ok`)9q7nZFWGFZ4`EtbmTueMn5> z*A#J_tvP$=20Na`t@wI2*fpM9ODs-INs@)0b;r+<$~$g1I%{L4qJy8pU1likMLF9& z_kH>?j{myapWo<(a+T-l9Mcvf<`JN z8C)uA1&INe$e|Ifiy;&DEVx5Vr+>MCI60iWS;aGaaHw+@DX79o9xV}Z6v zkmg5W@@%2Q#^aDaq)L!xe3CH1ZW)ZUY|#6_sZLCDZN^%nUo~Em$rpBic;6JK6-hjd z!wHGh6W8}EcAq+d=B|#bTkacMum0FgEImjOR8-a!p9dwk1BZjkdCKDb^k+@t)sqQS zEP?Bkm}hWLXuAJ75bvKf5k*PWz$iwGq!X_-r`|QfRu+Xvb@MwR4v7 z(e`0!Z~6X;{Jp1BXg6-R#>prT)hAh^Qxt4WT5;?@YA)%9khSnmB&>mDZc>W+H`8wu<2QaDd%q@dPQ&07xiA<7OafDo#Qvtv*S?TZ!Zs9HQ~{b^I_GWZA&fTzN7T3@+oo94C%PqQ86J?zkd?)n1})&Ab^`_IF`}Hf zvLd8!BQcq6$?(4c9K*B%AmUdE`F0o7h_3-4e}Rv9k{D8%v8as!AMxoT+{t z)90?@`W^WS8nm44W#1Ts!7b91sk%jbRQa3dc9G_N8T0vy^UA)ts^n>a?w8r-ha~Z> z11sASUJ{1pva5omg`b&YT6=+HaSr!YV4I$(vB$T{Z^yatvAO6n)|t`>AVY4rkJYHT zrR-h|ZxV{aJuxW8KK{1f1=zW;;p}~^SmcsDR+3U>1I-eZ`7^ImX{Mh+j=qTH!wIzHxr502c2#FrkgMT#y$Rnd>@HM6k*1DG zd@q8E?yNGzX;P8pA>-vZ-t-7v{Z}eORu-B0y>9sXq8j_f z=dr45hC1)uuy5qG&iNj1Ey(Ey#%4xJ(XO0igUMueEM8ruJ2_IK9vQn@W<+jxRC;Ju zEaBQ7OQ`yK4oPKnw-qCc<>XEVR0jVl=Glih)J=pbqr&h%L)%mZNGD(c0s{}JA!Q-H zXL7E}H6!Nh4qpx=H<`70j~z_e6us+chkjqxzqLia#;o{Ow$QRw*wQy@5U-Q)pLA>Y zrbsFt#b>_B<#MX!Ct-?GVXow}xDASAvcr#H=%I&V#gzdAq2vfg+~Ro_uC<|- zu)Ht?fI()h&|lt?ABDiHFG~h-6GUBKvWZ?1LdF4g@VWfxyE^Rb{P`oD67iGIA*=`55!g!8On1sM|;d_s9+pU>fA zDo=(_(m}*Rr!fzzHAJV|G3Q?B*-og1r+Zpkq47zoqh;Fy3Rnc6cPA95NMUL9F_PU|5&CVSOCL)6eGhJP_WIB@2K zwhh-od`Rt2Rh&)kE8T=K&&E;Sn_2o#4yv(3M1+A#VjnFfJ!Gt)?goro?<41Ix$d}O zv9UL(#;v#va`B7Q4*7albjcDih@%g_MCDG4EH^6T(}9CA|7ypYskuI$!{{Ap4Z-2Y zmZY3C!db=P{+@Xi5T3i-YW*I}5<6yt)9(8Udh@^05{W{P$BzYB?%7Hqqe)(%f8xbD zYc}QS-Ad_VGf&P^nLv^%lHC>b`3w(65EyXnuM!vX<+*xnDAbRB;c?Oa z*?91q?UsW2q)XemBz)g&U*7NdU71&Macu>6R_9(R?rd0<`< zk1>OLvc?WGQ8E=6vU;ilEQLU;yeUvDP6R^QKe?K0Ca_pkiQ4Gv!WllE?^^kF8IOqT z5ixjdt2HB2XcX;2wfjeIs|g736ZQB3T-2omT0NSbW8S^P(lhGB2RrLPTd--;){~Dm zFhJ^7=1d|Wt@R3^L=|jt5qQOV-%rk>nSCqLC$94L%EWXg7jg*~v5@3uS@A6;DOG$d zjQ_~cPWV0oX|$*eBt}n_l3GwM^z+!qu_`#bkH0bS)b9`UviZDzR6v_!C9f1mtGU}l zUK6=b+f_=I3F&_08-|I>beLZ$H0Mz$4&{i33ob5yZ*ks0qzK}#0BFh&Z&!5xodOp4xG z6#foW1$A0PSpWilI`V%-VIfH;hbBb&xGP0X2=h~Zf*w%jyj^j^>lK>#LLR1JLH2v3 z+J5wGv4_&1Ec_;dJwLK3iGUaknoO#c)z;*!?QGZQ1S&8P7>8{5$_N2Mln8%v_ zRIV)l6A_b)KYpfEmuPNzyg{f6EKRjI0~MUe(sX|HvX!H2t^m}Bb0Tcv7A8SX({uxp z1=IBDkEWp}yidJqj?ztAd4Dc&6>-B^LIye%3+XiCrIuxSgKm8Hl@{LV|3S!dr4Z)W z>`vL(DYC$qKZ^pIp`Sn z@04dyi(EzP?GPKp``nyeaNn0GBxF!bhR5wfk==FX`&6mJh*9!@s~>xKaSTs31q(Rq zru%E{agl&@Y2FlgVcuOT2>_;^jCQNQn-peZ(~?QZIe6@cy9t zsh<}OgS=_31`ISei}}ab`RwxG*=GW|FEPNAYNc$u0gX@M!2ih3n0!x#t|me%0% zz8nz##9omU70v4!dYkF(Ca_WIImF3@U0SS|9v?%%su1O`8rL&Et0)&Eo~-?~+ZxvA z@O#>0`pAwcXxQMI9#q^+X9Rqv0Ryted{9QAYkrY`4EyB?WIYJcSECk9P_C%vxF4r7 zGNBcvgHe!&zhZYE*6YNNbH5I^=){+@tt2G4XoQENRSz6-_7SQMWZ%o&h|g+%b&<-` z)UOPWK2W;p1nTBPQ3n>zMrC~D!5kB_n328hy5Z{Av3KsK^0`I&#fmU+yE&f+5qmhx zQbM@9@hy2Cy9=Jrg`Nw?84D^b{7R0QL z4`%qBh^4wt%uG`$2NR9qh-DNlaCvLdG`tJ^7nuH)tH)+I4Kl4 zS_Xt<+XE|k63NG`R5>~Z46kIu$KcmT>vZYNk0tp>xDIP8ibHf4d|92|S}jAHGfPI4 zwU6Yh=flhk?nERC4z_F;Kmfrml&4&J2PhR*^r8C`zDSKlN3%68z2$iV>|pCVr!xZk z0GY=}#(4PJdb)rtXeOchby7BdTxBk#W;E4N1TpF1gSg=OD-x6U65Zcz)>=HZ+W$Q5 zg@I~Or^E1+ozSJe-FKAKZcCt!<&^%;0s6Lym;G04}u0Q0zSqs5>4$`3ZCwtRb=UQ%#&Pp zO|VK?)+A)r+@oWdkVH?%jE~*bLf#2VyB#ihM&qu$LFGPpF7t=ThlP%9T4#CMoV4cq zbZrXG*&j0NBH{Zu5th5h4>m?Ng8k^iW)V~~1wB?+sSwgd(*m9XTp%&JpkBJ|w%cE~ z3UE8cCj%V)+?|yDRP6UzK%d<-MzbD?a~RXI-0+Cq(qVAqFmq94-@=-pBo?6XS;7hM z9q$?T5*ecSHPEueejf{&JJ)3_AP4+=?1Pk@vT(Y)?f$eLdrRN1ZjwFaKASMu(x=jz z>u8ZsC+q0$uo}oG*Je7$cqTlQ(=5+3JRA#`R&9;RUP05D-4yX6%{|qMD%z&!z!ztd 
zQ`Z$|6zL7YI$*#Ak8k8^yPocMlfpE|w(`C1QSPJIr{9X7O=iW6@E!8*fV)fH!6j+L z|2u)8n5`Qa9pB45AkU~`V8`(yuHfh1l@ce}xgakJQZnw4f|Od$8`=JIt>t32=Ek!7 zImR_sg-UB0wXjk6(pP4fg^Y|cW;Qig# z=qi(B^}@xK^)$?k`t*U}RN;b(Tq$o6HZPyG^DvP4w)s!m?0$4(v>$f2dQ;|eW6z~cX7IV$yiNB#eV8 zC))cShXYODn$zYIpsvqZ-qMHA=y&`Y2HL_5k2|f!(6OQTJ!qlizYmYjX&|XljiGF+0dhsaK=J)% z`0Y`6MHCSOv8SnW-Z2obhOP<1oCex-h|z^>dD34dl3ZcOa&9&9_eg4LEGEoGO06(# z2RprTj^-~|kQ$gtxz0=(LMazfNf#uzf>15NXi;S>4709rF5xfiN_)nYg$tSzHC^u~ ziGXfqH6kKH6)mZ;-wYX^`w7%#*T5Lt)z@wNCz@nIqx%=r@3Q!-a_1J23l38t>(e>x zX_>4~54TjhA27B;Fn{&*fYWf$MvrP17Qm!6Zq4TkB4-K~r^VQZ!byMb{GjqZ`b4!f)e|3Guj40%71Wd2yN$sNdIBd8|^!w)6 zgh|SrVIqzq3r9j`W}0;`scGA^m5}E;*j-edk>;0 zG8?^j)CDn8MgJREoALw6ED&t^I&$W{2;YK^&im;#KJ7m9Fq%cOD5@r@-5=S6$IisA zwK4z|+o$%ZMfI?2K-<_Q=ktv6nOW#s`}VB0o1&Ym_PAE{l!L5~nR+|UEA3;U2TNFL z9b*xWa%bwH#@qppP&4oCFgB1OgVKc9+&hFBd3k?qJMns8`x?LUZrq`!>t5gMaH29& z2_uQ;FE_A!tKj=e(=}|Fk%L-3MGDF_I6Mfhc{Y=O5p_?w60lhLuYB#npADI&n^-wP zn*Xj;0DLqZJ!d8du2^tn$hpzsht<~m>hW}>+4Jlg03`b%R|y`pDR>}%SqT>4^^VZYl)BjT2<9b>#oCw!|E{~ zX~1L*`SHzjK!yf{eMHAN=GzfiT9Zq_K8BV?#7rB6mBhk-eqKD;lhF}~3DU)^{t-a(c>Oor6sX@v zBGohskBP#t+pw=0v~ofoPV19c-nxyG*~2FuY9G3$Njwd;xRdbRgFuOqH0Jz}SeE4_ zVb-b}Bc@vz)xakYMA0u1wP^nIY0uw`%%A66LnM2BDYvX3!jxycg9&j)$*k{l6k3ZI zJnxy7i)T5P)JHT&>NdQ(2P;$ee!v`n@ zDO7aXlb^ow>M9_aU7+z(TE%!V;yRz0i zt0Ktn9~(D$--x@A-qkJwN5^|Vjn>ZbibGSG;_`TmmOY-_N?i%^j=!8q={x;seh55Q zBU2WUZ2|LxAq1MC$ABdKHnPB+SePzJi>%?PbU;H&U$8BB1Z4#m?%oV+vKJ3T!qSm- zb!y0vD_Vm~2x*yz;lUeTVNF@rx&qZ6=5xG=B#+7VvN#5@@J6*|-(*@(=Ii+TRGmW8 zlE0t4Y>v0$$#biu%xAkCHKseg#;&a4JjbNSrsIR^dv;LCw!z>vUQ0cnP}QOT=RI^* zJEi`(;{{j0x#K3wUaV1863Ag9&q>r!$una)ek`A9S<*S&j?}aw1pUMplNl05%`kPN zrvi-l{1@%Wdek6x7<;iw*nktzqfc1jV!>*DhsyhOv)J(<5l5OMRTI2<5~$s=uh6y{vX#A1p`DG@-4B(7wltxjXI z(3}OQS)7}bhEvtMF&VAreU{av-;rV6zbtAQmc1rn)_iI*aFg2}S)Yw^+fEp~`#{H} z`8FZJ+59m)<=lDbiarhg;zMA@)ehgq%^a z)~i6A9^-PWPEXkj`LUFwn^;=`7fKO`G9bz|yyhwI?TI0AzoRwb)10mRBJMnL#E477 z7*0elzA5)$gqxv@*&G{qYoDCJ`a2Q5|UFUbyMNAjAWN?>oqT1vCDZvqw9b> z!E5UBx{ykX+LT$9y*%l;%T@c;kJ~1vwzQ?Op>0r4+tJIZzPYYefW+c>3YSM;;(nwu zhOV`a*L(6#xBF$4^){77G%B6rpVQ( z4;P}DxaS9Yhx&e)1neOMRmL4drJU>ppLT@Z6+02Du5WBlhU;#(E9{rzJDaxS-{zZ= z8rYlM6f^xRg<#SyidQJico$NBec*G@mCe!9o3H+FZ=N5nr~vUa3i`Ket3;|d^L z?qf>6Nf;}i6M2oBWt9Ia8|J$Do_jn4+Q)UhzV}I$fFW9F<0{=RIl;h$Ktw~B zIjsq4g*HxCC{t(yw>3Fy;z4IwZ$pymIjmYR=M~gYVTcm7(-vnT-e7feVnCBI_=DHI zsXNXoFjqAvrWx*LZ9SKtwZY$Q6!^zEu6FH&6Ha)ppsuC%G_QMo*JAFrGuOsPk7$XQ z#Z~9saEY?LWp0DJu`oK~{F5SeofWG_Q@t>Q*~GoUJq4U>RZo=1i8Gk_J%ey$_EX9` z*F4pmy< z2Wx2Eyj)jKQ_$US9%#z68?3Ae0{evUy0v@tzZTK)I$OHFE)3%hcopE&(oL}rcpb-U z3f{|ugAK_P{PVnl$|UnturkyTuB*=`rA>t}QqvG_ig#p3^E{R~cET(F5fLvUflQ|i zbX>f;U^VUgkS!*MyNglKJw#6heFPQsttjYU<^=uXJFg^$D{gWYjB9VIFdD$Bv6}>g zPH&2hzpiD<`tIWmrp}|zJd(WkFXd)mb$;i9($Z5VwbhMu+dOkZt&>M7Pm*^>B&H;2 zAE;u%#C<7%56P4=np!sYWpDE8{ZK!XVn6$&UobuJ0M+p>?tab&dIR|;$>j41aJ9xg zT6IOH-$WrYIJK4rvm4DJd~l|3URH`#IVUb}&&kWq^%Oq2dc}qZO)ZUeGmXZ^NW(ln zefAyquld=n_ng``c0x;X!W5LzMgZ}QPzvj?GB;;Xvf9PLWX0Z`20+DzSS-a0$g{k~ z04OPz66je`K?kq&YI`Hy4y?XPoAo+OB)QZ?k}KQqsJmg`Zz-!<05e;3fI7^!w85;W ziBY{;+-AjbE-f0eu`h`p3qY>#WAEVR3okK#uzKL7%06Qs+Dx1AFuk(;gL-8-6IIzp4#7~aMWHvK zyRVGQ9NA#dm)19oG;Cw-&;9&@OFWJ2VD}?P#1D?SvrEMx;I-b%@n+mBeioH-JQ;rD$iAQjLv<())sv)eH^k6SV z8UxL1gT>~$_v9w5LU7=aXV{ktBCU}L24lSQgaU*9g!rfeU8u3XLwu37Z@uX9+uWwP z-5o9F!e?&0_#W-%16Ks56UVA&e+TKp)P#Lp4b^VGkeI4BWA&IccTOaYy5kXWAGUU*Q$};{|@aa9w zJ8Cu$PfXEs0=jI>)o6;x+xK3XIZk`7YF>@%0YZ~a zGG`MxM);I8=fNC3Pp@vgrr>gA`sdG@bSGLf|rOrYN?`aN< z(}|hzY(O=4_~E~Qn91si9zS;r+r|o9C)H=v4)T3@jRIxX+^6X?j5cp+CdJKc8{uL5VE=X~ zNivNQ85#c1>jSdK_wwSSD&?0aRDio6DBax&&EYn(J|Bak&TeILsf(1S@}} 
zOKhLF{Eb_$xuwKg{^w2C-14%cecIU7cCWpC>i83FJbT7nso&pl#@*8IZdi2eiF3|a zxMKAzv;kJYX?Frn(_>fA1mWUN08IeWv>Pj_yZY%ZEa8+VU&-BWBG-}Ym2*V1a!&3J z^&HRq8vO1g;3AGOFm5pIKCNSkGoI2@TjzD@vG=nN>F-fXdg58?I|4lcnn@R+R-b=0-N4!EE- zB(EJXK@vk@Db|E-4JDmgvy=e8g`?c&OppZabFB*|Lf&{Hr294P@LQ}h;iX2hm;CAY zqsopd{!{TytP~f`y|jrh8&tWofA0p?!z#MFuDRxy!a>ePeZQyZ*>!avDw$1MO?3sD zK->n0zY@2>VeFH6wD1l;?f>8bR61w_s`59Wdf*0V?JNy8pgkvJPYRT$0d&A#4ru^8 z0c;f{R%WV2h9SaXZSGT)8nr4lKKn5PJcur>Lf2iLK+>~c{w`Cmc?>`z@agfE}`Z-B(#>9&!=q3p|T_;HBQpQ8Kd8N${KduYFM7Pl7g z0`~ukGpS-sZWR^uVaU#A=u6prxG^?ouJ0Ek7uhI3x@i_Mq;}~Fs6D$f(Py;^7SOuR zFP}Fu;UHzDt1g|o%H|q1r{i>>1zV+j_QK0=upaE$Rdm@Or-zr$yY`Y}+OL?s?AV2K z#Ajdqiu?S)0gyYjPn52L_KC6qZyl~v6xd2?C$(l#4(-TMibeczxQ0<29s?TMHR?t6 zU{HSVDE0$-N=Tj0C)#}MKQ89n*PROH8D9wWQr7^+7P^c;n4rE9-bW(dn(s|z}!so~RV&JsC_wU*3 zz3eh=Uf<|Wq4O%k%GSftsxXS0GhOT`1b@eBR6CuTGLbP%#qc;?Q(xN6@%s8qz$XBM zd>Iyw2nM6I7=xT{dx~`w=2KWIv`b}4$$_bpL2IK@n#-c0=nh@x*VaqtH$B``+OFSd zyKMf5JDSQ`bpu(WZuyl9Mn%FaE?qDxj5b?ax0$XRYkH?1e&wd9WQ&6}1%6SBd$sLx zXZZ!o6n=z%X}ogitPf@bm;*QGkQ&Y8HMzQu&JlZa&OiJd-O-*RRa*yg&QxV~s#V%l zx3E@wQCZ+ec$ z)uM%5hbmkMtR^&z`<{C~-2Xubc_plLk7irL2h**}*&mW~h<&~&NM9&7ey*(goXk*U zwwracYMteJrH$VUc(;U!V6GMWEcR5Z5iEKt)r8%AXdu9(2jCWo%4H}AQMK2xEmyPq zyS+!}<6xV0zj`*CX#H5SciFDRsRFN$@=y`TidCj<#2;JSnfaTA%VI@elSvGTqjcFa9Rna0fP8lm4VNSv%(7-ogHf z+TYrS6I(MiL+9TAZJw>k%)TvSXSBJS zHEny=5hy=c$TsD*{d{olMWkJmERX1pQ<|9u}r_Z0B%0b%felx(FYYana zH4%I-j`ofyVt&n}%$mKk;0bCi-{}SZ=UB4{{7;JFahVgqN%h5I1HvoWhFB5FOPL=J zOnYszfc+_7-}{7Wa~$+^UI7Zk(6fjZTiR#w;)zzga@ZqGJ!C}G!WTn<#o_e&iUy@L z80{FMGZ16&t5Y(aEHPgJ&eFt-&#yfHe3JefX^7`FvkYE;)1@=tt9*~Wb_;u|a=UUc z+tE_qp{(7K(P_e6fIks^zBg=_u;Gzsu5Fl!)aQXvt%+FLjgAoM5bC?=bXYGG=b zU1HJe?BVoQN1+O)GQGw$C|itBR<=5{hAa8~$DG4J+F^Qj^P% z-GRfbyCRCGuKthKt=jt|E}T?o%JxWWXW!Hmac1oc=-Mj}v-YZJHi-3qvFDl+KjQM| zWYzl-*Z)lxSE%d1n65vHXKRP7U#g8_3^3hLQ49KmBKF3pRJ@}nsw#~rJ<8rwt*VNq z|2;y&ma2|gJh~jk6L?Z_@(8pXMs*yKtVa){NRB96fkFPVM&}Aj z=Q8j`FBy)~#Zjp|E|-9f4aCs>M8f{xsTyjWk4T;gS+jjauKS*vbi%}$Kk-ri=U}rK zC!hfOy>>c|#K|$AzWN@Sd!=&Eu2*&{Pmpz_opj!>{E0N(uWVDc-%p~-8z{5W58W$V zB>bHzWU81Z=6db>HflLFPAvxv^!R<|+bsAp7S^I#_WXd(zn{ zxAL&h`|%@0-!Q#|`km)ve>LDmJL6$)($2hz*$!(3*s56aL}}VI*OUfJW{i2c(rtS$ z9Ojsc)>KVB&$)rEAw;Ds8RN7sw z?BoH>)s+22q2^BQv3C1q+5T!kv`P){=ho}Rtj4hG^+st0#nAN@{-o>0FWAAgI-Iyx z$>@+>?&M+COpV^~i)MeRS-N4CEmW$;Ewk>?ux>x|i{aOe6%E5Iy7+%-lY3y3eSU4_ z9`E5+o-uic+3~lshHkj!cV}&zx_H4CZaLhqi|~U-LvwSx>abXr^%vBf5NU+C&}pkHeUaH`7`> ztEC3E#W&%x-28spX*0cg@Jhd7$7uEp3V3iB&lUOn!m>~`x*M%8?CJ|OD6jtWh4vF( zSh>;Znse%NtRN}h^}n!VxAF5FYbE~DRrF)(C1Af}K ze|i4#v$u{OsqDV`l_yL6pLolsjhsouYme)lG;#iwXB>DM-?@QS*@21D@a`GlPwP&2IIj?2hV=o04**RmTW8AHz+tirp z7gwrUChQtFXO`MA?&Y*HYW9VrX|K4RJKMXkOB}yc&AU-P32F3e`mg90%co!br>0-H z@qbP~H7NF967czql{*p%xusg3kkZg5GC;#xrkS}fpN8Q9G>lOiHsxs8l260d!8B|f zK*L6$VI$J8sWD5##vBbBn+DQwi_L@OuK%A@%$h?^9Mrw=#|hb$HIpCbNP2iu@(WbM zd2pOcQ#Ce#^lTC9DNlAY-wo!;7y|Ajc!!;_BzT8W`OiFOGABjU+Q!FQr|F8EP6mJ8k^`gga>cKegPj_oFHvLCylU&jX;dx}%oyx-{~u~=P&7RqYOZ-y zv?r?Nn#bmodE8H`!9n{(Taw+wv-^K(1rL+n{x6yQ{=?+Hk8CQ0;cdVzqs0z2GB`&uMee`?TM@Qu`m>X}e z0@?&>l35)s+0+K7fT{*lO8r7q?7o;M_MyRKBiM)$|Cy(P82j!q*;_`9T47YaA%(>H z>imt-G50Uo`b@a2{{j1)wKFcBUOnc$Gf#UoKJJZ+AO02TwzN-j}JYzlE!;ysG zY??IfR5Id`Uv|u$KQ-YHZ%@S=n_|j8Kfe2%i|RtZZk#lG+PFo(xutv5h+26Oaf!t% zR-dop>d8tQ_a4v*^YfPu%Fh!uKR+lbkEaB+q(asE3ZBgFA zJi*ck2AAQ3g<=h?UZ(esL?ZZhT z{0o2=`?m=GKrGN5gk zsgred*5q&5_&d`4Zz7>{4-K6=e%oxhI`?uR`)#xT;oP&K^CQyz`z%yC^+!q1eXL@n z{##V)^OW{B;WZjzr~uEkYw!xnVFg9%3W`9{c%o7;E_=baR)We2MdcdWUBjf6{vU23 zf!rF<&K+(YeQ3%q%`IUBHEqK!`j3NQoa=8qcEN0KJy57#WA zU|_Q*HT6p}sb88T&nD>AMv=~7%}x^7xt%gRZ56p)O-Rvsa7}a`24=x9^C&p{JdDH5 
zgQM*c4QiQykT(k&VWZQi%p5X-iSXO=Vi7o(V&NN_r$1(xxtI?>7t4>*MB?n&=zs;f z81v|&aV(uwt|Ej^>+}44JMw3B{eQ%^{BeL~sRaZ-T|%M-Az2l_p=!}mO+ttfL%D`|*P4L33=gUO+O&yjkg zTHlZ>Q#5b1%U<+;3LI(C>Q^flMJo-|$pH*Vhmw-tF1hj>Q_`l3YJ_OGj9In|*L#G9947g6n4_ikG>JdXZt=Xf7pXv zA4w~}70r6*9lzRgqx)7#?fIzEr;O4EbMp`8XAOzk{xLr{KbSsP;8K)FA2m_km=eB{rXg!GT`68DaVQ;Ku!q|zx2y37om z6lL~mbOMN#a-k|K0##P(DgsqzH)^D7;M8otWk*jyRb2tva<|7fAlpBfq!mEYXdX#x zY*Ow2jHDGcwb6kj%^#sUFJMt?;l1`AJ6}}QKvi8476$~9lFKexl0%3-jB+~y{VRsi za;N;LR2%X%UcVzxx_Na!0Ow$a3Tpz=+K#yMkURE~E|1IBAvFPmX^si(vOJoroi+X^ zGdXoKPDBdF-q5GDW1y{047t{IY99tVGx#|s%sm_K9~mK~k@N7vj`;j@<%GUIuC}iaKfCeJ zI({twtX5m_bbf6?R+r>z3%ZZ~es8w6Kyp;7`&5kBxMkwzR`( z2lB(qJo3XBp5OYZN)TWNR4f%&d+Ra5fh7o5l4+}pB?yf-OzZH`eQI7(d)!@{#>QN9 zx1w{aR@~0(L^9c4UkLlejkg5cxNV}q3P1mNQr~sBV{Yd!{xW6P<+y7$>n8U2V@g@F z-(>#vMz^wEC&KnMOf6(7CR~C2GJ)rvj1M)nS@+MO2U5emgJy}toqF&)zs$GcA15Z z1G{D3NEWGSAKEi>!_sWS%uo1(T0KIJ28Bna!H*`Se-awVqFv2Ko57U(@{{vNpaFHC z4nqVb69pJT2JT>{K0&3!H$aDC?6~&hbSN2)4yFG~ya1%|6cwwaNO^t!JQnFqAP0GoBr%(c~$hi5C#x)?ujPMhCAjF1x@a_+kTzR6HD!O8ys#AJ zPl%-YjfbI8FjvVilt$%Qzf$d&3i9Iy3JudSRjv5`4?Cu+Onmshsca@PFl+-=@l=hM z4{cMH@lN`kOBKYeRhrP=?C3DEc%k;AEYqM};-d812!r`;+BrPzXZnAwoqKE)#~sIa zcK05>^WEOld-nNp6R-RG&CH(H_8Cm+A2-~s znZ29&{pPv9`FtxFLHo$GBmR$}*#j`v`|RL!2}W}fQ~Ym3lur?ZktQZHW{N-Hqs$Z^ zJH8&l>!m17<1}4_X%VhC=oJ*|ZZ683u6z-);w3-o%eeft5#+f$beeknM4Wrs7F@@D zA0@#U4$(B1J&D+0ch5B0olX{3^%0bb*uCtVcb+<-bFO=&&j!1v490aQ$J~j@*K%Ff z^Z#LB_s!M=ob3Md^F1iUE^!}uf$qyS3Ehk`XC@7VvYHM09H(j45Kg^Nr{R7)zuS=Y z6<@CII3qkRXLxGd^IO;NptUa+PI9$V1zR}j|&rd#2~9fgib*giGxK0-COG_dTC7>yFU zMBH48=ydLE`V5rcGMDS+oYI2zmBanxyjEDj>+3gH9(K;3HK?!b^h|l?)EFOgWHtLS z2YbgRVXgRRqpmSeQ9JQ`LGIwJ-RSKR;kplq#8$^VSs{z<{oU=*E$Cz0p4Qiwx1T+8 zTWaz|@VJ=iFV=S7lpM~!>$d=zHT zL?nyX-*xv|>&n~DJob2dTe)l9`Zf2gCEc4&{QCRxqtBl?9N*(=cyM4JjyEhWEFjd- zRmiTnkzqbq{TT&6r(8xVF=El1jS(w?C($9IN}QP8pk{@N$&MpI&S0^n_E;4H z@A^RSG7b%%1rlY7d&BXM(YdKTDg!iWo8DF(;{z`dCgnd!@3^kPxcgZFRf*iN7!J7u zuo$Mam>8wSMA;fR8bX$0*~iwvc#OFwipN(1Sh`Svi@3Tu2ejH2+GMrOva2)q#+ z$EZXL^(`Fm=WHJBdv|ivB{z#Sz&DJ&GnE=}4wu&Hv%b zSY;0b)hY0W9;R6}2UYZ9BtG)Uwnt)-_>(&x*dborvSZJ8AKdcjH@?HpE4tvT_#QwT zKlz5*02D>#LO>0J)5*@Q04f5(UwMNx8^rxy@dTzqViav3QvBDC-1D*@d4i zGgKL4soHGH{Z)vRkY9!TY061z7WlDNEVh>eC~2{&$5j|gYE@nm{+5lfF4zn6gbv|z z0u$^_!JNP+vd!SKKr+4n9Rrd4n$*iKnO;!Ykeh$1}{y5AIhZ8~2FyZB5U40=1I zM+;7gM2;)wxDy)kA>KhYW)7$a+7C({WWzs}4X(T^`%u@mpT;Eey>@F@B?mIrFj2p> zOCQt|D~49Q@$WW5&R-b15O|ja!@<9WqR$Q}@X2QG9F3!)HZ-Skx#ip~T^VXkBbN6*~xnUAt`mK_R}!Xe~7LF$+) z6vA~d_Pro`pa|hQ=WJN@;wT|vKuNe*>!U&73YC^;*vGV#PKu~jVMys{MoLQ=i6r9W z>+#3>5BHDnJi5qT`r*!fUG5*;-R91_)(#9<_mZNuL2R;~B4l9l5~&rf4@r_(W6Xas zP^nw)#&IAQ{!|#>e|!P@oIbezORC$MtUS|F6G?ogh3cIpIa=S)s?HMd^wE_S7x2WY zMZ{!7?d4Lk0ne49u5LmrBpvI`YT}-bI=v;8&q6(hrh1il3>i@kNmLG?`gc{uKUt`b zQ~VP_7k#IWv>#DANc8PBPu};%)jQYq4~3Pp?PpJs9@H?DcD>sB)@!N8_u}>U_xHZ~ z$p}8)q{Y>6(j&>^7`;)NFJYS=ArU?dF$2I@dgw|=a za@E+=aJv>!@y8T0mpZg1@G*-i2IobZmhcne%oO#F zO*5Id)GP@Vm&&MvpoL3be+*B_6pT*r@>FIidzg`tr~rB)g}SZ&bpCqAx{lw^Ys4pR z*dS&aw|Wa#l0HE}^YwTUyb~Rs*Gi&6Hm7doLH$IbGNF`K{dvZ^5{xQ8E1g zV&{dmr~jv&i(WJ|wY0x>+S27z=C-A2b_VYHhP}zw`P9ue+j-jYe*rr}d4B)^0C?JC zU}Rum0OGro)*guGxB1E-&%po!=N2vxgwg+5{~5EtVee;PX5e680*L|ubD0szN)2syQWEK|5L35fss3NbJ&Aw=68MA=dMU5z9^)rRc1(|^* zGQkY0m&l-Ps8H`PU-hHOb{%>2%Jk4M+%$mw^qF-tvlVVFP_8|2%azEajYuUQ=8y|+ zTLZ6gUB}~qwO{Ap5dCD24<)n)_w_tf&~tRyeCVbp<~2RS2xTKe&NX8y9WPiLl?P>N zx2cy6_)g(S;B}XJk8IW)^#}R#E}xZ!89aALC*oze(d2~78rzOwCfA)zE@hiu%OfAM ze-ufw8wYt_paQ7YweYC`zd2w!nQ>Wz6=IzGR>PiQ><#vQq4`L~>^X+j+KouMic&FV zzL6i293n=)L9Hsnpn3`qCE}*ld#;^QLn2Oo7cPoGno7kDl?1=+!53;b0lt5`EJUYt zm|>YH9O^JWvEQjHaYuKd%a(->HDZ1R`#uf!NYlYt91Z3Wf|WxkIt=S3b2$Dl1f=T} 
[... binary patch data for the preceding webfont asset omitted ...]

literal 0
HcmV?d00001

diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Italic-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Italic-webfont.woff
new file mode 100755
index 0000000000000000000000000000000000000000..dd742443826b2ae329ee3d042eeefd2f67652358
GIT binary patch
literal 29080
[binary WOFF font data omitted]

literal 0
HcmV?d00001

diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Light-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Light-webfont.woff
new file mode 100755
index 0000000000000000000000000000000000000000..cc534a3815429dc9ce2a0c49854eba221fe9fbd1
GIT binary patch
literal 24576
[binary WOFF font data omitted]
zKm70iC`8%S%Or>Xn^URcaYaL;0}7)Q{)3jwt79M#kU$|DU*9(fB9LhbY!K_Ro%0J=5g1S6Klp7z^+hb+q zqeBTFU3`e{7vg%p>bi`Hz3y%L`>;XP3b*S{w(JhycS{&mnJixKr=aJp*W)Pb#&1$C zsb=afua6t5aXPp2(_pwa-dEp&OF_p*A7xlEW*_`M3BQ6})O0a#!|hTqZrkBfFkbsj$JQt9#pZFq z)<;*N^rF#9fAa|*^ojSEJKEE!qd&Xe&K+5Hi~i2t<$CfnHc#1dkB;Hc17{+|RQt{W zO*EXw(*e>r_-4&#=poN=#068yaS8)ZY%@sJ`)R%b(GETIhVta*z~H>Rfe{fRlwV>+ z$N(kK%;LY_g<3n|TFnYU9mJ#f&x97K5ykMrWkgEV?Bt}%z9l-BzpsrEcLYmRC=WO- zUHG}!@Wt)j79#e}EC$KS9Xwd@EAcZ4rAyp;eU!E(mq>YAIhb@+#lkoY8b{rsmh5CR6AWov3NoJ@vnNI-d3ilRMb& z4W%tRzRIq`%NvgCXiw~nhhgu_!h@bK`BFAo#Bdx>c?6rTy`3))+TV3@HfQ&v^x1GR z?ki8{&Z(FJ zW1)91(f-(k6JB%WdJ8^3IaD%z=P-xGbI^$*!+lbvx9dFCb(c6fDz6Id8F#NEF(0Ei zjEg_QmRe1gS69%)XDx2+c(#ptbZy$znY1lwdrlPd6gg+tdx7XiEGO-*X~$R7U@K)h z?5$;TI~1nVm4$(_w{V+I#j9R#w0CH&@q1WQ^H7;jOOx5g$n!ErTx7fV*A#VIF1I$4 zNPlC?EN4!^p^^q>+1ry~m#8U)P`Cwh#@fVlDuy`r+1LX^rGVsAbz<788zUMB@Xs8u ztc%C8b;cocWlIpaC$90iY*+9$-b&Z*)PfonJnby5i<);&5Uy%`I>iZGyW%#hQzN+A zxm+QZF-$TfuAHMNGEX5SxMaZm#oxV&fgpduMi_LF+s!XS5z0c3)UWxMLkpqO&=dWT zvq$@P33i4supEdTVx}BqE~bBO7F|J+=g|zZH%oz}CGud9PE1CRsrqlQ4%!9-Fz;j0 zSXPT(vvU4GeZGQpsmd=0t-X##C23>_shL!9c(vL%^tp*_wmBhc(XZG0NzI02kgp}h z#e?|Q{3B@%pH2DQXYGNv%W%o=ukxpshI53ihKuSz((1NHk0DbcKNlds!oTYigW(9M z%$QPxE!0TiNv>Qz!rg#Yvu%+16;RVZWw;rF1V;{9MX~)7mGg@W!h16WoD%9 zQPGh#QrJ|>HmikN?D~^dN#%%Rn(4tEisCEM47bmv99{B5uK~m-gmrdAbGb^-4sPGw z@JBSh%Fm0m8trbC70E!q7^Lu}-`A6M7OyXDeZQV%E>-I`j9M?!tf^Fa7Rp6Xe}9#3 zCe243H*R-vlza{tjd}UKdbJIQwQy*0|CKYvU<7T_%p~LcYmx@;UXvn_8MQ}JR?v>1 zw+QFg9QbU<+1SrbV8q^Cp)mm=>4OSCs9Wh~w4v`3ZBVKrq|TsnXYH`nd9sI@4~^|WQWmR!J8%lADLwMg~7RL(TlOn0LVeR952Wvm6B7S5P8Mj)Mu zuvn;A^E3b?!4D-j1BT383=AsGWl2&$8cu3nxoB{oG_=cN>OJ+w4M{BKjLrL(O(wBI z%y`tQ7E|d+X-Ytvu1LU2i2hi)Ft)6#FPbcIFJUMl?|KW_ccoMFU5#oXtL+$fE?f39 zboFn%A1}$DZ2aV5;TXD4v0SHqY--@&DE~fHa^w*DX8aY%|LV|706XCb*swsI?E4Ew zYufLeBC^wlmcZg`i@uQSHbY4sX!v@pj7~v1#a~7O2(e(ZG zE0M_4X2c=0ZgDwc`Cje59>~LhvR(4|xEho|*4vZ_PKFY}A&?^UP>kV_>{kM)Bx zhM<9TsC%NKDjOr55$3>5oU_sczaZ|dJ(Jj*+>V>nje&4uL&Lf9{xK`5n(=oSWgd8i7bGFzrhRQ!wQKc z!$U5G@A!O87Txtq+J5ZNa(sM|rJM-LBPI{D;m*OgCDbz06kknU-8&zei{H85q@E`Pl&TJjGXtqBL8k+?6R)RrOw11@bwKO|!G`|oE(f)TMY0)qe{Hq`fD)BkqaA+|?$A>IK-vB_nz zwYYb@>HKwCw*bl3AXiz2CDtI0!SL>O%!-=cV^ZNTZ&E^UddH?U0wxVE8j6+N4L zdx!K%qm6cZ4uCS{qiql${r&ENh*+UD0+3?J{({=7jX2)-5_No>FG^QW`}%W#W*I5? z;0Xko%igShXsxiU|7q?N^6tS;4n&NV2t|$wS%9794@Ez4O9(;Spp+sp_+1uU64#fq z0^7a183yU<1BPwda|GNqCG7cgxTwTd45nD}9pZq%><$|StX6~_6*kG4gFj*9)52Jh zp0GZ$3^TxEyB3z-klnq!MKF85JlU>NP7+nFQl;y?QE=huJ*NAc$(7dgJy2V9I<@;F zbv2$Y8}$8^G3M5`=WAkL#O{{fH04vj|H4${cV7zOUMn(J+pkw-W;(!*b9rgcfF6%Y zz^YYf8ED960>G|>rgM#+|1J=ye%f{^-;hyJy*6hWk6!Xf>xyyi5gDP$ituCBTGQ6I z?Wv0E`wmCeX4B!U;R%y3<|Q;Tg7jPkCyw~EsoNWG`Ru&Q*AD9At~c%RZ$yD7lKH)D zP9%!gH|*P114hDa-r7^G8gRwxkqFV>-^hGOyNoy7Lm+*^9@M{YUlBIkDxP9o9@6`# zJKnFDf~CWrYM$l^E>oUPs;+fEE?%$YareB@db}^S_%iV->{KmoLVDmxhdfCA>u_lS z#(ZtZ3(m~}=T$3t6SB*hjz=l=+17pS(yt$LMTsA(q{hvx*3GEvg9r3m&zQe$WnH$9 zJT;^qvQTrkAS$!%Wj}+SRM<91y;c%>b+;RS*)Ey7&RbvTIQon7g5r+yr_+WTLKsKv z`T>I*LkELql*(Bot;Bt9+F7!}WKb=Uv%pQvy7_@w0q^+8PrAs>}KP z>!<;rD^~_*kDW}iib7{Tmd7Z#mrI1k`jaFSqu?k`7YQFnaU)8tY1@AP`uRNFP9pjq z@$5Y7^V#n5i`8Cs&R=b$r>MC=xY_$P;VJ4)*_fANYxZGqrqL)TN-;itt}d9C-)SA? 
z9);E4U~x%GoXa^`k82*kZe#){Fet?=G}n;W*7CsC!ptnqRv|6ve%u$YZ<^uejkse& z)(~lz7&D8c2uRhl4gnPLeLbr<`nTwgZRdz<)!*uKiI+AN4+=dGxO+2ays@d2tT!P& z7ymLqpvEmRSBV{217rAaDk_gV(d3EDmIiq)=N;Yg512nkTtVwA z_vHVqXMR)^L8VW8ut=XHb>QXN!)wo#2Xq~q0sijN(#qMONreI+NW$<&?5o8&PR+MO z5r-a)Y{GnK>WPy4dP3yJ{2GzjTE3w}Psr0Z(VPFmh#a0t0D=+vF=na5h*~%+jTP_%8to=2sqr6 z*&~@Z3tExR7u_E|nB16C8T_e)n-q-17h%ig+|JN69)wE+5@{$z%3_p5G1Q$SK|0#d znYTw}-59=~2@DW??{x8XO*42c92%p-)gxL`4&n9<@^}!?fYrm3cskrQ2L?@IdO4h8s9_mcxp!)Fllp{RiE%_8c0@n5QdUhK%LEw$qTr_9yu*_AT zE-SOrf1zI12M*19j4{XP6+?1I0XvkTRr)zAcas{ZjU&$5bW~>#ISn=#rbD5-R12{l02lU)rj5d7oZ`jB`CGo@7Jn`l?D- zwcXJ0d@#M7fz#!?g>aY9=yaHRsl6Zf>}uA2eG4j-4d8G(40_!Unz~%szd_>SB@Rcu zNCrGv5W9ozlBFmQSk@+`-tpYzt=mnkTyid zx`|-B@RaeT?HTVy`|+t1+I?e@Q2!-&2C2|A>J9rhGFc zRAG|;NK+DoV>T>fL_JbXJ_W>3xx`%|id{IcE(k>$GipX17 zU#s%=q72&!WCukaEvh6y630UhmfYDJSNs&<|1^Iaj%|+Wu zs^B=&ZmUs|1d+bR9`Q=cw zO!s&CBgb}yZp!mO{ujKplCN6nQPH>PXu5`M9H@&*#6iz2UxRET&l>I*{r7Yck(Y*| zyCQA&vJ=Gh{@?e7es5~C%5g*qoOpZ9wTru#s*|RoJ`_>Qus*X zVJ0>Jrd+J<;|0CQ8`zF7@|G{<1imqtPMejAUrZ6DkL79d#IY9i8N1qf!qmhOKruDG z$lujP3cJrm^xx~+dpDcuvR~587%Jv;w-|FYHN})1gV4AcsUq@j^$28TOzrGg?C*M~ zJxhHbNs&oy=w=3gz+TW*%eZ)kngN+l)1U#SH_ekB~ ziTAi(%Rq=X`fWj1PB7RN{2?WIU10b8D_hQSK8DQ_8R*6VM1~9j2$zrnXW&?z7j6d# z)ctBykzR+*4q&Dv-?eh$_9-Kor)K%NzIN&1cM}366q_;+pI|k^sXf}K-@5*m%f&YFvX9`YS?TcWU;5O87>nVbRSws5WxMPD34XnF?`O}P=TP_AhFf-U{KRq>)3m& z5!Qk`&58{gNCns!z?|1KlMlFsR`Ae-0pI7KYPiI?w$wLb_vw2A6*Stneu-zlrh>9rSw8@JkF_K}e3tl^SPqP-47 z*=9DR=zHq3+fwyBE#PUyv6B~%{Oh-gBa>76NNCGVpgbV~E#r;fQ9p5jM|7b!_k>)s zAo{F|AfPzXSXT)y1ZyyI7jh+^-1h^UCszWaO-fiW-2~GpjhBL&(en|4cAS;kHe!qU zt5PudNw%E_lX5JJ>*3A)zPj5@CJNcYqrlcT2EF)$&n#P;@r6`jvpn0Q_`9}ScLwQo z&)~*AO0i9bTe2eHV^!_ndWdpKsSdKSKFD8T8pcOV)T+X$DdI_asBHDI@k6>J8NBt* z7tB694f@?7_j?=c&KJe6w!>*`uF3WLsJJnAN4!|6xs&s!%~;pxE%iW>(T3By)`DMV zjL2q$C(Nn+R^~;Vv7~wYNU?K54Gjg5L}2W*BE?xO>`b-`rk#ZLs)Pjl&!KFuI=_0CBdhrCObR6otI;feB`28 zK0WRQWxd*u@_LD%-?{cWEk{H*-#+%`59(+Cq;LDNme!c|nm`k>+ceaL;9^L+f=Jg4 z&oM}5iuT%roh3Gu>^aRlG1RH@c9^m=lv=oem%@>h>)^!=;ebGRErtoIbRb9dmC5vy z*Qk*~rc*f1>KW6D2Ix0Ui^MxyZWDiWpQR7o&jpdh+n%wM98E5YHW+T0XK{0!*D>6? z*S@@WTam0)XInFG-ZaA+uX8?<86RLOo<*Qwm10u@_3*}ywcwk`0WY}xGDGGFYIPaJ z5-!Eh6~_xB2h#A|9X1nXE@h?1y-~ap(xgOrjEtXY$tN5QOZhlcAmoEc2k_q`rGoXs(RD7xszDJFi2b1)uOg0XmB~$99RfyhbNxu5cT}_k=`yE zRAtPqbVXn=87}kY3HeqHC;3n%ut)S2lD7_kr%q^rA+jRh-fm1eZ6Tb^gd?uc54*SE zRC9#VszC$BNI^ik6NgbAHVC^mwR$4OdNKYPZxjA+YoP%TjBmX=Z@tLB9CesAV)fa0 zzppURB-qguPR)bn4okKT4#(+!bou+%S-rP>)m@dQB~KM^r+wYhb1_D??dkQHNQ{X7 zRv7$(gpN(FS=EYxbXh^Js*rh{MOOg!Mjjc|CG(UX)8JX*a=KJOf(ipnfHT@%)x_Q0 zu(%Q_%dOJ>?ZHPC#jmDqKTjXeca=G~+xxYn3Z=NK@Sz0-wGnYi(9#ix54jaAG4pdH zl(>b2G|tA_$_lwX!)tTlI1uh8tr*1`&@tnLRH^R(ULe&1GVHgC=Ee81w}z++vfa~h8=ynkpG%cQej;*#Pe zTOvI=n>Z#rw4DaxTKsa(u|%OYI8jTr#^x0Y^d5DIt{{THz%3TK#3ifu4AFmfb5Z3Cz*`h7qxhCJz^iH!l4Gx#( zZV3AuyC-HYqgqU^Cn(jvIhIE7s8EysA3=tqH8xZgVk^lo*rc$PVe9%PJ+<7)RL1J{ z9CPh>ryyC1v~K8TS=vL&DS3_b);~|OLEnO;Tf8vA(#eW_Jy zr-jrD2ii(tU^IL>gzsCnVc!%8*0I6Z!%x!zUt&~eyg^?IT0A!Ww}Hg=Cmi?F$5++6 z$9d6cOg4u{|IXp(u4Xr03~Fg~dw%KTFO178ty-JogWGdr@iQiSJ7?$YhPZZB>n;1& z2xatl$SmG5I+n*@@5az*&x9MWQ2SnXN2M@^QJ5?RgaH{fh7o_|@#O_EYnmDiv315i z76M|`QPB5MEP_x}#(IM>T`te&dR57bjibqR9nsY22P!g4EIm488`Z9s_Y`#Rs#7RM2)7KJkP%IOJrj#L?*zU#! z+3$PX&ybEkhaKl`D};W2HR_M>@J(h-jbwlBCky4gC`Ds-q6C}h91%iuavei%BCbFWYS%DJOpb$bl^;Jxs)MhvFXc+ z$6(W7X!U1X! zzLpg+tv=(c?{!zLq*EiZcG4jE0Ms;wVh0-x1x^;&E4{oaIXwJcp4<5RZrM5B-_gD| z?p|{3sal&#+fERYiMO~m)_{=#+KlWq?~66`%ov7IJiQPH`7=Gi*n=`r$7B*iD;Z1! 
zw$U)Y(%+%+k8b!gV^~El<@$q9_%;UKWW=9;tTd+Qx~(S8(&Mi#c20QM8_{EGP8`0D z<6BiXuWLGl1{k!-n6RP2u`hBv<_W)qzQSMT?Rc9@bxyV)5Vk$BzIU@c(;OaO6iqK_ zed3k+jqP#NP~hH_RuM=t*AjU#%1z8lBj_@16q2rIU8$5soze|}gAkn9>Qg+AN0WIQ z+WQZ}MI*2|_Vz|`T=<+Q=!TDVpO&5smD&E`bzW7b!X*gfrhF|qM+ezfy_vsjG9Ax9 zau>41>@wRVj*}GApM$7iO>af~LAB~D#+}71P?dQYYV+_j_6L%r&jWK(`x_WWQ&u#t zghfPs8Zf~f^G8lqH3x*8-0D<}>MZ}gw3;?`o(u+u$LHfDH=gst!>wGM(&FC`LxL&@3dT{C$ydOG`c=_$KRj|OT);#QI17w-i`Y_uHrucb~?_KMQ0jZBB~+?zeOk-!(iJ^Xd}@T z_MEv;!FkB#*ja3PkV+)wHERTR%l;+ru`_w{lgQc!{>lL(jBE=m_-nR~0)&KXeQEYl?j zDh;WD);N_%qP@=>*{)UzZh|A>$MtpGoaeEsI|tYAzhuH3dA|wo`?;h-@xxDpfKx@g zCrKxQThOhJ#-%*{sjd(9Fj~NwdSxUche)7-+x7U`%;x%y>ywu3i3J^!D6`O=OT6SU z3wKHxP>y+CShD*2AbI;W%5H2(B1HImQbYJ&Z~FgmdY#O|*up`Xx%D+XNi-Tdi3`-D zX%U@C$sMW-wf6P+;YsfLvrVWW;GN4*hSP3Ye7Zch>xO^Ka`cNzHN`EiNhqpAe)pCN zXc8OjiC%u)ZsPGqN!Axtb{q|F-qW_0k5_5SIQJPG#NQToGr*um&zCJ(ax5JrWTAzZ z3M+Ifpz5<+@q_`|`E7d9$sw zOjG)pCjAQg;bKAj%rBj-vh;Mb+ytfj#a)`{<9-_9iuo{@c_&!)j|^zb>x3bL1FW;n z>LAcH*S>6d`_k54Mp0fs?@n{Z&CBs!Tfy@Vk9-JYyV|&%JQz z`i>KZJCtvD2Pk~f*LXQ}y{~zWwnscKv2c6ZOM~Yq^5vGuO+<08ENeN2`B}sJ*(Toy zfX?c>8rYpn^m-mNbiIv7)x=ZRruKQ)bdzn{E1w18w73glYsc5TZq`zI=58=uFM6P* zYvU4|%54Z!H+9Xk4hb+8UQ$?6H&yIP9#Mwk=f6i@?lZ*g@(&A(336O`>LprN`+?Y z{+=zL?$j~M(#28*?O|UpGZXM!RM>=_FOXWoQ&u5Ej?Sz=yrnw7(D89F`Z1lN7p-L~LEo>E^mjb5XT$0G26XSz< zIK~z5SLVHp?7*r&WM$Xb4N@qV7x3p0gWF*kt^KoYuopruN~b#_=?mnRl>v>(Xuvp? z6g~ueL?Xt*yzoyLqr0O=4=$n_lRXxg9SxciMUx61?`E2uKt_XN4tNx#!Uw$8aZjO7 zv8?V47_Zscqhgn}*}k*5-+PDe*UG{^<0%YCMbhc?CJ(4ON>`oV#$SeWiR5eW*40g- zSVDUR=}+k{WU0c`15zetu9kFkUUxf9>xGd0%esvxXIQ~930ED@-c8F#_s#)QmfI4o zB-oZpwd`j0dr^Sl|VTI{5A3lWRnZaC5j zmTX-4dxH{)TIJRzxx8XE-S#E;uf0kQ`FyLKa4&(hy2Uv1$-_!1lrIjoRGquIp+a)2 zA`9Z($i}}$ZlzB=rSefFE@fwOew#EtL)7Qbem=;YNzPr zWXEDBKFX=t3ECf@I%=uVxiGtMb~+;KEUBG%%qLtpu>@;HXhEhjP@j;Q9qGV0Uj+9vJdbDf59gG zl6N@h(=9TySEZevDBY?QJ8GX9e=IR|aUR2wpSB4~+2c*}nD}H;D_<#I=4FGi=J7<= z`_-yrlTAwv;Zn73#h$=zYwLQ`WTxjdk0aH4=60##_g71or0g$}-9M~Jd2rPd zwlo)-V~aIsvNRXPso4gq?AeC!ON`{WQ#`^+-i0i*x109o*u%9u@3mibxux*xxHmk} z+he|ZYv3-^>@xQL{ik|g5&4l_GBaD^TQHZ+5b}I?*p(S&1C3@)VjTIgj#&=_MKGFV zG;RK5NNKqEIG(7(oQn@$B3`aTMJQ0TT_#If=JTX|&Ya!a?^BzcGc3~wYqe!%QoC4q=+2g1N8@x@I@?sA+=7y`)a>7pJ}sv36}~>Vn`nnqBTblMMAV^#yl~ZMc|YC`NAFxFcvH^9w`8ZLyMH^&Re06lmMM74t^KfHtxE)F5 zbj95Hj2cpBrT}FdJCj}3$xt0$+0XqWY%Xwzt=^aJfN+BAz?kzWh3}7{+((W0o747z zhj;ib0g$l3_gBdglU&#ODERZ~E%GRp#ZQn;C=U-v za41f55i3JHaStozzc&%MA9=eY)+nHQWz4IWk6n%t z`~TE1`^kKdu%ACfGkGpgGh`d?N_tRmpc%0P467uViA=L z_6C!1CWgry%meHF#0!7)YG*G?=(v44F!NeMLJ9^@GG)fSlWzc%Q75Fz{VRJ5jB|Eq zh~6RwrS&9E27Xq&S;dgeI&;;a{=M|_?7rp#Z-g7m4Ei6iy;&~=TwG?MObn${8{~PY z$afVbdVKiA?-;F zvVS!2KusZ;sOM?riBhdg=C|+UG7f)~79?2NI6o-S=K8lx@dqTPs&L_1+@V>n8!9O@ zc0)SWn)cvZh5gFA{i>*T$Mm0{cCz;Ol+oR*qR3Gg8mVeTnaj>$3k4gbA9G&_EkvM< z1rxGNmB~|>!1zQ@QI)xRO>y!q=(PJwS`Qc5*9)t>)e8jC!pf@_?Kl;F7l#`cdOBmh zk!5XmRkXcz&+{)etys_Ek(1VC5qSN9(m}4`Np7Fnw=9t|$eXcv2a)Ph0pQYVd8jIU!bq!g=KGlgR zHCRKT|C<@Yjxqet(e6Wv>~xEO)m3K0OZoT$tZR6Kz|cgd)s6Q%^UqiPeP_PWfNhd7 zUi2xu%Ry!MFie;INx|yAh~%v|xUeFR3|j7I;E~qNOfTdPVI>ztrU*De4HT(?oRb}; zy@DcnX()U+!oQ@ z(TAB730hNQD@;uS?XP|8Y}rPZnK;P~kEXpJ`%dzE za5k5bnEitqcB-l*zvyt7P4R&n4C5T7!OvjT??1^p6i%+f*et&|ASkFS=|3%C9^%YO ztt4Y4T%gV^iN&sw-*BLj)uMpNUd~T5lP9%5S)Dea z9Q+W&_mT}D{(e!@dhZJwIP=6|x4WaS5!>_!4t2III3Ie2ker&z{1VgVe9TF-6M8N; zf9kQXp&bYr7$Qa&>NDaW;h)#7S+=yM@M)JJgib&EsEii1Q))hXXwqV>k4t4qU~~V>qP9qt=^p(68CfEA)+u~>5uou8S@6G8z>(9V9uZ@mr+by zUOcLSU0H}l@4m3wJ-yt;qA$P%5jD(80B`rcXpk{``=K$q`+nh(BJGl6E=w!jhLOUv zG1aqC#*^Hr@IxWsDOp9()03%{(FUg+refUWHi|WkEio@dBjUY`yVbrPnF)xRBj?#J zsDr%4{7|q|^NIT$zVlRlRd|rvCO62jq=y#FKNDqYm^cME9>l~Jf1uB&AvRu8$N_1x 
z2MDvv%$Rq6N-wJcDV{4(_|otKnv^Tv$}(SCjq6L=A^L{klKf?47B$E=2uy&6;!NvmnP-cS(wa-^W)R2u8)2kY^17ynU*v;U746F3)=3^kIBsD}(4 z(!4lbf9U=A3qYnai8}8Ov4vvQ*(_Gixm29bk%Bo4T4xfWZT!|20{D#Ovj~0i>enA{ zXlPz|FB-N7`SKq2{Q~caoCRl%n9ydzlqC45wEM+$5PJd@AxaxyET(4HU!e8{IN)WJ zf!Z}v&N=a|(HG2PPceTz@Q=W*o#=x!uARsa(`(bPg2|%OA|_M&;mO9Ink|MTUO4?R zyetM@uz-!!POpfuQ7_&ZQLMD^lQz~ntQaoh&DCtp23;|NP-M{4+h`E-#BR2F;q)tT z{o8<^0@XI>>od+w%rRChF_i%EtWnzL3QmpLSzjxdMKA|ACw0^X)|P9rvo>eeiY@3v zX;JU>KF&9i1|-CZS1E9C1USprd&$su9>_wtX)z38gWE*iX$KA5 zx#-}~xXVHFf7pcJpou{T`dKdOVj7LGEZ|^_8fZskZ(`U0@=*TW%@YT-;zl-efCyR> zzv*#)L1Od(ze(#zrw?d@9%uX0SoVEtk6v{vZrFEG~M-cqlmM=<7Go8?Ql z=%aHx7_(q&UAeet!fbEky47L^U2Vux4CjOFd@!OLytIxY7?wTHh_XGM)`Eg*B*K;m z<0ve>IN<)k<1eTVi6ebHXIgY?o~WjyqN$z}SHOXUi1hQq_YSk91`pTGIgu%9;$UK=68nBw-|?P%JNo{spenNx!P z1?~L?ev!EVP9bUC{jCHdv~1?g2Jnz!dZ4ePSmoLjpFd!5L+fsEDtTM9gnvPWZ5xf@ zQdz=|W@`(db#0z=tqm)QYg0Vl03FjOt3H2q+iRnybjx=2olK&(u{u+6B3EOquOF;0 z4J%mB?3_<;YjGK^5LX9?3#I8ZrUSvVtTG`>TdefohWKo2se=rOiTuviic-N3JU^*T zY$}16>b0URq}NK~(rY~e3I%@)AZk+`RyY}O(qv>RK!pXA0Y6p^N@s3G90zpRD-elaQ9w~f2^G-&UWV;iQ=yqV<(*8v^3O4(cyw?JPJEi?xU z7i+vkAu+|V2xwD8i#=7dr@q!^&H;jD!pY1Cwv*JR(GyO6x?^S$%5$GLPw&`t2kGDv zj)n=9JUMQ5`t<2HM$pFZ<5=iMuduGH;3_MONVmN1mp32(*EQt4PxfB&SYgL@OS+Gr zpuKqgr9H-8A@=?J_h+BH>fP?=X2t8F3b+p3a+Wt?gR9!%u-QZhxVyOnygbWobbxoHI4A^g3z_J~DjqC!Q={-pAszbb z3e%ED5aM=sR$hJq{-PK12w2GDOJ~jB-ZPrN+bG>(QRBASzs-%iTXcUfqmpmqnLXgk zUvJR>Zt8^r=1Yw>@B~giqjgLjxEqpjl7BXwv}=u%FyJJqvMGU+@)~lON14!Vd6OIN*=9Lce4$3P3SW3a$s%88+jnGvKHQjh=F_m^&7ggILISoCR$)&1maM(N>AIqfw>OZ5Y(x z@Kn0#V-yTD)|U9Xf?-|C%;{bRPF?O$ITh12AzEyoUai;50sho-Y+|WEUIa7y&tS8y zc>kY4?B%U2Z}E@uyFSVMK2@1OvZ(LRw*bfa;FrX>hm)~fd^S!jGf;}C>kw0Ot(b`< zN`=wgsuZ}Dd<{y(B;J~R)`et5VYPx>&*DHb4wXm8}$_dlXhnxc8};KfH>V%?c5$)AndTYyUTU% z{79vR@@=~|__kM9_-eF$^?xZHy=TdU{0V{SIwLvqN3v72pP4YBGNR)2QE8Pph-Tk$ z{#v{}MxqL1YE+_$b3{Mdh!!w5dPeKIrnd*oE1hC$R}-5=)raCb0=SO2krOdYnGvX0 zxQpSFbwIr|ZGn>-IK|9n)uXy909;^Uv^P$BfxC=abI&J<)IM zj4{1D(#_BJ?%AV{_WRd2ODi8JFMp`)qR~V9Pp-YA*We2~zC4+@Oj)yVKn2fTtkuwY zC~f6;$p1XQ%;ieY%cDjv$~AlyIe54Gp#-xQkrQ>gsN=7XBl)FV3uxl zW3&p}j@4j@Rw6uOb^21%X#bL#&BowVCmV$UZkXO$SHT6(97w{9yXL7RDF@CRNwS!l zb=N1&B_UO<=8}lJ!J=z=Rv%7c!1+1AnGJAGZw+TA&LU=ThFZcIq;RGt!I^54JqTY; zIG@4?l3&ivqhjjMftc6;)KF_1#KhE6bHX|qioSyMt0NiYUotv)94a=+dxkxK~&5W+5 z&Bo{%&}WRk2LNuI!dS%J)*A9KK%T)M&j1k`j41grH3a&D=~6swDSAUCC>WOtya=#+6(*X zWDlIP!g?yPlnl~#+no#g(lV%$mkLeY;a7A&a&cOWXm*8c*>8F-CGxA^M%+52o z$kQku(Tiu&*usU=55V=WG40TEW*6u=Ga8q%vKAvtly->z&$DKi9v2$<2JyQC2Tlek zbl)*raNWS>8f5x`3xn1l#w8x2rnHaXkT+wL8heBqjg)8TBh!j=Wlx=8cKGT3%aGMT z2nA~U0`_znK~^Q!Axc$AunMdmQ&CkVyj5ip>>msO)A^d8tW?9opWTRjc=(+aqT`aD z+Ruaj6&_eLVAx%~-yIa6JX?GJi?6i9jdw0vdUxKY8Q0%N-v5#WdsLVv+}rJrIZGrt z%dyAXr+ogLsk5~2|N8E4+Uq3s@7F%{_xo3M=vhZ$rSn(72lB#R+}u2YB@KO_F!( zks8Jp(jt7Kjp94Y6qmNrY%M3d+|}BFX+7!1l$V@sLwQW+zJT7rxN0%xXkEs$QfM1; zzK{^YRNT1LY<+|ZO5k83beS#JnS8E*Ul018F$G2siJWs3%vQRUx1VDmjQAod&2BBn z!Wb}oMrYSF!_abiO}nI&FVA#Gix)3MU=3mI&;RU zb>~FPx>oxwbS~Z%leS)yrzMg^orm9|G5I3!d8g@EN;9aR2ZGoWQHohuzFpK9{uaQn ze833H2bjr=@~7061t*KKw>P4e0+(RJVlftZyG&_$XWgjOzi$mSYW)_FuQ|Rd8CG&l zLu+>U9CW?6C98i#^^5@C{NrMkXy~|dPM@VlyCv*&YMs?yuj@A)>xkpoZPDgu9n$5X zM(ZQ|+;mH0VOBhYnGGyGE=KU()k=GmLy*lF#r5`8>L1SVqtP=QDbh zx^|1PUOoD#qqAi6uu3FCh7#+M$|+?R+&XjYWbM~~cNkN0!L8G$%xuiel1O2wGHcn= zzxv8kmn};RNr2r3Qt6d9zt_KhqyJ~^?G)GMcl-Tx$DrlKgdb&h?%%52VX^GH&wt)E z?Fi~OI)exJ$;*W>!*iA?JgMiN!jooLN0>0fyq+8ghS)rSKOV_LMSiQfJo8wl?~Lg@ z;whHsGuHj)|9;&x=O=?4r0b=Rq%v}9=Uz9>0k)nebg>?aK4ay=ug$e}Z0$e6+B3Mz z&bs!@bFV$C70&Ab=V=)Uq+|Lrl=_E2Bt&~FKo%vEglEzqGdV}dVA%n%1i|xO(i$2+ z8lRpiWBia< zYxo}AdTpZEU7s^MJZbh_(%3tNBu~Cmc~3OjdhQ*4va>$v*V$^)uebH2-&Pav);C6P 
z9^1yiJM(i*PxkJ2wuyJNCxYUB2hBKV;$+hxa!`5k~D(GAK!N*o>sp5{p zfzw(_L)BT>pXC?nBpwAHphyezvZ_HB$$IarUSQH((Hc&A!h|sba2n}Fgub^778WIn zTyGe6Lm%yw_DVzD{jq5sE2nkbR!bkXVD+k$be@|!OKultJ}{z* zN*Pnrm_}-shcm`5t)iUdmNZ+ z*&bP7ggsDI->|<|&z)Db3VhsfpjWSb%fHorZM<&9vSrI|ShiHiFoHu=XHOeDc3N^6 zWXhPalSxJ6w%0*fZ{5Ft>kK1EBEkJJJiJq%c#74kW-FUWCOK#Y1f18$FrHZwVeEqBs)+As&2v&oL?tW7fDOA< z{95Q{`VrP!N}p+Mis2)rX~Q!OvVdEKEx#CKVdLS9PC}-dwi>!f{Myhwgj+5i6fQRX z3VLTSN9QqvbKsVo7Cq5O%e?En%=-6IeNty*Mn4@Hh{L}HD%&WST(v4ldS6vr_WknK z4)ckY{&Dh*3a-TjivP`Lf8pndp9me{v)~zXFPaA?{6*}}&w)Q7+){py>6t(L{!&wC z{rkPaC>aZSqV4B=o80U<-zLHa8sT4iV}if;gA2O}zrIAKY4^Oy*4bD1jZ8S}bwqdD z&kOI{?a+hphvW9_(jH{j>8<@v#uDd$aUJ@g`EG}{!XH|R?SyuO+E)M<|2MiGJJ*|X zoD|b925tg%7#Mtt$xSb5ceAG@>7XiRnNG~5rn~`mECKxGNeIxz+Yx^F4caJNV!S z@KaBUzW!KyheSSwS6WL%PV?spzkv(_f6N}iCp1{aQvHpVR>8ir&}U8PZ&+k?MtAwT zMboBVyJ-5fYx(yFPPlf_r2{7}xQ5EMyFs>nN1ywSK3`wGw?nkVVzi~-WQ|6Xo&xV^ zV{#G=YLeujW=t1X?D_Ef8{!T1^go2>a_&w(Mtjx698t`MII`lSGNSZ4CKN~_V+1+_ z=W;guhR;l%c{2>Iliu<=3Ln)k)qEX-*U?+zhn>*_57!xR`l#?Z*axNDuUx!{V?*?5 zw}oPl;e;;G?{yq0vN@?;jWg@g;Wi>Y5scA}7k{+YRb;cU*+eA=RbGllsqNzn0roOW z&{W98iFJW!yd)EUE5WV>F{)%Tl~6{R_r@p_|K4?N8qeovAb&>cBdC?5O-~g+vP2d!>>tCafWmBDb9`*e733G!5(d+ zKgBtR&b(ox@$+yzev2nq8{^ZGhRt#{WWeUln@Qde-RAf4b-Jqas=LVntpb1USzUc) zg|G9H?muY%BKvnQ-|cvvRuvH^oiTU*lO_=2T&eIGH{#DTo#c zorgx8u~?mj?x(mZ#Ty0W;d$c}J?2>P0A{j7_)Mib6V)1$F$v|Xe>(VhkV!X#5?u`}wMj&fMWDlKNt4FW$0M9toXG8z08E$YFkRge zrlf~CfC-Wst94r?7ub)M{}pIbGDznJruBm!_*>h0*2AgAbi_SS3t*9sP*|`^=oJQy zTGm0Ib6+c`w(FQ5q(H^Jp;F*gOf(h5&N7rA5^;I_xiAR*s*QmBQD` z%kZ!qhw+rLqBo;>sN4Wv?EogUT7XZ$iOFhi*g%JB%dJKltJZM!IS>>6ZxGftJG92& z+4+Ow)ErLW^oiCujRQ_&vDPTfHBhQr&EP0^jQ;<{ElySC{x4zh&+*_{nDYMsP~;}~ z004N}V_;-pU;yG^fv#Ec{5D@1bT0F$-`!2o#LZIeq#R8bhm|MT7N%ve~B zWG^K%NU&+q(-0DiX~cyMRSUMBs#F2x};fUpE{U5{cD`@))sutzw@adQ}@ zW(D0!Z=s`5q_Q~(C-c)-|9e(HHN)f zg+_btov{(LSu@e7S=eJ0quSbsG8x1XeSmZ7MY{OWD_&Gd3VW}jj_WJj(HL+aAj9l) zp~%_yY4Ef}#_cSe93~;b$ZIV!Or45PTBU{#(vKR_VAso_qb%e z;XnBl@U#PeEG;}EZ;5{be5Q}nm7KHB!Ea0(y4}raaP`1XGHlC7@&y?(Lcc)BTtSz; zR;qDO%8_T^SA)nhGf<+btgm4q&!Ep!M+V{h1(u}C9RPURV_;y=fx-xeHpT+RKTPwO z?U>`3dzg1Ie_}CViC`&XnZ~k?z{M#eIy2iN}Ix4bKx^CSEt*2Hs`7|M;xHA!8PW{~!fPLp0E!vKV8GE-zlWar2+$f?QQkY|zKreLBFqEMqS zOEE^VLh*!>h*F-?6J;LdDCIuoHOfy^xKu(^%2d9ncB#3ktx|iTuBM)*eoMnlV}quM z<|55^T4q`;S`W1Ov~9F=v{&e`>9pxQ(zVl_r>CPgPw#-fi~c12M+O!KQw%N{${0>C zk}_ImY+?M$q|4OHjK!?aoXNb*{F6nU#VU(ymTZkwc0ji(`yqo8vhg@Q&jv#~)4tP8v==P6bX=oX$COI6FBPIj;i3 zbIzZfKe?=PRdUsHo#Fb=ZH2oB09165ssI210002$07d`>0000000IC300ICO00031 z0cHRI004N}U6Ic(13?tWzf(m-iHZ;s%*lyu{U`~E5J4oZLkSlSySk;(?aFqRIQ4H3 z{}g9&7Jq}Ii|@=1w%y5l^Lan!&6~FXro;>c^q1BdvnSkVDB&z-|9F7=9HSyTL~9%K zmIbpeEoT^UE%&83%k|6sScGqRfG3_T4`K(;mWPnTo8@6l;KOnb3;41;f(tQic@(E2 zwmgQMc(t4-zgr&1jQF(tf=Tgh`4z=XzEcgoP=&JM*BWsktNwM|bR&80d-brTIygXx z78E3Ee2Ah*T+w%jM?`4g8aF0-NMA^+$tOyzponE^2OT44^hRhiUssciIh3NwTBB_J zompjem02D|Q6$f_sr9{zHC*=LBj6t4mc9`6p4*)+t2?4Q53t9cBU4vVyQZV$PTi%V zUw2t%?24WiZJ)PF#N?XE(Y@T5m+o>BF7pzr`^+Ley)V}@DVmunp4>!%>%lCCb^dEP z%#YZ2ft{yow&^M3h;P`(F;1wLY|Zr4DyHKGI}UoLnU0*3JjfapyW0RuB#48RHw@6X0>`RGo%)9HxfZ+_QO z#sBgNBub)*#zG8MY{U|Wop=&RB#C5FNF|MQGRP#0Y;wpYk9-O!q=;hL(Vh-;q!XR# zLRY%cogVb07rp62Uo>&aK$t-3whq=sSK9yqO2raa7oqc>@6T8^XevWZeV%WwzHnLZ& zVq>SovV~i`mpBe^oS*#Q7l%2?eeQCK1uSHDisl+~B6CLq!x>q+C;p8)od}cSUkq9NxU2?DgS-!S(I-nriP`JZ!&U$3&eQJ;yCuHgs731V-b_@E45>V_jtT=fa zCFL1x8>Mw*IDoxmlmTbXaDnoeQ32c$qY6}bN>Pm;@Q~IP1)ZDg6X=89P#A#zL0AWa znXm!Yr@|)KxC=wDsf8^tdYJxd_4KLJJzX_b(=(5o zg19&U2=FuPy8$Tw*#+2+1=63VK=005--k4E+fhJ-idH44fM%s<@tkFW9r{Z`8l zTq7F;djJ6HCqGd55BBr^2vZxmIuil_(8WI*L=$BL=auPy`#xbtli5` zT;~tZSb00h>t})|fy!*P5zOlak 
zE{Gu-IHJCxzNx;kJM{3Je|*Fq*N_Zh78vai0HBNrI`X4;UoT1Zutrlw9qr3ll^`hb zk*QFCwk#yh4Z+wSIh-t?MvyY6s0l$uoZ)1mcKpI9zJ%k#bf zQC`x~Xr7YYdtaH`-g4U%5No)ZV3OhQ>vbf?ITVxD-XZ3L#GkjGSw>d1tg-CI+Fq=D zF!ic)uj--G&3R5Ec$WD@`qKH*6gc?Ck`iC2XF#V@4>Td6*4iCAOM5u5jYZ5It%@q7 zRto^HN(-f|VO9s>JJOOx$}zRGsK)!_zA@IP{jQj{cHJ3*uFzzD&$j8iXM-nAitVci z{nRXXZuZF*?hy7@H$}at*1zONniDxT)CltuoOVn@9A#JKAAZ>4t}`ddY;YaRsBk+Z zeW;8+m-Zy))UIPYuv))#J9KU6bw8Uuo>Mo>>d>=3nv?2KO!r1iUyD71C_h^?s#c^kwH5`GsOupnTdH zoK`{06LwX06m=S0qRA*-@C|yVM(jqcMjt9omRFi^7QrxIZ_tuE?5?7YjKRNNQ z-~Z0>KdVn2_n#2FS7VvlkS7DbcdP9AY5Pw|yV(C8it$?|8Q3sa%Z0W~)YD^e z3@Q1h4IJ5nRoi=-G^1?j*du&EyBY-hBLRy;ViM^G11|pG=a0vl?J-t~!^04gM-tJ5 zTpL)m$72uoaGDlY2bEQdx(R2sr;90$fv6+s3^WfW`ZMuYMPlcANv_NNcd~t6#NrY= zy9|I^dhPVb)0mbgC@e6(w2%3N>5Ub}fa07~dhQ^%=sfdBmal-~F*$dI#d!bTQ;6Z5 ztg%v7wUV62wXh$*@Y%^TL?i3d*g8#qdBXVZH~UoUIysDW1Kk3XHr&n`CqvfqLsv|9 z@~6R(YropMZPxI;+6tFR*2Iy|w?Q$1KDaG^;b&WsCVtTKzgv=%>L8xXFTceP)6nI= zUw#_jo07GG$E_gQBzStSHXbtK9J*AwN(B^q_el54XMWlRdh~;Knk|DGDbbl=ihbSLUyJY}2P zt&DYK3#KBjs%?|=91*Poxp40?0UuG(5G z{~Ejtn`ACUwCNrU(Ox?}XRSHD($w4>!a3_VY_R_#Q6` z+5wVH!Xulh4 zHbQW@sU33ahsS@;7LtHwi+#VI-{MMyt=Y;z#3ehCXh}fm>(sVzgR?H{inE$KN#*4) zHAldYQ_BYC*ub!C{-O|-)V`YZmmfv6bbO*fS z)B6u&p2RxA6Oy!?B(xqREHF7Vz*`5tvl0}w95p+?N>Fq4ocoGoAOah# zQ^_N`02&(t+J)p)9*#NSBqAT8WnA{d57<~Sf;VVx?wrdz3$8E|q_INg_>NcM{VT3uB^$K2d33y~s+thkNqXlTUX==`xgG?gOVH#BA(;d(r>(Ekj2AdLw;D zhkGc4-6<*B38Q#JeV|B|kMB7^qJ$~Sv2iH{v%SB%{xoo^31}oEGo+9OdZ-z!EG)ku zI!O=(z{wPP^S>Tw>O1uk5|9!Q;(}hq1OigT8*JJ;9zQzl4~x&p_=%wZD;%5|P0;)K zk~slPtfj~uZb zB9`xv|68g)L9}xMo3ghT1Z|9Y&TEM9bx*EGHyga=v zzAnEnkPwjpk|L7>loXW(mIjvxm>8J}nkt(MoE)7Eo(`W6hzN-hiZY86j0}wxjuwv> zgam~d#%~Tc1O)~=0zH9m|Ni{l_&@bG;p63}>1*vT!J?t9PqSAOEXQ=Z&BlKhj%a?@ zX*RijcEaIs*lji1j6LynJzp77N=wajYchW-CSz-n7jS+#Z_#S2+gY2izkwV&fI!IOGb``*2rDQl>u9U;8yxPfSi~%y&fv+~ zJ8cloFQp~~2e>CjxBvj4++g*9A;2A=5zq?g0gM5r0CRw4Ksw+W2nKKjIQ{8O*MNJ# z7hnfa3&;l)0;&K9fGt2d;1bXWNCLzI+5s@2^d}@yv)V3Cn^_kh zmbjtLh(M3C9Atv7v1U4_5Z(fp_Y4^si_=T@=>w%ET`&Q66U*uSdd;PSOt}> zY(O)=mWY<`9Z+TBr%D^x)6*NjNKpntCY)83Tc%&u2Xq>nPW6MRX96JBhSWS@@!=83 zQ5V`sCu!l3o(!voA(4`Bp}CGWC%%dWUnP-8yQQRg4nkYNM-x{2^Vt=ezIn6cZ0zs` zUo>zB-1@L&xL?LY*D;X{c?_xip+tqnRbNA~=Xz+t50&2M1e5)bZsYuNAe*n&M0wL% zT;Q41AU%CIHt(h)yVN>nqlDe;DlN+wsAk@EXM4v$a`9n?gp3BV=H9=g1KxOPYgx_V zK}9?{JiiND&cmy^G58xP7W*1Bf6kAe8!^!LHvqwXeW?dRUq$sv;Ook2(aV&aFGDs_ zD$kfaM2fIL2BHF5GQ|)`v7g>>jNS?et*cgNCo7`798+|pwX3tL+R)BiRLo>vZ2Z7& zw#SY<3DNk|_w%c7r!PH?!;bv^itl;TiNB0L_9WC1*RPv31cejicM&rErhqM?RbNzj zaj%`P=mJFv*Ba7}vkD!PoauaA5A3kG~|A`WC& z5m|`uwIe040BA_3}5Q_zb5WaS`8U_i}e3jb$hWKz=ZQ9+2jtB8* zb|^p2kG*WS!379xv^ruo9eE2SFf-Nj8dpa1H~{CCXTqiB{B{3(#;l550*!9jC2c)UVu?j7IX?AQ z-$Ph_89{r$$%TBAzptTjYoxYo@Y2?^RBdRZfH{rF0O@`z>XT{YNSbpdq{RK)bu1?L zpE~Vm#b09Y;HDQ~dpvWb6rXuXBV{9t@=(^6M0RAhLllwv23B=a9OFC!-L;4y&@9XG z?yU?Mvzgw0i@bX0MIR5Y>u;|1f|t>YyS_P0KAUqY*Rhurg`JG7?=vGX9BA7e$=nWu zk;II#Y+U@?w}ZG3x`WR+N8feNz2!1l0apj(X_~l}zWwda<##BE&DX~P9i`j{WU~Y9 z`oU#wm}feSI?&7tmzCLeNEIcgnw-neNE$Kq1z-E~mOsjMElHH0L+1dWIo_AWKCCjw z@rufgJ_V}NwWUs*xs+s8mK}_S)J)y)1QNesTlE$_JZms7*_p@A&B{e2kvEW;BBZ>a zHL3dYC6SL|5sc!PdNe}5ZM*Z^?bX=qG6p>tp;VIR3bqqHOq5yvvwoAmPxe*^Dp^DL zS37o@DZzMLSu3Xm8ZPtqPn};gNjEbI4O|1#h4_1Iiz_~P(hqyPwLl55wC1ErFqt1-8>dG%=X-c zd-JB6+Y2er!Tbn7%<+LTQU%%uC4SvYl1oN>%yqHCp|R;8;IrTA(Oo@@iv7;-T<#s| zb6<#AZ)>2sY|`}(Thke%GuNKKe%~z0++jTiPiqdA8*N=#IXk>_BH*>1r9;`zYSV7K z3EdlN-T<%WdpnuP=AR;M?P+880neXhm#v>Nx`i0WZ{g6&iA;f=lL%2dvQSM#+54AlZB>l-k z&yn)?isfgp(ktc~Rd(6L3>z9^1_o^Rh8<+eomGg?VYQjbPRh3}sv%4oY_i=uj=&!& z+dZGLn>5-5&6Y6cX04XnDYc{u@9?2(9`~q~xVVYMoA+RG4sS97%e(i@o0M##=9{}u 
zIpMw}47gMdYdbV>i>kb&mNN7N8)HrI44GW+S77Fj3%N_)GFQjdStBUC9h(pLJ)-z! zSNh=q%_G5VuEEIq9v$Ncdti+4z$v`6Xwzh|@h@M*0W#kSqx$&xlh%qpRzB7Y(9QL( zldT5(_l&NQM2O#s7VBG61pMOfub@i#F7%}Eo^`LAdBgSXNX;85?ckO3z=OH!+^V{f zqWic)5tA9RDUn>r6u7;_z}?6h>nN>Ng-t95$Y4e#maeYU$Mu$uuH?(Q(#YvKP}{Wr zUu;|`iBbr2Mo__&={n-3*snKp<{s%TyYp4$S0aK0 zoR%d1g;5kVbHB7FG|D2KevuFhUQw)Y5b#DP^ijc^l0b;ZkAE=2c`0^beb~*9aF=M( zL0H`pdXwK^t|tBq{*XB@BB%+zPeoKOdhoS}_r69NWhvz=u z`RNvD<8f-#*G}z=9qnLtr>M?S*JA@Gq078JSlmyvKYus4YJH^H$IpI0Z3_M#J5XU3 zEbaPL>ZDKz`($iTq#6VWX??#)MuIli$LGepH#VA#ja246>nSGn&-MbhRQp%$*umlGj2l&)6@^3a z(x7f91Ug>piAF=AU4a6Z3C}F)tgaNDMB@AoxCn>Fgd|m3%W7g)HBZ^qf3@*kNCv zhgR?N)q6{q$5(pbyNAVc&jGr2{S)u2>Y(5KLkPi#hL@x-&?p=9N+;;3>d4IAJ{l)I zR49q+u01I*AtgcBX^>T~clYrWyJ{;>HQFoWHf%riGAb+(N+la1w>zej_V{c8Xu1!#+9ah<&?|A z=0w_>+nsOX{QPYk@Cg}3u6uKp_F`QT>(a?1atXKDo9BiI&H%zw4~!zJXdHoCCGBau60#1xmf^sjw!82q zGT+**!zkOZjtIvCsK0$Cw-rRC2uktSCY#~|5jY@wD!Am64p@;a)@8x&; z6|jS_w2eA`h}IYe+o9`n>Is%jS)p0hT8A5-KaGX=sw08z~C}ELjONe@pS6y1KsSShaYrnXY?87WKV{Eo#`XZ;@{`_Ne^?S5Wb?NcGd$I&X*S)z|{e3l7LV&c2 z>|kN>s#AYPC+NK^II+)a6+`8w`{|lNV9D15!hK&jU&{Y#3egPrrD!HG zXPQuTrI*)_Q0&0PCz4G=mSe9!2ZQH~8SjsZ7-162)7af_%d`b&#}#pW=#=yPen-o5 zlBNTNC}!Gw_wrc+d)?5zAup2Q(Qt>nSsC~9;#8aEKJUk?>+}ndyBsbwHJW)|N4q=#uDrUQ>gpqXB>sVmmvM*P@rur*u1Fgv_K}LCa!G^-@`ApW% z&wQIrAU_raf)h7~V zOW3H_h+yGB1Dd#k)j3-EL{U?U6E>4veXXB-PGoa-X(&Q5@cl|y?>kXW<{p1r>V32v z_$20V84WOTym_;n$o5&(*c~R%S1~h-6YyPWJ3fXes4Q>#E$rxSe|=yfs4Vd{?Dzb3 z>+ZRWhLPvK{Fr*#M@h?JbDNKBDzEN@K2|QiKhn%$nCErbvzFFpPyLX^m>yk-Vwif` z9(!5XRf*(QVYo37hv=5XgcBo71a^-e@_5dq48VL29!TGVLp_u37^7)rHzcoylXy)A z)T~SL7pTDp`u0{dX#Gxms*kC~{!{OLSP|h}aT_^5$NyVXcrwrDAg6M)zV$VUh~tCJ zX>ilwB+cYVUWeI2=d|MAbdADsODpme$4?c-Bf@$^20^2*PBOiao*VS<)(-29FwG2 z0Aq`M4(H7xc)BIyi0a@xs@5&U4sf#@;wXs&`4us}Eul(TIWufOC;YYqid`IqCgug8` z>SVX`L}FPx-$^$Zo4ImsDPw#V$SQym3^my}pygM&ve!#ojf&=xe{*9%^Q2o~bEB)I zbSz6kmJvwg2@i_Lt6DvV;Mux8hPn+`hsSG$PJhMU4nkVlT1+uk+{ozUdjIdBn&Jwa z+&$=Ki134nvOI=aA7dA*n2WHFiW6C_^GVM0wY`(B-pF7-=qRK84OeT;`}OSc=!(^B z7OTypBn-NAkLfSDzaGfYsfA+X zP^CY7T7iK}I(^agmoLmggJ~Hvpjm}J(8)c)+yWo4kS~CLV4q74_ZGxDK6@sR-$6zN zx1h&lpIva;XZ_E_Sg*p}C@<4E^L|Ul`oTK;SK>b)m$6kODfKMHGd*{&Xm3&cL%B?itoPcd z$>au!xSd5I&V@Bpw60t&#^a{B^&hoU-6=RLPUh| z{)m=Hym>b7R7)jPq*2QD(b39p&d> zVuCJ?Xq1712H@L#ge;7?O*$@X+$o5WIDbVHxwZfd*B#NefLig2N}wfSRgn}F(S81r z3t3dps$YD&itd3V+k=9tmLto*;PLp%&^@uVq1ty#l#bMlS!E;%_n$pj>``7$p-yKI z>Ftd~e9Bod%?r7k3Yb9K{ScuNfA8?yRQ~NU-HSZuRc?72IoUzEj=1n9*E>VLtl95j zSvAF!bU6Hnm)NL3eLmTVvXXWe1SgI^$?;#{I0iQ1;JubN66-1q(L93_0%4Z67XwbF?l z;{ERwxUSRe`i;D=|I^IuTrFqrO_CHi6)DKL>J$MTUD{ov0v5N2nfCi}`4^0M4WZTQ z6b}M9y|*#qB+_A#9%bp&zU|SzT27R;6#b;|xp5K0F9!+6@d8W|%UJn{8n0RDJ2~_t z92`bj^rI}f8igwh17k;!3~fPjlEjJ4fLV+rtYr+`#d>?-OKfsgPQ%8IMo-Mo=!6eJ zqFk6@%t%3aL+vKpYq_cLo=&kYz4ymQor`ZJO&gosj~kuD#@oiH)&c@co@N^c@3((r zpEzpryg_GJ+0VM^Z6Q2DZ5?tGR3OQgX=|-1pq)0#z9l~u$;+!3qp%MIi?u3xcBb1G zg(C;mOCbQJfvi!VK#I^;LOV_64kbc5*=G+4V5KJ2YzEZm-h?f^A@se;n|mrmnZ&xn z%#u%UqB(h4md(huI6T)nPh{Dpq48xQ@jYSb0!S{b)H%x^3u-OK7KC!5NU$4QF5ZfL z+~ePXbr1Fn4t1q%^V_q;#W&HFO}z3cLB$i0qYKv*p-EKkRBX$5U|P}AAuH$qR;Uw8 z-7a40nLH&Tl*o^ju9!W^8oC-0G5!KNJtAcOkfc5R|#`zXVh*RlZkABdSJ|N1u!Mc1I0R}d@92Fp4DG|6aK!&^Kp5! zIdqVDtKtkAkBajEt>r%#W;JlF=9)~ew}(jUR~W9k++hTgmO`e_0^f)4j(5gM7Jqk( zdcD=(FLr&S!t$0dNk?$Hdb7RHg3JUMDLZBtRFe^|jW2LH@@bN5T(Z-QIqf)L+BYLH zRcC;QV%0iN(FrTS+7WVSXpt>~m0_i^JA)EibG7F$aa#b<%{SBdC^7o!0jio)S5f+@b*f&M`{Z}i2)p{bNB!+H#6BZ! z_}B^uC0rmVxaSzHD-Z}Evacj$fUJ?pjCNhu!^%N!yQ%^(=7O)E9H!daqZYS})jk|? 
zygLM{-P@{&y*tK>+u?e7n>ZR-W@~wU2id;*F2A%=o<>$;M@S|-sjw|!SZg)NHJ`41t{0U@vp2?# z74WKGIWha-dZ1;W)*jG)dr(0u*DJsEw;T9Vy>UKJbvK_39tsl|(B5PO>x+~nqQyCI zmKv}-XF;lEm6Zmw++tawGu4{kf4p_Oh3ac`A;(Y$=qRw=_?Zt66w_RUgk>k2qws*+k?)-ctl$H zKf-Lf2XxGSv=)E(H~IW(HeKFxo~Q1QK}N2~_pJeu0yj;;pK(zKomVT_+6-Jyy}fGS zGKnXo(*5|vN5A&wgZ$BpVVHN%9;h=X1j(@$q54IUXL=eTA6_BJ*$H043_;0*tmM?j z?FNs*t>k@COzX9rKe>gRufKEua+%?8lA#4|-`W#hVbj}=C79v3%qUwI!^4mC;DKC3 z&PmC2en55_qdActy-==u8FU6t*c%vQ8cqFX3{FJ2$b*0!i4M&R86}=hjqd*VMU4(b z4M86aA0xs_Ov&UQgGCFM5_+E1St4Qtq6_5XsNoYW`|+A}Ay4@9W_p=>6nDw2522-XIlO9B)J=f+Z8g0u`rWFtwOi)-)obs5c$pQ$4Q_KU$NtCWyzIoleKF{MK1pySU z28WZQF}EhMp*5|=_I;^QRF2Ec`p-#%>{<#2&teR{>SF7HgI5B7hRF`_D;b|MvlI$9 zPY`=?4X~qV=EG?5NvtEm5l0H}RFyo4wuF7bEDFf9AOkYQ67eFM#PY`@Y^;0MM|NDQ z`(&eSu*7$2@hd=l7vlJ(uw$2yU@{Mi$q`(?CYn2Hd+?MFroJ=9)nH>}sBP1N#7vQO zeHN||=Txx=gb6MS6HR%P` z_vs9DCWSYy-_9>F9cVDkcxKxJc)SCIQ)x)M+Iw)ermiC&Y|UwmW+@B{z_=a8zJfO73SWzvPH7`0bM| z+S-p7H-EK0pTM_)O085~=lRe!K=YU=EqgvK(Oib^j~{$zbA_u*H|N;(dJQSvE?6I#Y@tX`A;KPE`gneuDvCuH`zu z(Hi6Zzs~LlK^(42o%FXi4WLIg1iV9i z0=)papU3EwFngHa<7Q+($*p=_&m^@_@#v#}6BpWiD~LJ+(20!u#GT0em5ae63#46Q zvZ4`X=8LoZiw30q1>KtncP$X0`V|TB`6b>dCOk7~$vbqqq>`S_VAfkcBtCY!T<`ws zu<3|-*I$T?e&-GO$QN8R=JU*or->WO7;VO$vJ1hA>kH0qOKn{~*+TTE^2sQ@EH4f0 zR^k6H8rx=K17|dF@|LHKn=qzfa--9F`Gf+zQlNazgBY;|xb?R?3bxUf9Y`N%Vfk^oqvW z%r|iWbK{KY7}JQ>P$JK;A>{fer2viWzS!`KyNMFbu%~}ccg-{3wE?d=lYJDh1s7`R z)3;14znjx8uZwB0r9$XW8JQr<65p@;UY3bOU3~`n#=e`XvNzR)s9Rnc)2a~Qk*!%9 zGvRi^y!)}Z&!jx>5NTiTlngx2?&ky$j9zrTQw1bEvA#{_UgIBsrb$N)TpQNrf4T9<@X?ncQLyE7y-c2!##qO-e=WFgVXYR$b)$muK z#hcr=j*i5Dq^4(Zm}6w^cxqYy-6y|Aii_s)A*~i-+RK$@Png(~ZDMqeO-d=J0;;Fl z(ZwKFWRhA1?riuy7XrEqa%};BV%Yjq0EiyPf;&rUea~L#5l++{c!2ung473Ylt7G6 z6<4!C)3!ktPI|ItHn%uY`i?}Yowwsc7^WdLRZgGha&k- zLOo}mD799)ie#29T}w;f!{RB5xm#BkIXo{oD%*3v)yM4&k}$Sh*--=&W_#kBH~HO1 zN84iqL-b;a>B2CrWtkwBHfx%NSKkwJ5)7-W@!K?jN$EoqVY@|tR^n@8Q#5Xto{!GKoaH?eA&93PDe zmC37S)mxvdkG*-{yquFfCd1Ax{NMV2siwO*QsEIU+pnQV@YKhYBH&!+7bQ(`W?koD z6|WgAV@osqHZ5M?35|E|siVIZ0;KJz4ya|hCm?E>JD#MLGQ$l9aza~O~ILv2J`{2(%=E2BlH3vpoPg;aZK5{ zEE$r{ND$c_;bT;=3u5eubD&H?YWpLF%zm;jLFL3L^zni$TkqJLjZy?+Q@_;~VT&yb zc0PsN&l5*iprcw5^q9 zxk>6t9QD7hbXOT@`H{A%RX|-kl?Tjztn|CaVhXI4cS>c_-U;FRkX7P`wX&6eKP}94 z;x2xrCUX0qm3K(Lf*tWy?cMUA;P>I^zTHsTS00w8R#A5F_1ex~pFFhJis)PGJ2Mf` zpx4wtxbt~U2{CHfiM_w=KmH}-!KcN=#@?-*#b;j3_>{h-xg0FHtYR0iVB@2iWFfDy z6tHJs4iQ^7cfhT-A^JVmQ+Bcws()>4;3(zFdNpFPipi7W%}74O_!si{tbndgTzM}w zZp~OHJ)#U;|9iZZL!`;z`_Q>Og_R+#1VWYBtZd^b0Hs|i;P?Iwv&~L_nf;E+ zZZA_m`XrQ!%1Q~EM*6Lx>s%E#a!WWYqiMj33Nw{N$q^zENDESHSZ4ZM&(fmTJ1U-* z*WL};s+s0{4gT#~1D$-snckd2XLI2l!*}^S|Eu#Fh}Y%zpSu+?G$-%Huxxu9!LG~K zz}?J6twOjq23;0!+$}Yu(44mKp}F1)bX(kcUSe)9b)47kg4YnQbnQ`G9h~u6+AAQu z_w~m9)B@}|D_O3;jrs&8kKyC(zg~gX>N9l2Es3e07-J-zAr$8%g`^gM{N!~0zy_)# z3!`&CvD{*Y87YkMlfM`^5IGYXSCBv1tmqs!fWb-3>yWY1xCbv=y z@4wPYh_1VbZXrI`&3)mc=LS65@B1BzSuG_3i`G*{=_}2eEcey@fKp1v_9i2Nn^ZR?W>&0ugDB3T&)HP(VyXnH=xCLlH|76rQzeid1;H z&TN;O?Xhd<*i~%eR&mG|vs_txJ3Eegg3AQo&g;Vr+|&S>mx|*5{T48o%<}9ra|P0z z6z1<;O%#YvbB79MWnK+AF7aGt7*i;rF%j^Hun4r&53tO(L6FFSm97u)MZr2xQR1gr&J2iE_KP`=qV7wNYVs#*{0UsASBEwKgnd79h!*c!=v2Glvo zMf&IE)RSv$4t0*`H<`_2x;aX{Ar?p^XE9uH3HX2s+L{hRqh9gF=$6#79h)BPt4WYg zg&9+Lh60o) z%-nL+d<1WJ?Yvb+N6_WA*Is4%$a_y=J6=f)_=NwEG&xb}?`6*i8f^flI8YCa^@s}L z-&b$0&Byh{cq?CwN!hBfHLAw&P2&QmTC5&if}TU0l)Q~?LyuvHIa&|99-62__cK3A zyl4V-GBpRIl-<7EQmAClcJ;a^v`cHvvF3byN*xf%ijAG|a0zOwaZsAtT(v9hsCE)1 zh$d)w7vS1D6hW}CgaT?WG9n?7-s7Pq-D306@Z)op4@H^x(sI786`>W_TAu=VWGMo7 zjYW-dW|v*!+f5#Cn74Q=RP)@Co_Pe+c>00^T>ff^CRihx(q}2t(8nJ}x@CC^BPb(C zL9e-YHM~aN#wpl>DqnAy#rBpP&ZA+m#B%RV^3JCqao2noI@-iG9|V5|;d8xKMl^Lp 
zG_M@j((~*7_WRbnJ~S{gEQ7T(d2>eo;~X!)sM8sS+O*&@QnDeE-$ZLrpQNOg5T+@X znrjRO?7N(%nq1sE5<$h9--W8Ihn_H!QLhQ@KlO#~!V;snZXn}P=yhKaWE!b27c2cwYFB!?6Q)J$4>si>wePc!*ASTz^I)r@jMg!z9dcG3 z9>)mbCF(neYlnj@$bh&P(CE@=jqGUH;jBA*Y?5izNfp?R#o(z>XAo%qZm7agA`K+k44>D?w-Ba`2n5++0D-};{L9ey zI^;(2wk3EhwC6*p136?lB0fAIE2844+c-t}#fqXw3vLg^=Vyyh<#^9DdZJy)qcumc z;DtV}%NJ@SITFEF6}igx&sGwZVm3a4t%$oRZ%(N*|27(k=mJY04mqf`uIHk_+pm{o zgK$hI7i!+3h3-Qo(km9?lUdgq9Pb@5X?Fa8HPUv$A|=wuWKpm)7zmW={tskot=@+8 zQ>_W=Insdipi!j;S*aPm+Lie5GY42!LtgQdGwMM5&`*1 zBxqTQ)vWAd!Tw{{P>2eGkWeFssSM#p86Rj4R%O0LpYEj}Bm{?_>u|xeXXpRk;;Tv5 z(cJUOTITol+vCz`Ah-{nH;N)5XlZ5bpF@y1ohnZa(1gg-45Lb_3)s7jfQW!fHRG_& zYns{d@;0u?iI-6_Xo($Hp==^#oWQM`^2qi#-#QHCRx`$sznVV9rmh+XdJqC}cVM1d z?H?_;x|XWQTq(|h)6QHH3T2Lf=%|osa$wjK5aC2#_sZh?(pQ;90yzl!Y3yLO2EKJX zZ&PdIi^mFM7gk`3^xId(T4O%Aj^1OXo8+MT(%&0A&j9?46C&b_Nn}b27 z*}wb?*SHB^y}$LujkVYniyYbW;RP{uS4zBuuVc_q4P0+|Mk{qTXDiN15hPaCl2Qg5 z2!&~8gu5YpWH3lsR&2C8pQs(&oXIlzcDUyYxk@UHvvx&)tE52ivo^6UuwuNX?(jT< zs;kmlzE1-V-g z(I^5?p$)v<#S?Po$-a=#Zi-?{j`pm#tt;Fe`vhSz2vpRZ%%$MWpBVP1k~hD=pP*P8 z-h;ea2tH%UaOE5&J@dQcq{ zj=G)RmT%!yZFN_On`QL3*2#~^-DXLQY4RVHfazZtFS}Kw@*|ktM~$?H$%bkWlz#g# z*p1-j-G3DB#t$=Ek3}_da2GiHZZpKyU~k{_c%{-g4*1&8FhQ#Ty|y&sHKqpbV4}~ye`NHUW6s~ z5P?n)$8ETyLz#16a%I5{Q>gD@4*fpTp00pWf>}C~rs&aSqBa;T%Y-PN1FgxM9+FJI z316V2VL;J6-bXqpJf7|@24%@$X-brgun;K2VGpI~mu0L=AO_Zyi15PmIc#=>2cjfV z>_-$-5bOVg%B&i5Yy3gwY*L;GIcL3zy3gZdGzO=4hLGSvbgA3skp+=ONVA&-4k~Ca%qWF{+RT`s>kN91Zi#( z1uR26H7*06_oICLMhh{Y-`x)h{u>85f9$S67Ag4Htk&u1IC^zmcP>|v{G`r$+G>`2 z`9Cw6CbN@rv7BaH(m?pH-452Z#`u`*3=9u5TdFkQ5Nvyr1>Q^YBARbU{45~SX|q7q zOb};GxD)n{nq9T{gKCr<=E=b$DHiCZ9|B{Z+LUmS_@<;D1M6?N(MZtXUE^sCmd(p< zk(VJI;Y|DqO6Mxno8%6PZPNGWQ4!VYrIrx$zXRo*gpJ$vHc*N0%!VD~WNpdOWBwsn z%)L$^a)!Ev^=))|(|l{L&IIdiE<>K|_&_jUnO#!!s$I{t=3dN4!&SNVR8;CPRINXj z7U!f%@z`mmvD zEJa>5b-)0bsLB8+35rA|A7N_Jy~ImFZ_4@&lY-s3#iO;4F!ivC5?Jx zWKa4Z8eh$PXg^SG8c3d|-+8U}Ig1`2J$5ugb5cn~1JQ;`AA4E^Dz|#?$2qNs3a)d} z5^Vx%$qoy}<_Tpg?+-=4n}ZI?=N=%awH(jm8o5t)RM{ChxZCq&={4;?(j$^GA#6RZ zpHk2gXpWohK0i#(ikG9r=Wtg!U(UvkbrjJxSWYtJ>>&Rq0x|V2Pa?_OQ zEmoh+KZWjDQYd~Qe5><9v5r6_U||#HcIZV>J*te+-HvtHq%iPxU`L=cAb#-%{D&DU z7arUtJtVyd7}C!*U|k^E3hfRn=M*bxsc^fOp0`IUO@a-6S-Da0PGuA=%gP#4WqWQh zeIu8?K~rc0+uI9R4>vt8wykpSlT2>1wq<1nv=vz*Gb>caDzlSktVS>Z0&X`4Ck8USu0dP zRh5<6!7p}b7p(W>zPbLIwK@D+$B;=ASGYYH!>5j)TJX_wd{+}RQ|IW7y3-sP% z*Kv9S8!HV(7Q?Uduu>Ln7btnDeO`t3dApAxscLn~uO*k`>rt!n%|RcH$3~(!84P=4|kR zzx(62o^7_3)HjVa-OfAPFFk!#cAG^K9~Ff1(~erSXp6a`p?qnU_srIr!upk`MF$UP zsIke5&s}?w_Hx7IDsOF8c))ol)l|fSCk_U>%;we!?ZQUjUI$jy69B!QVDk5DB9Q5K zYk#@^?scg%K2NrhOSNU%spQh+8OZhf$mzmAgc8mOx{Id#p-p1KE&@7uQq7p$Z=ls} z$(jo>rgzPVq7(bRBEjk;p86N-`LiD$J4-u2MDD-Yi%R4!f<5)HzNaEw3t*7L?y9;d z-8^+xZUDSfM|V|S#8jzfmPI1%SuOP4Ecz~X2UIe*w@294()$Fq#eQ~lHw087PVD^G zR=f9Ida;Zq_^TC#H1WUf@B8a02rP3Zipzq)JRv-8YCG|l>GXPqGf`Pl4cDviD8aWS z>LM-hI*P|Qyq5+It738Z^Rx@aF>Yh9I8;?#QJD&ITeILwSygpiv?X;l&2)>_(Hh{x zZ|xisNOl-O^9FO+X}=7>JDy~Fq>FHa`-qWl@K(CPH{k~VvU`JHxM2-3t+_tKJYqn7 ziNypOj9(|23`T2Y__YnASGUeGS^FQm@<{uNnD)Ja-CAa;Eq&Mf}nEXjTQ?NqM&1VtbqFPJQ5 zjT*ez!vHOMlQo&C(JtFL6L0{64;gqmA($K<1RwCF$!>$G{mXVZ@%bKKeB9~Y%!d}v zIqNoSeQo7NZb(DSFm|g0 zdypCdA)6wRxF{jNigptqrbt|(HLsEZUKu0QBZTcd0EpO(m{(}`vXGTGP5!y8))tat3Bp5?oBMQmz)_;IlE7V?Y z`$yZ0FY;Trk=5GjZQ4q5CGP3__*;ch(l#y|sEd0#6NE76NT>Bb>?!8y(NnCuv76^w8%T z3frCh@IG;9Z;H;OZ%JibCNgN2Wk#p*6k9N6O7x8WQqI<@Xon4xTo0S^0+D?|xv4tQb;1DStYK z&$`aIbKbxKf_?wXBWl7qxmqjG^-;{hp*`A4SY3$Tq|%bA1;J`x3Cwr2-dPKl010}6 zMf{_cr0@ffZy)YnJnHq=gbzOW_pcLpBQetTdWN4!-Vp}FdWD$o6FZO4L=hbw&(wZX z%TN5_2exK!Yr{w_e1nsVq5r6zQ0Ap}6ACgHU3;6Iv%=cDu_hUnv&l?Bia|G_geHdD 
zo1%<4Lsg#W7_+HyRP$rLrU4G0vQ`^$VRcnFAYIvLnK)s|GUmG?2L5IqahHTSqayXs z!N8-e;|ps@DKUIt=$Y?^Ei5G3!a@q{ldxchQ#mQEttn0Wph*WPY(2rI5`h?2ugE9$ zcf&o;zsY~#ZIGYe0Ped|?+LkrjeyD;OIUSTdv=6OWmYp^jvdflPFhqU!>M>XRAX*; zM@V~yw^v8V^eag>;%{`>{l;JT)=gL5nCCBibKR9UzT$2kJM=`C-_<;3_z6ytUvXP} z^V$`+E6=T+KJSD{D~_3e;?c}c5CF_}Az(Hmc2{j8Lev8Q3ILma5+IR3-TfR-1nvHg z?$dSTT5_FswrtbR?mols6S=R!CLaqJN#IOe21w+6T-<{@+Z~M?kzN@d>@JCxImkoR z?gwq-%b-HhO0|t@Fw*vuvX!-;%)tbH?Dmbzn%jF>#Ttw~h^h&f)tD9r_S7i08pY#O z-SR%HLQ*qj_=MY;u)A%r-(7fww-Xs2^ym0J)SnXGR!TN3Gz6PM{-`Hp_?q=VW(;<#jr{AG zg$wf*<~@`5XMRhAWO>5-KuO(rtb7bgTv|A?Ze5HX@%$d@nka+Qweyf(vGe8{rM^aKcB(OCw|b}A zhU^w|CK_V(z+e6F$XZUd!OK=W3Y^jgJH5W4U#z*MwJn{?dg%QDEkM)i2ra=?2F4D- z0r8xga4hyau>dykF6qscFa92Y@C#iu5;=rD%) z=QFtx&qb$yUR2Ey!M=_FP*$Dj0x6&=BHpy-W|cp@PM0DVOe48NQuDAF*FdZF6FG7& z%jeBEjH)&h*3Q#RV|{77DL%oZh{LZ8^I`TL@n^bA zv*#$cG5@Bh@1bky_^{vUl*5nD8*a~G&bUBW_tg-kI5o@?eVFaM!&MOY4Hi0#4c>d; z&F;&mjhpNATry?Cf^$q&rs}JjG{yne2^mDy}be+q>LvtODq>23Uxg=7~>tb zQT)QtfkdMg`;J^pCe4>6sF_IM{;^wImz~Q+E<@TH$8xj>o#}NrgdcDfjx&9K&Za8)C8M`?GZZnIo*Z-FKN_?3e{9U4i~1Pz2f8}a2QB-lDoT!d1l2-E z?3(8dqYnC`2b@$_eT>?drm>PEAE0Y1K_9zKm)fTvk>Wi?cd9z%kDLmV?}yH$>tIU6 zC+9JUiFa57?5nl}cpYP5lMm^vrWnFBM0#jZg{hC`YO27|j5y}Gkqj}tY6$bRf~@7* zj3h>4oJ;R7tN^~n`;R&F{^IYtKdUe4zAw7N*nP*R5RuM($$O6k{u4NBZ@oq{l5Bn5 z*nYT)gY1s6smEnjxT#M*$zyCJD|-N;6i1=0iQ_p3cuJt!c1~Z^z<$q?DZOW3ui@E_ zJUmrSTN#~e*x0U0!vbkMG7)S%1MLl!!#$zb*;+v{0<#PHV>;RiXAyweZ8`YZ6{%C@ zGcf}s%~PY7(<;^)#b2XXoir!kPrDCnYW>TeN11MM(-K1V2oDK9mEf)Tumc+&(Vd-X z_ruE^JPKUGU}R-dWT2H73`VQX?)LaIb9%)|^hbOJA{;)+rX09Jcjr_Rw96(IUAKy? z+e@nJS-(%M;G*kk%g@pW9=>bZS4Cg(YailQYBy@X(7vDiTCMKWXFuNZy*BP4y>Hf4 zfLRf-zR_nVw7$`5cWX4v{^|ymSfEO|@ER&lOg9l@&{Cp{66=T-sjQ z;nw@Yq_zA8vP{l%5+(v@lKOg^QJ z5+JdsvWKtF$$Wi!=5^B7=lq?%J{^Csj`Exy`qm%+f~_N1x3yiQpq@5c+X(<87`l!& z)hf9aduk<6q$L|`X=^o2Z??9C2pyf+Piu4M7aN7Qsmw{vx$*z*oKc4#an58mm8VCV zHUFwEYm@Unj?TOIF!L@6ryOJd3u~Pe(?^{A%_#|f#QE<|;RQDTGwJ-p+zWloUnvix zUolZtR1R{Yz)JJRl-%v@VI~~HPBfv~OPN%t`=29}8m7I{T(s3lLd+~Er^Z@27FCC7 z7lPc_5opLjghMHv8&(RVY98p(Km<)xB<%kh z+c5=vMDkpy>*gc!+#X$T6RwThE|!Xy(q}J#rl(I6Gzg@PEZC`hM&>Wq)_?iuQ#8Nr(oc#ThU6a|y{fag!77;K**mA~z5-~TL&n$F zUj6x$kIx0Nf1$-grR(@b7gZ5qN@>OSaqZvQ^*K4a+lg_&iDf*q$u7qpaKMW$&WpW9 z9jS;b-yL!FhH1F9ySYv880&L;j_sptCfO*F##>pq75l!Kthy<#pLMtAbzl@VF1s@r zFwIGipYkF;?FV!n_IwMRliu*Hi*%uSV_o0wxvsnAn?bInHXNH{fX?y8!_3kB|K%JL zvfJaO9Y)+r#$l!kDz1;I@()s_*2i3>l9bKB<~suBn~naIzUE7VX@^cW_kU)x-b^-G z)#pFl$|f~iU(5S>N}2UF@6jp!#pZ3J>zsv82szA3E54}e0m15z&^m`h$&mAhk^%54 z%@24&_F#)nofNdYXWyh?(zY*3yrB!&zUVMMWed?>dcf}{&@hV+>87Wc!?dHq;Tq~W z{+kW;a`*u0*;6m+TS@DzCmMDCN+T%5Gw8Dze5eOGJ@Z4iCfhAG>Yu;|j|pDd5FXZ! 
zHs147`@{+hXYIBuGkf*L1mjGwVp4~N9&PmKv$96-7Mm{>Wh1nRL2n>}u3cG7Pd^Ja zf+f-KgSS5@snp*4`h))SKREUF4Bv6*?BFFu`}&vX-}+K}o9w=3{?f~My;u7%xqu6C z3-uX=qO5uxtWYHwW|doIbOIq(@Sfg3Q`hX63|flMTrH)^38hJj`}JO3L08<0m4>tu zH}&8h+0DGtA-x+%Bj5cH9-;dEF!8*&E)mGO(c=6mC-havh@11L1r{}rTWELvE6;tW z!#TKl%w^d-#9_O4ZF=aum->&@zP-HTfnd%{S!Gj(O(FJcXT3gh%FLTC+W#Evw_yi& z2_H$H(A?KB)?H-wc&@G!Q`(G%^u()np*-ISi&)m(rLLA%W@3Rin3Q$BQzrC;Sl91P zi1p*M5S+g18j1^rd{&)pWW{-U@3|fiTcmlhpsdp#+e`Ce(Z15V&AEq7?rt{M5uD>N z7?Yjm))KqDDcNUkBvnV)Y;O7t+Gy@YMJpI_dP~vZz>sN)b%TdtV>xjF#@&#HVv+Rm z|7jYA|GP9qAJ^}s;a;ZajzC0VI#YU#7x&6CrDHw!$bXHF4e4}j{6p#Zgwt0Ojzs_4 z+?dppzmuHdZWVeYlCDnaQ{>HRIY4RJ#_jJ-(+C3W7+8b-BQa1RVe5ZN=7C|QYI}Pd zGa1{QD7AH>qqD6649G@iKo0uDmY}T)&BbES!++=rQY#C3b1r<-QSRyg%C5kQh1#fE z4S1;z)~-H%i!}VvqNcX4uhsayTZG}K-%0b#B(ypboiEmLmdNy7`=Lehi6?O{D6Jv5!n!~Re*Z*=;?4J~bh{s2lJCJO#LDc^CJAo&qBsP&MzxxnQd z+$A@QgSW^)aAPvtC%EIQ1r%J<4k+t=O|^u^(ib1PJve9KqI*_-8pq+6T|r zJ*{KMoMHWx&nEoy)32)>{_yN)pQz65y399w^_0sdln(gIdCMQI8}`wax8F`|_O^*l zH-$@|ntmz<3WB4StuMcM zNVJ{yZvFBxsk@f=j(12?S5dD>yTC1gJ-%J~lvJIu(Qif=<13QE| zhwBYONe$8Vet7NWchTOUq*i=c`?>2$DeI(F*K+!py8xGax%|GyoC{<2voZUB|Cm#( z3z+V(V{S|hcQtdu_c3U)oDP`mNcrC>tz`opcj(Y@<5PYM*|-a()Kh-{({U$((~${o zPhq7Sf0OFMrwl1|{Yj@h>I9gB$V+eN{8Gxb{x-*;&nXyH{h(coLHLBiY8mb3VWr-a z!%c=h(>6H|^cNg%F28_MdoVQ#vH*Rvea+;{H{R&FhR%W)QMvJ41P)HX4jAX1c1YjL z-_z=ng?C9Z>@K8*-LaGy_CVy_AKtwKxt#X3e4Y9Q;QoSNdX3(H{2y?CdS5YhUjf+{ z?`vU|C~#x3gE1nT`?u1MH0C~|k9*q=e{cKY=B|gfg&vW2oPUxpaWGG(IFAl!XIk7H z>5(sO)Z^~mPkLrm;-)B811rWz_aLo@AEfQbgG|NW=@1#9!V$iiEH1|JeM6BD&kX{e ze@yxga8M1mnoGeSK#r<_tp&p2RD#wcdrT<^Q{JKGy6^$KiJaVQPbK1*vQW9FG7iVI zPHgZdeQbPoPN=M0e{6jBIrE7KRr-(-z%XY!=m#m(R721ag8d16ZU>ER@VXx6sqcMI zm_+_D?94KeRED&FH-4Vm7F;&`?8s-0uZ^iLbvJ2`z5B|SBs=@~6(>9wa7(^3W-eCA z+dmRxtX7HFd!ETWB;Wz=Xp^CEuut%KPpW>B0Wa=?_B0HuKbl$;C}pH|rJ2PMDq!Wh4{$Ev~r3UyLS`8Mrr_ z^;VZ1)bXbV2y5-1?NTz*o5Iodk<9JXmqnE9CwX1Pn%!%%hRnfcl1uwAduW5nx8A|- zk{8a#&XQ8Fmz^VUbAkT^(044Jc3U!<%rnZK?d(r}E zCe{f$)hy8DOc+guL)rwwHDu1;Q?^4N4o^E+rg)Kbhw}C2m47W|y(|Apd^zK2>&mQg z*txQr2J&h9O6`^1w0ULI5BGJvy1{$*3$NcSJWYK{NDrU%@&E7Xfin6C^mvcKi^CCw ze=O-AqC6tYdThUMnsmqZ{}1y>QePZ_FrMxNfWs2!?KJ&@^a+5BVzK`()96!#BC0$+ z=$}TiBS-)L7fGp>gkA0@EUv?)2mB_T`;b{jAi1%x~Yg6h>9YAvpoPX^icn3esrII&%CMXzb3+D(Fe$^ecU`E20IV2PyH&A*AyH zkoc+x`N(EOr#>W%xeryS$%$F52}v}u`XJeWeV*WoF?q#;SA-|AJ(kgAw)9Tl zWBIL^tvIhzc&xH(Ue&i%BjJZ1{`TA2@gIIDgg^X{?zPr~r;7u{XZ4(gj%3b)-UU|; z2t9HZ+Mf9}FO{<(cc>+KG-si0#?O1wauz(SLs(nW&pa(>rZbUQVWZpQf>nl>X{!v^ z=qmp~IS#Pa-8l}76ME!0I7qUmFy=V4UOPILNjaivZsnq@)(;G4P`;?0R&xB=yi+FU zcpC!162j50s%+#C6EVtH>=?Q08sv^F-)&!y{`ex~5RY>`Kk=+mo*&|Ly?(P-d(3PC zR_VVmNb3sl`Q77TPkA^$K54pVRk^W8RXJ_DaA+Vn`K*lgJ+;5l7c{+~oc0AZ^$rB< zDYvc+3Zh)?HaiOhr&WvR^ieI2#u0V}Er|7K3VH)MmPVLpXVCSFQ*A*%6E$i(v3b8n z=Y8-0{=Ad*@i6NQuO9wZd`FzSCqVj|JI@No4xPNPCuzU2`Tq#>&qX=<+w;%sYySCv zG7&k@75> ztf-m+swFehlR&wB5h(9}l0cbRI)Sot{sRIr~y~dnskXYE=wkL_v&C6c9l)ltu&}6}3@Oei$`|NTP}OjQYcP zZ+A;HB7&%h^@TwcMEzl-R*gmzLs8gz&dluXZrMT+o9x|tcQezSbI#11nK|eC+V7wH z+ADF6TVK2|16*G{Z_TWg4e2H1nT67iwBu>jmXK=;^h> ztDMy}0QM8g0PQ0zqV3IaSXN0xDW5+ivlttM+jlq%#=usTXc=(*9!8lInCiDCX;wh9 zD=>|45)2Nw@egiN0AVuyc$I{Hfs{B{`J~f}?9={0NjAM0cc54VYB-XWQ^}sn)C2YAlSi}hgn0UrL zU>dW4sm<&UcgrSeKD2U+anbf0cUn#5#~#tIxpm>DbEnMy@a~(QYq{wCbp!8{imEy5 zZhm@_JQ?3A!~+t!Qwg89{(D)pD+3hx{2-cFyzgm@#E0+Q;0*eUFc_cDQiz2 ziq}jAQEpfoaWaXr1JmNXD+z?mln14^l-~una0cZsRZLry8T=gCpOXE8i^0!vcxu#= zGhdULqkFl7&N`bARo*Xsq#TC1_!0qGnp9X=Yic1hog&v1Q)CI9t*3Pj_p(!7ww}fm zYLCuAG6h>JqyvjBsT@fsUjhtK^KGde$=N~6#=#SEVeU@jyM1DpJS?@scagE!dRd5i z`%LK%Q|!Vb$bl_(Dc_y6%R;QZ0GK#py z@!m77F|*pVBBy0e`$XY&X;`{zB6YIXne=$+b=t?W-~@q|2ThlEA2%Yo>Z3nk_S&hRHys+dtr@OEq<-)~`V(Rt 
zf-BY8b}}Nak%nQEF$BwJq+}T(#`qg&3`@EEf)`X5fdA}bTwcckCv6WDZ(MrylA=K9 z!DUx175B|szNY8ec`Lf_04s!vUw%m#%FT7OWjCe|%0sq-@OTV3Pabj(T(W?(&|y{( zz%L7O9KpIhxNi@)6lvHO9OBY9`6z|24&zs27ravN8lr#D=3Zg5*h?`r!vaq^#Zd@B zDj5cB24Ax`52_~RvfeC4DZW#MKu0kqA~y%obO{;bbg-Xdd2yal{tIADu^Rl0CgBF5 zuU=pWn|)r9Z5pNoQtvIKb0~r?$KokPrXNI1nrj?lNT`kb?Q(Efr@%iiZ$#VvCeSNq{nyyMwm@V$UW zVmnx#1gVfV$v-O=$UY+c#SrfLJd|7{CD5=5TXvy25Db|RiakyJis}w%u-s+%(kjIx z!%PZ7=5;3>ey_)>?{ZX;bl(Nsy6H#xehqVu312|L51pDLE7S%o_oWe`3?t*3?1Cl)!`Uu&#GPRX(}H$^j(!LdFK{r+Y7Fl-^$4!N z^m+*%B~qc2Rr=2@ zdVqAMchEkJ2j(;6890Bm1}6vc{+|z!%N0IfO1VvA!ngdqEK(A~^D<;OP0D&{v?el! z3iWQ+Vqu^M?qL|Z4DT84#m$i1yM}X=aik>DDs$Gf!ZSFv*7_qRTF%W72 z^-d;8kczmM*C0u%-jLqYx#8k@@vhG6ZrAmlH9J#e{#U<$B|Vs0^x5vpvOS^Lvc-#1 zM|;{4bTuZr|~;8^aFkGwjl+X(lzF+t1G%b7Y8-nIJMN=7{qo(sQiww4`U1 zpzmwNP=duiIwEknoj_tR@{LC;OV(4y#N*gyDTShhGIJGDnHhpf&S6qZMM{hovxc+k z_RD%%Q)`@CU6O=Lr$WyL!?Ws5B-phoS!`sk=nS|uG&~8h<6wzqycI051wL#sY!hVJ zb`neM$?V>j?|^!&#tEAzlu4GBlp{R>UZHTZ$!LVfsd&W@!J_iU%!-@pptP(!7rdA1 zqa=dqD@bQ#1e5LpUlgs`Yys1*te?{CD8??Rp;Nl;t4^HIsK+<g)K}dakH=4->=w8M-CCxj_y3~iiS?uZQ_X$u0NtH7 zf_R`DV_;-pU;yH?mv5@X z^V@u7kmq0kfpZI2@WJT+;{R0GH?j9KFfnidF%SR%T{8?r004N}V_;-p;Hdn&7DzJu z7yq}NeG@|pgD3+scm)8LSO**c004N}ZIfL{l|c}O-<_TBJCa6*VS*+ZS(GHHrKzb| zM>4UC6#GG=vLeC@IV5HgDoTi0lm#kML4k#MtQ+yCEC>sWy6DKcP$;@ELPfFw5+`yEF}V5CHXk2@1nLszx{8MJnf_)T+S*f5I-64==T%nc7XeK8*Vu zkF)Bjc}TS=rme`JU#5lKsODU?Q|e-bJYkzqDbxw?g%iRJ3P%>LH9qY_j_hmo6q5Bb z^w^J4%9G|T`;pB|>Nc60upY=Mn3y?P|nI+{qnfxA8eO)FbuNGRGd7ha2a#AB|QU zo@l@MEDQ*Sns89fVK+6IKI!+;;_O4r1gFBokHm|HMIQoidmD{EwoU9d>_<2hI662* zIAb`sabDv5#wEbj#T~%i#UsPBj#rDfiuVv-9^Wl~4*m@OGXe|(iv<1&b_sqGnkF12 z{6oY@WR1ujQ5Vq)(IcW?#06kK$vX$}ys=0S2PneTQoOmUeG$H^+wx3dye)qod}%@T^3yf-5}iwx)=1U^pf<&^p_bl7<@6b zFl;b9W8`Ku&6vTs%tXK>%jAful4+aiF*6o3AF~@0R!T3NocDzTbkZDPI5 zM$TrIt(!L`A4pBsl;nAESIu{e?;pPt{xbgC0%8IV1-uC~!vS3ag8~x*s{*G4ZVS8- z_$Np$C?IG;(4k;1Ak+#D0KyX?MIm*eGNIeTR)kLj0I~0vM*si;0002$07d`>00000 z00IC300ICO000310YLx&004N}b&*dm13?(ZpIJp3C5V(X$ezTZwp)J_5+Q;#X&n$Q zJ=krxG;CM4Q{vRGAU+jWarO}$d;-tTPOB@ilbQLwf99QM-UTo#auA5V!PQWE%tMM| zPGP>!Bit7l6{$3>T`X!A%&KZO7`f6sG{7m29uFgry5BLrwc!lZJCyF;qccaJB8XyO_-Dtb&`KvG>LmeJ`?jdgJ3()v;yOpq#kx03Z z18NRcUCGr^isCyXm6HBQWtp)WJuTWEZ#B^&*HuoWH7wTotz|Jkr0xQh57uncQ$~qzIK&CgxUT4$gI%jA#Z4;qd1p!ex42HS zxBur|GUBqgy}w^ncbrpOht>)G58O>`sn`ZzvWiWv8yx+A`2&@@d+2!DZO~Ut({T{M z@$W%tq3pfEy~qFmZ_9AE(z^HFqCi_jz+zS0MsRC1CcbH0FKXN~8udYqd&Lp;!Dn}T zKwq`w|MSgDE}z^bcexx<@TV=3wN&xH_yQ6o(L`e*1}ipViNj7j2_%w4GAX2zMmiZ} zl0`N-W(;E)$9N_%kx5Ku3R9WJbY?J?v2ctQ zTDi#qKCy*8?BpOPI4&{l-~*f4FIKU!TVmP99X?7Nhd9YE+W5^;PVvd+~pFNxyM#cNe;KTExD4%F3FbyDU>29w)*Ru8w0v6Tp!dlt-{o;DOK$;ln$j! 
zS>CP@8C2>@L+MaDl`dt8vQ$~7{I}0-s;j-aZm$XY!=dV`%BFzXGJK{kpT!#vHMAFA zb%)Pm-VxDII+V`8r>>y9004N}J literal 0 HcmV?d00001 diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Regular-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Regular-webfont.woff new file mode 100755 index 0000000000000000000000000000000000000000..bfa05d53f4e4741bf790939272eb128580116bc1 GIT binary patch literal 25020 zcmY&<1B`D!)a`FPcWmC#9ox2T+qQMbxMSP4ZQHhOn{WQ#_mY>_la<}Q&faZvl9M*= zs;jJs2mlE1Q}jCk$p6_|zW??AtN;H+L_|p%001iaVSoPzQ&JFEQ6XWGA8z2sC;tHz z@Ey>csGO|g4|f9qfd9n9>9q0$@QEra3jhF+d_OTVKhPuT$6u6Hq-FTwVt#zp|G4I- zj|f9+eY+p-yu7#OXe7|8z%1XC+_lON6q0FZJ40HRJ3lc$i(jP-wfwT+(``X8`>c%@aD{g42F zI>L{S_X8q$KX4&4YbUoKj_#-ZpLZZ7BYze4t!xc{>Y52Z{Z9N~$vRjv*jnH1r(bQo zpFI5k01N??Yol*%{KNVF#AN*Bl8oIS(A(KMIspK>Ki>}M@~6#1h-m5D_7c1w%y9~+-85yy3TsviX|miRW-{IAP%s>DExr9`s)_9 zMk&=jffB)L1$$~v5W zU3trM<9%}`xI5D$CvIk7=2b#+@b65}TP5KbTP{o+${4n13_|~y*SFZ-A~)o%H1Vyx zJ}hy@Kn<*ZVRp$f0%wWsJ&&nu|0v=bA zS|s=?0c^0~4jmhClz;xM3wDxWr3&(nlT}tA7G(!tk zmEWNq;a})3*loY`9g`zBcpJa#eyF3eKs*MONsZ^;G$VvrwsNQLY~*E(Lra-EPG&{C zy$jRslI~ZErcVu+a=pOV%ZLw|Yd8l$2c&+^5j!eF`rWTA-S>N5Bq^_?=+_Penf(GYl+|L@6$!IG#j4DtT`@RB@iVaOtAuLfGa%cBMo z!1j0VCbXXvF8jYq?S(K=>%^A0_JZ=JQ*7J0!=s$6NY$VFx7hJPte({0W$8>BhN<@c zBo=L13ow%lRFg|ulkpOrCa6tO z@5oPXcO{B6CZbjW%7xV1b(v4F&zX@O+6i6X20n$F&lwr&P2*kgd9F|g#qUKd7mjVp z&fwOHtqI#DoHqY5x?8-ApVw?j)w%T&2kIfbC$`YRQ}r+14M7bTJHQlVrR%cYp@%~6 zb3TPATkaI@nWwgcE38%yk?&)w*HRr1%oL}Ub1D~2-z41MMIT9wtUlF;WZ%7O2&J>; zdD{h2w^sKTzWw*F14VeO7D}f+f2mHCI#Vh7U}w{hUolp%mZhUy#uk|wce*J?fIqJg zmH7cAo}3Bqt8f+vAc3z(&jn~FAQ~mKBS+`k(NM*_ zBlC;+*%~=1>=3@diM}mWAPc-Xg~7fmZMY3fcAI_IkTEl2a$=8k8rCKFLuHji0WL~N z$Y$GcYl^}X#*_CYfBr0NOxKpZ;#VB=GoFQkHK1D{mTSNmXyljo^ztd=OUNCYjAS6T_6 z2LvkC2h7v(H!g&4Z{=hv*0d_tL@Lq^ldmk1ryQe9I71zAg4N5}K-R*7=of1YlrBw_ zqRo;aELGxbHv0h=>ur#)Y>{KQLS+?0@LN*UuU0V1G43i!o}k9qtu@-O9dcaO$yNQH z_On1N)?l<=dVhg$2NiDVYZAK9KWDHBQy z%1lxfo1{=l8ym|moCcr;kmd`8%CyMLCnTtr5;N@qLC-Z9b(N2-Sv*>3`;v|DAgbW(;fe^&IT)f9daxi&Kpn z#u(@Ug)x47PXprq)nZ~`I5Ny5W3us4(U|~^(*p}*UgJNo}P)X9wxX5I6@1^ z0loh#6%2%L16%-b%pi!-EyDu+hbfFng&xO{lc=90zYxC!zYM=PzcjyYuLOaPeq`Pp zNbok*_jkbLF4&o74+g06WDb4~el9+Uvh^?3Nl2y1m+$lM;cxg)+L!D5=kIM*_$Xes z2fgnAFL)Q;m0P|3fJ68+yp(O;f}4a#@`WHF5D^ye7BmqO{NBJSz=<=O3X2Pz42=!0 z4v#kwKY>1?JiYxdadLA^b#`|!VPa!cWoBnsX=-a6ZEi0RK|(`hMMg(xNlHs>O-@e; zQBqTMRaRGcSz22>UEXiso}8VSnVOrBk&=^?m6n%4q2Q*csjE@OBWj%nqt&@XO4VwO zdY8{OSZr3?%{uGhC+_y=D?>5~@yQO&+8X?KadkAoVX9yQ@ZblBnsWyW43!HDsVR_J zN6Tws=0+J_rjBRN=jV|T4FM1uRwfT)&hKB);OVmooECsXZd-DAOleL(jfUc{*$=4T_Yb^# zPK*Yo`G*CLU#A41N9p#G0oRz5ZR7B7{)@W?baVwNMSC>7MG9ZdOve`EHI1>e9RZAp zucJ(Sik8-(XFKfNpb@eBD0Th#d5G(yE z?l8D;@FXa6%|v5Vu!v6vv}?3iw{dkcK+MB{=tk8^MQSmwR$pWb41V z(xt6!ar$1=fA_j}V@h(oj0CS?AR2HQko*1;5)e^-4aS=8qyj%sc%SA=@IAbZ_Q`~7 zxLOh7N^W$9qgR3S@Lu1z8xLz&X`KoeaJ4P7D48RleAk)k8V1S2h3@A!>`kA3|B~=~ zb%3ZpKd!RJLoE_vg`a$oJPMULq=*q#seu4RGEDPe)!$e5P&GEI8o6!Gm;*}$3o-4{@oHxzEF(|I5r!6rumDC67++GYhF;OS#iPLJk~BOe#%MFlEE4Aa!opq(uPl<5j>^2Q=4r#qlIE8= zD%3Pg9##lu=)WdSJEkV18o6&;X*gefyXJGi4FZ=Ltg~&4sJnZlxZ@(?Gg*wcAkdmxJ3cIK1z0AIco>lr1JI z9~`T<=Lb8kXO8>Z6aDuFw#oQ^N@&3BV>9E^RTs~)*Eo@X$z$Ju?vPUL$*!o5ctdf& zD5Bq>WC>D#$|9zD%@&1^kiR^>C4WYL(OccuDE70!mXj90W#?(C%#Sb6y+7K&)T{WI zHS&bo^y0C>SBR3OI+khNjjoG{)};!K`-NGM&j{FrC8Zmb(cj{9^%TTJj zY1nJ}X;KDQj3H8^yM-O#$uspX796!PfyG=qvy>XURe}6=(@q0}jK!S`+bREwQ32Z~ z8wYc76k33j=-f;|CA$-43P+bg6$(i*R@mraC;$4x7WskkR1_qbX=y1TQEp}}eax`} z(z>9krl_EX|K4Hl?Y5sd+10m3!6E8qe}18PrPc9uvrfxZ_gQE-llg{7%s@Ywefnx*(J*({e3zUHjE~P$J_0=Jv8jvy}SAy_YMNwZZ=km zCnYHzIS~3=hC-aRE=-8huGxiwRhYOZB+16c;zU7-fSs~|#;#;#HP^r<&*C{pBFc=s 
zC!3sR5_$(Xx(qEj3I3n5o!a4?#ZC>#jTyX;?v*fCKNN^g3TfCxDEVOONQephw zQr-O@L~3on{EQ7q1JD=*xWYrZYArQ6h2fg{00$?xl5Di>-FtHQO0CzK07@0^j<3G^ zX>6C>oZfyCsOsqm;ggkrDphVTxraj#=$Wi10_Ha{!m>QIZX$MJ-+ucF0AcS-(Lx``^xz`!QDNc_jfQ>>RDb5T{BA81^ zt9AvY@z36w5TCqia2_z{#TzhidMlD!zHk0u>{o<;6QX&ez(&4t$cGO^W{T=b|sN}In&ccGfJ9v`X2=(1|8ZYeyi(@*l_JlzkB z^95I(UG64NZ!*daJjLJAeaMZ=Pk5RR=Et~(Cih-!Qe*k%Qr^kA4PYQ zF3Mzj-^kgXsF5tEfi6R6=Walgklg*aMNNDc-;n%u7hl(9P4Vfkvs;inQ(cv1FMOfx zRi)g`^R$I(wYl1-W>0jIfD$LmH<%|N1zXmKI0^bb0G5*;Xo6#m)NfF`eL%l%UP{`I ze5tptpFginNP!s57w+DNIQ$y_O>PYs5?B%)p-ha7VHT_d|rrkYCaQ(PETzP zXSpy}kR~86AYX@w4ap^O6B9;uz-PY}l|$2%cLgI{$?`E_5nR1m;RYF4#wR@n#zHpf zOPhm2A$a;G(k7F=Nv4HZ7=4y3@~H6!UM$;KHE(l(oa1cI*O>1Pc>hIGI;w}B>->&U= zTt&l+0vm0Mj`K0Q_tq$lX0}$RX<6oGu{av4EN73_ zc0Yr-1?^Fgxx$}?gAD)0N9WvQoC-jkwE`W7KJHj`n@YE-G_&L2Fbbe>`lC)6Q&I9U z%XRZl$xj_bgMLPxADRFjpmLWYzY8Ga>@$V3y7y3Y;fE*!!qI~@D_}#C&xA^WC|j;H zJ`mPe14FG5Tmb@UY#4}nG+}}vin&n@dtahzalh1zgRGYyzE zz&<5GEG3HS5YJ|SMmiudIS@p0DJXhLM2#!V?ld2__;PudhN`cXik@kg5)}#{^R51| zX1KJUSEn8=(2%fLB*-f)Qo<21Y+EKArs2b0D=2DHTJ~P4C^P-u(wYBmL7G}S?CIbl<2|26GlTRKVht(`omy0+ST96VBi1r&G=i4KzH4@Q?MJ+L>c?1xK zbDL|MHcvyRcNwK4XtUoRDVL*bu@Zmg?)LqwZ7wf2f#-O*gxBSI=IrFpYF6qrb?#>e zL8ZSY_j2Gb#6Riwu~+*3q$}QK=uyU5PcJkg2`wUQa@co(Y%&jMC&QU!W4-!-lOi(C z6@w~Orx6M}0y#+NwZso!wYqX*68;-zLQhB}Y_|0Fr%baq)7-T@DTIQ&jJ;0?-A1On z70L>Z9ioW@k^vm)mI$OH!sXTFCiv`5W*%beUE%uQaM#-=o-N*Nx2N^-9(K~U`?#`Y zc#sYL{M_jwSZX@MT@>D(>uKTlTQcoqwV9~}&%>-{`t^#Zyw4E2V9IZW(0~oTJhX<( zNqv&?Sn1L;7z^?Z{o^)dyeb?GyNv`d`1%q1i;x(*M=kFskR@ z5Gbg|S1Ri*j+lfm+G&%`h6JrzYF#4mGoddTD@-HY3V!lVC$?f~A9i^JPorL}#PK;j2sK@f66ZY&cbbLM4L#RmqVf?~|J(OMpX(lTXPB zh`pL~@Z+&zb81(D$fEhTf0uIvm~-}z9t%f##w|Y?hbmzB6;QCS0ZJrQnpYThqj9?Q z+u^`zpPoSOERHNms#DyRI>UXQS9kM!eJ>T7^>lcPJ1^U8qr5`SIrP%7UfqXnk zHmfbLN~5OQ@U@El{rheC)6pf-b)$Lb=KZ?cYd)S~RE3SpZcvdyM{hM`B`atwRK1`96O`-VjKcW0a z0N?e%zG;)%>rwQEI~f53HY_{{ncTkW*t{B!+NM5)m03*o zFBhfx=-2sv&4s@>b=Vi|#$(kpX)(QyE5@sJ-A)_Wk%u$gPBL8)d>kKa?ktO>^)ep` zr|mP#i-H@q?D}ri${CqUGyP!?O^{+J7I^q4O>Gpy{nJ8h+%Kk<#DN>ks(y-ZZ1v&hEG_CiW46*l-AK=kl)9-1_S3rjl6 z6s=VgzUwp@(eVW;4tqos5i4bJ;B8+}m_g+CN&No)87t{dnN|nhiy;rFA*=5k=*Eu$ z{C?lXehs{Xm*7}-PZ>(t&$XR9KLKu&ul0T#b)Wyp zMoaF}RefZAOf0qGh;YGaEH)MI?v`3eKHU#ef8q)7kUbYGMVN><*zAb#mcT>ZXcO#y z3HcftJce)INnS1yBV^7jTwzqz{a93VBG@ea_rUxH3BufFcwhk#PmdfcGndnlAztBz1=fB@Ad583DRe z;Ke@oV{j0?t4m=7jiq$i5B0WM znSugCS5}-y0qGap_p$~Oe0wHothN8(9$fur%lWdE zq-m?w;XMw^c%&2U=}t3pf2{9=7DrEng>RpXkFcGvfxy3_K+KhFF&wEez;_2gz)em9 z+JH_nhO^R?6r%*!W8eY^0(`}#LC_ixg!Ya^OnEOvvr~n~K(qIp^YftIJ9(v2Z0R=ZhhVW&RK||p2q&zfrsCsS@h@cX^aA*9M zJ?fS@w6j_ssb3*qhqwFx($nDZSWDmFKHrynojvWK8#^6OtS$B#5U;%mH`d{hQny#J zy6Ztd^!6%DkucA9|Lk3HI$l=ptd%z*JnzFEXL4SB6!ZV(F+pw`szNFi-z-bEj{K}G zeC@i&nCN`7`lGvE z8$ITqfIp4-%VuReOS5NOe|edUS`-TU_z3Gf%-nvywO41HXv#p?MtbpHuy7>o&kJGV zcw)}=*T^d?w)B!PM69Kinwh#IE=~!KA+g%-J@gMqVtkPBndRSD7#ddm6OYuUuHfa7 zQp0Lb-G2~%X^a@81{Qy?B;5j1^}Gc@5q>p$^`d*Dek}!XCCA;=DR9!pJsW}z$~>9Lhx8RDxP79I&` zK$@*nsqrEi+Cmx*;2CsZfIe&zQ85mJ5Bw$BomycA|4y!V3njR`l>vu4WP{89v+p*W z0GU9y_u@;1m2>}o)Jc@jTS;Wpwu&gjhZLFkiiE>AWh z`3l7Qlo}?Vv?_LKPR+}T*Sxp*IAf@IbmCVA`K@x@1(xF#*b#_E1ekdqbJuUF+Fpqm zZQU3@-FLquPWx(nX*M3IG@^`_4M_6DWwLAlvvzdT4rb+>0G!xi`N|9cY z6M(xSRlL#Y-4E_nFaG)3xcrUjsq%8yg>B>JJB%?ey$MjoU0D|tSK`-t-ESEa)Pc+{tLlDH(*-1bV7I&EUXN+KTZA9-`T!VQS?2|6oY3_|T;+aHe!SBD2Eu+J0`L(dI$0{+e zqFr2FoQbztV-x??a9Y}pll3T8JXdU^zUm{w@6lW5Xp`wFYE^jhbG*>&u)=vVUk%Zw z6!&U9j6mO8V%*^h?-Idy+_hdp(_%$NgDy`;7kV z`=Pjzvb5&<_7c14^6zPS_TzN-`$;VX`6fxVPPg4q;&}O46j#mtd0{v%xS;v%lV)+W zwi3?k1Ht3ShNUi(J-aI4L2KN~XrelD zfDEr+sE(-bYbdziMGfDvRnl#LVfyLy*5&qKblczV!W~il&4IaZR|upvoI;I4mzN8A 
[ base85-encoded GIT binary patch data omitted ]
literal 0
HcmV?d00001

diff --git a/modules/ui/composer/webapp/src/assets/Roboto-ThinItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-ThinItalic-webfont.woff
new file mode 100755
index 0000000000000000000000000000000000000000..9ef17a8681fa09e1be946e63f79e70628360fb21
GIT binary patch
literal 30468
[ base85-encoded GIT binary patch data omitted ]
zUBx^h-D#n>nuRV5HdgT9#HluwBd0`{%`G#^?OTye3#5d@mt`mEp4@VT+H9#5Hfz_1 znbpO;b8n*b*E{#V737h@vW1Iuzg0@Kh4ZlA4Z3~v3x2VhoH|txQ&K*i?*3mWEaxv{ zBEp#abeOxJE5ZJfbndyv+#>?>zrfrBxKmg@InEs&D}glL^W^QK3)BIs>sgrUGjIXY zjO6f((4q5}bw^kRQ@d`)Je#=^1}qljj(Ewi*|oOo*uw21bb6g0ukB0U6gGhnjH(~J9^$C=-ki~HT%Fn$fh|0 zDjx5*gq=N(-a7?6>%2L}UgDi}9P2K7FM za+q)lZntoa_*>weu(z$~sxwjJ9LkDwCO7@hbsk|S)q3~s5CHB)Y~)8LkS=kRWxrxsg^rL zxuA@iI&RfW+kIAIp961zw!2dEqiEQ(OTFz0naV7)K5^#aCYnf6KnP3N^XLUF4dvJc zVcu;c+b=x1u?4U;a(ygu{;b)@lU%z)BIkt{j#|5Rd2o2?>eD0xe|0sNyglwuYZ(sx zfzy?-A70xyJ46IWbK>^fdM3GDh)xmh3tI0E^dSQe7&oxuUGanJ5g05-IcNlXaJws3 zxg*=_^BYvqzXG1J-+pEAcT<)-pcr0?rS)nyx1z(&&3&Y~Vg7VSw%g;UEemDU>q`ti zH>y(iFVUY*Iq87OnWBt$8gm;CbIZjFt%02aLW4~#+z)9fKw2=DHC0sofy2x&%{%+e z@*bUE4xHv>UC|cwdD>x~KA`i(gJMzf$5oGnx{#iLs!yBh4yqfse^m9!nUE-cyp#i6 z0MkMVl%U<->Mr59Oy&wMc(}(xod;D?p7u>^}6zyOC`RmI+ zX&>>zc?%a^$Rp3&0BaeSB3wDQL7$OJZ^wf9bE1&$q*@LOR8ITi>s}ls3zrL@Igl5{ zVw#Rzb0Khchu&i~uN=9!*F%#qvt-p3psiXs>Ty(x<)oR`rsC`kCymeR_GQ*XHq`SC z^@i_W-mw2KckP||^^TpRE{eno=QTBq_-@X)n#QBVsUQCE+_saRJ-R_VaOHDP<>!19 z$e%QN9I;+;{EL%D&bxSRR|;sB)~%a91bwQMtK-gMaU<50tJ2b$bfqiTYx7-3<@!(x zco{%g#`rYmJ7Kz@8iNmI07{458KD`Z)pE+_$}cX(W3O8cmLRYKW<7aBM)|sDkn;7i zfwk)yYGwZdcGG(uee2h!P<~fAND=#$JNs0yn?|4)e3?1H<(!l=j5Q5CqMt&Va2-||M|1w6zR_1R%u|H0_04J4PJ=R6_;-sbzPGY?{ zX`ncv4Q^eup+OoajqqVeCXLj|k0FheR8$&hDJ}mW(9%!){YBJF)j7N4SKwwI!%{ktzW<_1s8qXC`i%0XOBS+|+_5JhZ$Pm_Ug=6J5i5(M98jY6EbK`UcPg(}{-B zR6RRtpxr>zSnR0sw!+tju!}>YsVb%`WBjnS9vC!Wv}7noToKhtXRwxHba)V>UWn2@ znTBeu3)oLRb{7Vv`q$uiuo%IwL-&k++IIX`p#7>0rMtUT$Gxc2oTZK5U( z#p%kYj@-Dc21AJ4n!%C#t4P*R-YNR<cXl^1Yzdd>@d`$- z;{a#XToX5%dnyxWb$xKwkj5F%S1XHLv>UmxL>WU-R|BZ4aVgDcuk{jbGdrqrsqqY= zRYu^q*aLRdqu@d_)*YI`gEs63MR^>nJoIWiEO z5$-q=qTey1AEOO?R~iwJBcS~oJtopEFx-pQbX-K>Di3{Wx`Ec0Hgj#UUkf;`bs{Lf#zBY9uW7S^+Fi{uHCkVM!4<@~}$Vq>GDW1u3tC1ok;zTL7tW-hUXj$gHr>Xr3cjP`OFES^L9#&fKPjpxX}KAt1m z4;Rm2h0;ciLvNcGjOXzE<2m&o?NMG8`u2Pg-n#Ggk4af(EQj`g`Q*8b<}VMvOWfMI z{Zh(+=4kH}&}sZ!h${wdyI=Q|2&mU#);%R`O1I|21`uJ|Jta@?o`ODzETpDDkb0t0 zqQgt8h7_|q>5_93zrK?S)8!7^PGuBT-wxkXMcfBkThF7cv5EoT$2xo~|5x~wfB{`l zhpxjV=j76L{dM5!$qk1Cubb*(I}Qt;@DWwPdLE$w8t`=kdzA8XkcKf7{x4umt6B14;T)}NR)+`l5Ywl6w(31{L><JKe;$PO`RL%;M77X$tEgM$&VbLY;U z3lMNVz~WtxgW-8C=rb6T8e+txXnT}j7wkG(4|e65UPS+S8+L!u`*QNf08S4x?k@(q z*64w*Jn?x-m|Y{;$T;UG;LkME=Y3;cdNiF#bZ*c0JZL zPLFj_e~OirT3KRx51IeVXxIF%T9Q|JQ2ci1&Mq16ibqs_lmRWiZ~7W^r5bL&-l@7o zpdvuP$5}9W5rI|F>wZ82Q^62n@nle^9}{e5mp9+Ry7H?&YV22)uzos%tV-teV=VlE zCQw_<<@guud@QQ;gJh-NU8A81byt?{+ts?VLX?M9ZXbVZep_Vj_)A;2jlFj9D(&l6 zUegYe@bX#nmW59kxnL=IeIId_MZ}}dj%`?TqOZvLj&E$;+SQ9r*LFNLPWwUo0|~sg z=K8lE8e2B>R$9}*30HzG;{h&ncW;N7TpFMN8>S>T0$2>#JNq|{smN>^(^2k03q~)0 z@@76!QST+wk(XtIZ{#yYu(G$YjA}#Yh!n_0Jy6Z#sEOvlXnbgap6ZVYH!vyyJ>v4`nh zBfgKaQ0(nsW6bxX!Qy1(*Tu3g_L^P+2-}&Xhm`zLv#`-O`VOyskEDC4RfX?Y^Hb4W` z*$+4~7|?(3CD(hYToAZc?tb9`QPpKqEvG*ghnAIOb<&69=$-w=z1RX0v<&Q`;OO(j z9DKD3t9zbe4+7%bKlngfcHn?ef`wd2OQ*s;o~QA$q1e5#H(o}Jcv)t!*Jc7#K zBlh>9oOnQ!pUEtMHlWpysL=?TG2fT}m-%9N6KCtwXPb0llm4v* zq08X8#+*NgIfqfdIn12%2Agw!KOCoIe;_x^f@?`9Z3*&Acs7{ufVodGLTv-F9ZtvE z*xa|%tlJRwXzx3FA1a7Ban&nf9#Stb;mnX*fOa_m#^Q}qBOxQ?7Q{;-nKhaV=mt>` z+uorX=WzZC@K_*j0CcSwiW>-7r5=l;eU5;Rys-0I26j5iO?NuNSfEMNt+`*<^GHBa zIb7o-nbi&sY=0#F)ri9`&=v{X^nS84xq^OJ?WOekWFI zmEwxQ0X|q2t(ttS+4707&$DI4jXjLrw0r>M2D}$!WLflPc}6mxFk!R^28MLgSTmpLm<)Ecf5LewmgQYPlA+hgEt*sLa@nhLJ2z$)b?@8U%^2ml|jnPMI zq2P2lJ877uvIx!K0t&Kf4#Rugoy>C6PDc(|_4$!{-PjDGX%f=j?cxvtOw-|(=>wC#HTHIIto zqt_fCTDbCT<_~1?ej8_{n%T(%BL0dquG(N*WXI@37+CDT!(qnq3;kI&4lt^n^n}bF zj;&g=&?AO z@|^OdtyYi;sVt^((`JF#%v+%R%f%Yws)D3H4Q?!bM!U(t)7|=JPx7)(C!u9+dP&f&9Hhh2IFMj`QafSZxF#}} 
z%bwy+`}o4*(*Znj!B=_B=9qBkSu#(1@C6uWsK}8?be#X@+2KQnesK-_aL#kjX@6kw zR%(aHI8y#!jDua2D(k}crlStvJyg!GB-KZPm+Va!o73CkW^n894u0oAlT7c7e=FBT z7qVjgcXCJ`ej0+={9+6n9Rb@fq#bQk*<|R+4e^FzY*5c-HmC<>f&S0%t#iXk`2=L_9@%d+b)Va2(tprNC$nw<24?Q(_ypAK0RiL zkCrKxflpy$R=K#L*jy}_<+)_Wip`|z`nl5FtsA9Dqo+!yu_vFbO(Ci_iO<(=A}e}6 z=>j837PWmi@5N3Vh(+HFx>jtjMIq^P^B(SkA z+`s%t0_+?T)cxRjx{6GjUn?P5&9c(?;IDgL0qO|uOF#PC*v&I5+D{xab^5k#+J3%) z-u#F`aB5c3|1)&*{I4X=!%^%10b@PH zfB*n^+GAj3U|;}ZPLI5=@%%Pl8RR(_K;YcM6}~X~|B`=3Y?IkLf$BIIm_VWcMc52X z0001Z+GAj3VBo0!yB0_?{9p2a5!+-2pa=?h0|1zD2LS+h+HI3dNL5i7h1dD_f36xq zCa6it9YlpGJ}rU1!^$i$jZ!FzL>ko~dVrI#K_H4Z75x2FkJv1DAXYKu;y$^f+>uj08#%KV}13qG>v18{u8l?*9IuB{)2TI+o zsM3eXb5!! zf;Lh3%_*!gVT8|Y5|J14MFpXeB-*W%}s2QGf3pOGC8ksKBCqc4-c3j1nk%y!!Fh zIL>|ax4;D!QDcc~nouakNONAGm%38-z;AnRy=fHu>P0|5(0?uxxTaorqQbmKkK2nf zo^`M3WG|ng$@V@{yNe6*63yl{Dp{+MH}J_V)Tp1m9YD5Z;+!*ya&fTFq~d_-LcUI* z+Fgb+_ck{3OzR_kp94<_F*b`x4l|I1@DR`2N*yNsdKKQkIsAut&b$Vhi_Kyt%tEv1 zuO3GteV+sO5mF*qFo&6l6CR+S2I@U>U6YyNzZ_;Wi9GW*GWN~FW--qEsGd8h^ep9h z?f=n%7E?{1!$4FQ@JHn|@5d-9oCcq%?bL&OXYavp^gb@Sm(l73kwVrw4M>qbat*0+ ziFyMk+zn{AdnFy!l8hqzUGoHm8esm}fR-apTaf5^MSddj7iOu}U;qGk+GAi~&_RMR z4AU6vn7EkMF$XbMF|T00#=^kj#FEC+!m^Cz4yzt(1nVv~7q&KbCiW`!EgTLUyEvY4 zigD_2`f(<4p5vOrb%Wc1dkyys9u=M_o-@2Gyh^+oyz_XE@X7Fn@h#zd#_z_zMnF#> zPGFY65kW3NJHZ0Md4gwz7=(<3x`dVp9T9dBULnFKQX+CqR6}%;=qoWXu`sbNv1Q_7 z;tArbB)BAUBvwdjNw!Fyk^Cm*AXOr@Me32Xmb8a-pY#Li2QpkTWinr68)P5JamekH zdnG?dK|{e!p+KQe(MK^)agP#*Qj*duWdY>`;Ogp1poj500062 z0RRF3761SN00CwI0001Z+J#ZOP69y`JqrjSqJ>dAbSeu2%R>zbCMH7Em=Fpi(r5wf zB1G6RyNkw(+S2$_`VrQCfIr~b+2Pr^lRI+G330CtQB3zi(RVc(Uq-QwJ zbB|dgmZW!veT@xP-D@0Srmb;w0mrzecnulY8pql3u5kiQd}y3R3O^dJV;jF3r*L2- zG~U3Sk=1w;P2)}DEgTx(8h>IpGOhnYK9Zg^WJmf^RHb9tj%QnvB=<%!6p*q@wf+CeDbzccEr=gBV!W16-DNfx10%XGvZ(R>OLMB z2V@FVhB^Y}KI4Ik8gtzxC1b74Ra*M$_UD{Vn{}bL+>>EsbsE8(@@kGgJIly%y{Ki9 zXU=7^-lgswl`nF7jMQ+=J6xcS8}3=n`TryD3v3<|$B8 zxf7}^5|@ce^SAp0(K~iV0001Z+HKKCY*RrHfZ=}<$4;Eyd!u*Wd$v>PHnHiwcLGUl zhmZsZ1EC{&6$ptNh;{))uL1!FK=dYxfCFc`H~?2LdCQHte43q|(P)<_+TwRTHT*BW zfJ8~OSR{sMEW}{NMl5mIi6?CWl<|$ftlpiYTUpQrgj;4s@gw zo#{eXy3w5;^rRQP(alxi$tXrM zhOvxeJQJA6BqlS3sZ3)!GnmONW;2JmREw1(Y+xhT*~bSqv5W2O=NL!D#x~xup1l$) zaqJX3Te!u0iRS>v`N5 z@S2yrBEV<9vXy#*EMOrGG;)w87ST+I7Q!rM2}}9JGM3ZI3Rba_D;#1Ct69ss$RB*; zDrY#$HQw-65+qTQBw11 z+C|R04uUWgfZ^6cxrtm8Ck@6hb9ZW1j4y`=tg!WK|I?f| zZEipxqyV$5YD%B7&bg=-;L8HyBOb1D23(vf0xZdZi9KjZ_G0sq=%6FCKR80aD;Nk~ zDwqgktzaeDhnO@a$~Hb)se*&BmkMKqGyQ>rs}=~`d6yELuUUS^n*h6T)ze1?yPjU# m`}8pM6AZjf$G(hNb2LA3o-LM*;=&rsL^0aAPS%>V%PH?ma# literal 0 HcmV?d00001 diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-BoldItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/RobotoCondensed-BoldItalic-webfont.woff new file mode 100755 index 0000000000000000000000000000000000000000..df69b9529129dd70897b2bd37702084b020fc72a GIT binary patch literal 29136 zcmY&-V{j%+xaAu=nb?}xwr$&XGO?41ZQHhOOl;e>jm>xOR&8x}oqD=Yod8Of0I-QtBVCD^ zf#Z)a?8c7<^8ezOnYFv=4|f0nXjK3J)dZ~+k}u{a2F3t@dF+qoCqIA{ct2yp{D=JE z{;P%X2gLA4UVpoZ|nv?Naq_Hwo5W#MUVkTJpceGHG}@==(bjr;%=?1f-=;@Hb0lw*w6&O z(_KX1?+YES&oHPA%1jISJ2ZH3(0t(ppO|vi2iiZMJRAnGD4e|B!k9)?6Q-3aUzoB3 z8vat~SISGs>Fx8?g2lKZbvNC12HW$N;}yqt*YhN%d;FbqyT@2Zz4f5DNTN{01j>V( z>-KGLS0lxJRQInO!o!e?!=Ukvm9KP2Qj22U1uq(|8YAaFlu~s+ovm?kIi%d=mH4v3b0m*V@3YA{C z8Kt_cvf8O8&8RvMEHkcGw5BdB!yYSOnryrhn!?>Dn6AgOg1{nWP$}y!54Nu&pck43 zTr5ktb}aY+q8)K|=;)z!8ujR=#-Ra+3(^6K8wPe%*?{|-TkuST?LgZ;FgrqCw7!3K zP}a;s#Frfa8mLCYmYmn~|g1Byy8 zd0#0$vw%o7iKrA14P2wJ0NI`w3EGQIGaeEvCUkiOnQ?<1FOXn$)&0K$no!13|c>_EM1F5 zgLj?{@Rk8tx0~CY{l4LV7zli;|JQSHnKQMAPo!OR{n^Flnt`hWD|Rf^p?HQ+^KJXb 
zRuv8_6*(%Drzn#;F1+gRN2kcRa()Idm2U}pK*|3o6 zKIMJH?<^eg;H~C2IAM`_C(AU^MM+mBXU#^Bi=KkgWu$w*;eHv!tO051TV1eZ_XtM? zdgm8Tz93{j^)i1P)IM!TTfG`_?b)y!PI+6}@mleEe8W$$^OoaLQ9a%zRw)okD6ZMnSu znOtjoxw}8X7RT~^SxSXAURkjSriZoNvgv2)VzG3mlVRX^p}~&Jvd8?dE=((^s^0kt zlL)U)Vnb@<_VV9)KFxnF!5y|Xv-PVk;A6~X$Tn?5Rn%l1 z@9hIklNTiKZL4conKgl0^EC}bY)!Quld5Z)>%eTa2uPiL36XM|xMoiwX!{t*2ybxCUkhAA) zrSc-TiMl$T#kt>QZ(udTuSDS{ zi7Dar#JIC=;MG#FCLl6U?HKbNOg)2k``EZPdDR$Ki@F4R!=jr#&rW}rui~g2q4#dp znpK8B+o6(q>8VZ^%e|viLrsb9^IW|*K9Mhcz*{fA>0?7%PxCI~{zWTYqTNLyD+Pr6 z4%hfV*=+Ksa^`aN%pLn_pZVb}AapqaojrmhmIv^ji3iQ3jC~5~2*3X$Nb$(SZ<++i z=Z^CSju{YK`w9@0j|)U{<|pE^V-PEY$G1bHSdf!1(c8sW15s?|VzssO+dEIY!@+XzD|Q980b-)$ zCToa%l3_W=CePa6la;*z!XoFgCQ{n?=GcRhxrcIbf->O>?cxB9#tz2C1qPGluaoI- zkccfhMZ_PiM6+DB)K`{Vp+F$U7Qnp;oG;s#s&u1VqCY`_vHY291%pc@TNA1aj##4E ztyDErh9=rjGI)II#kT$+9>z zo#NYq=D>0=Z6|MsK*-Mb89jb;q= zo$T+S40iPOfx_uOzNZ241{0&wa-iW1{BHalX!MgngM#wuLAzie$N~7V|3-G;04zG- z=l>JK^pEM+_iY3U0FEUA7&9=~G2SpPFnpN7npWy_4mpYWNeKuGND9abNC?OX==Dhw>Ka7l z&w&JQQ-6O4O!}Y5>#O%-fG+ms666x(5r8ZjKrQz{F7%D`zI;D_*L>%E@!j>D^lbAD z_1NNNdf6TH^+7=3U3k}?_Z34>;WzTucKQhKa-2JsT1fy$TJys`QOBABbs)$9fq+n9 zb3u@yvmwyo_xR?<)`pg*wgwg^Hbz!vcKYYX*N2ydkGpUG75>Axn3`5z$DXZ zG#kE-Em|#CYW3I;6i;fk*sj-E9d~!VBhV)n7MX3=Xlf_ESN;ykvw|0*01+}JS#frE zhOKyFE^L6+h($#JAj%u zqnFNL^PwHMMgGSx!@>^>=&Fx2{5ipZa)Q+W4uDVq-GE`h6krjs3fKl*0SbUHfrNp; zfe?XEe@3`KL_lOfbbxapct9T@63_r>0|Es+07ik(0Z)Kzz%3vgkO2q0mcF90C9j6Ko|_sZB0j^Lsm=R*CreSh~MA}0mm9j zLt5;RXC4^T;9``aF;~zbqy?KpT!cLkKEIzZ+*%E0ZeF2y1~?(QxR)R=Arg~SXg)I+ zSA*kJ8k5vmT)FG1)wI$vacuu4_$2G6tawuc;JNVpJSws=07BE+^k;p(f5Cuf%qDVK z0uFg>DG)GaxcoF5OTK15po8B(@b9@W8ky%G7C50!2|`n( zMx9iYnkHV>`gH0WPV@sPC;cE+22|Z)@!%0iQRZ5R$Ee{DpA0JoArTXBpg9gVM!yRA zU&WDzIwd5z_Jf#~TzfF3IA2DB*Dw$bxeO_Mp@aoR zRbGR!rn{)Y4;0_0`4fE)Z)1G2ARDh%gt=3ioZ%T%AwB-CZ`_TCcc`{aMF_gumRpw2 zQB1z;PIV81WaGgM2pIKcOuv6g`n_>eS2LT#g9>@Dd3@(LorP9(qVv|1&G*!4{&zSA z0swt~0}$NS2D>2iwH6=wzOJ;^JT>oPskQ29wCYLL!*PbE@Qjfdp)dnu1G@;JJcS@L zfc3}?P|Y>m3>g51KX%$1@y_D# z-?tE3QNVTy9Bv>i3f?HML_7L=nQ{Aq1Lut|^BwmR$j|BU8|_oq-kW3GeJ5s3LRNR9 zC=M%>lk8{JXnZ(y&d-e*?<|trOQ@fkBf}QgwnX6Ge7vX@i6r}z2--3DAepbT=Rq=* zO%}Q1AJcfOA?x(d9cK?lh!e-qL^nk|mRm@j7U)}|03^`jk3>g5=^nHrYSrLox=%D{ z4pC)(EPmVYGcNFbcieNmZRlT7ed!6f$U$O*bRO#IlUKLsd2Gcpl(-3ru+61`R}A!+ zn1n~-Eu-8T;|R{vtB0!^yE)acCaFU7FY!Ikh=TYn+EWjb03F6BM)&^nNOnHO&VjTs zoO3%#LDe}fVg@SA%y0=u`%8meCovzU();v2I4E7l|P?WZ(qKlqB{5JP@X4E z(p#RpmC9Iy(RX_(BIfpGZ$AuAo z$K`w@w;vxO($prUR%&1QZCf|DkqCJOg7lXt>0;N#i#sz5am>jmqqPQir`)1#zlXwP zwO#Me+w{q#RB{zesD9lPolkiT<5*T=+cc+9VV@7BHSc?x<-lL_IGwh$Tx(hyO=`H+ zl-K>KJeBxb5PH%b@MixU)8p~@dU$l@bI?6*)F3L3N$2$(?1m}UyR8tL&iK2y8I#TC zKuSo@Zn>66_LCEtW!4RwI!Ybio=;4z)D;0yo z9Ub%$(ry`wI2Rok1)~wguRfiWje{n9&y@{`K)Z%lxbNK%?Go;7=JRHbw(WRXt>uw+ zj;QIorl$C*%jss8GiSKl;<|(QhHIVngJzApYh4YW2VA0#L>pq~twJl_w3-V(=BzKT z@m;dF@e%E66GTt<&&=fZ^fH-Ki+hX(#AW;Kc+axG*#T=BN|Q(s;gZw}*}P}&I4nNoZ?PPIu~h;hwxoOUm|3PUjH1bxhuFka=3>}Q<1@p z28ii-n}27Ff%nu`f6uSd*LbejZ)oHmU*1^gSEsSZ;Yh5OQCQ=XTk2KG*`2mxpgV3d zT@O4Izj7Pkjk|J|Xx;mo@EC`!L83ZGchmyHg~ePYO?Yrhe=ubqwhKx)G_uM*Vf72NUvpj?zBXG*m_ZHkRrFTkNAP~mU2@-21fYK(G^5St<&)+ujbz^`5zsep?1q7Pr0 z8m(2wdTVr#eN)rk^P%dVG-m*^I=wY^<(q|3>_niA|MiQX@`bPxl}{0kIX%P~8|i=! 
zs{XrSdS`p7A0ks&{iUUe5?T;axpS6R(6q{Xtp~!bf3X?KmMR}9^*iaE?&ypn8u$xo zOs`k$Z2L?+J@a4pvxm<>o8f%6E5#M(-*4F~p&3*(etoOdOehvGxo-^<#p*fn?}h5o zvtoTar4iH*yfU1J8<-B+0v!8w61ken5Ct8&dr%=KkCu|o{8ASL4233YTL*TN3_2J| z67fuF&O_!tFQP^3D7O9kW;5+e1s!V2v)26%F+{WLf*8||jzrs>1lG7W@st%8r^$o0 zGub+Cl}BS2;?a0k#p=%L3wd1yU8QbL{WU+8!uDa&P_MNDbPt@-_X%`_YKF$t z=8>5Km@n&fci$OO^1HuF#N6Xib@T>XajwX#@oiHV!{4Jw3ko}8pY%ut)29;xEU4ou z5>vTnDiTN?b^%QHAdOG5nq#qS2mxloc+?WPq$$O6(9``KWd1+A)7`jQfe?5F42F6?dbdGkoz2T9@60yS?@VT9< ziuHWPT|ljWzy#AHDc?09I)#Db>A~g<)Tl4RPc=6#=)_m|OW3kztGre}Gx%uHD+{TH zAFsF`nGV6`9x-2o9f5pG06xzH?Nn*+Y1$ljDG<$Tu|2ZYbhZ>NK*mp4<2JP(HF)Y; zNr{!~ccfHCQADN5LQa>8D8;6Aq`D@?6(qNmMWUbEdV-da@r$&p9wZSu4*m}2-|q-c zle92^LmX|XcD>q)SpX#a^OvyqJsA`EBGxR~F~OzdHUXnRc-YAvZP zlW+6fPEZwA+r+PU6wW72G~=o68qOES36GwXm-D@O$!*gukR{Qzc$Z%g6QPU$M!e!V z>^wJOF$4C=nfYL2a)eEoFHk<8Q~n-@)x?uhY%cHTI$O8pQ_UI+RAGF8uK&4KKtt2e zo%Lc*^)&@5)SL7Bh%B7KfrMwwf}pZH)1CiX3}ga|ch67eYJ9pkLl;e?`^}V8^196T zZk-daK)YG%Cv8gg4Z0qk2KDd-esXE_TRL8Xi34HGAQvEk?RPdCI}A{c*~`pGaDY+d z!4eeeGeHo}5s^_)`b{|%|DN`B9-YRxruFT8mF>0Q^R*|0aLY>hnSt+--N@|HOyk^< z!MT>wN?RsPR??b@T6Ule(RS47mH+Bzw7u5;*~a9vnL3qK|Jj5f%X8IuSRu!5go(|4 zwjv^xQlncsHqB9+z0Caj8brzGq{cCmnM(NquO23$mxv!uSK{LnU@7UmP;~1%>-5|o z(W#3L9)#F$1%D;gIVzv*4hv^aUWvINP{V9=Rze$L?BH7-i;H?g3H)$IniTj(5jwFd z{qCC-#R&rs%Tj+FDT}+xa;exuYrhd1#kRQg%T?EdG&_pQ_KVhYmd{G;pP4E+h)kB@ zIkefQ=ilv5HsW=r!LL_%#R_YcZk4}fUNv4AM`yMYi`zcimjtO9vLJ81mYO~})M?0NuGRg$ z1m=hE&|v6$5a>4qKAqv?{A^H17-Vhp-s2WJP}g$ZZ`}+!PDKf^LhLy{3yyK(LMI6P;?o*o46ctr`;pQlJo0EA z4SvmCS1?)arOXau#?8??U2od*Jrgt30Vx!<=;Ok#*`_oZimy% zuIsOJz1k)FG4b8!eiLabreuxb1M5~*AVW|y9**|qr$B&r>|L^b|Kr^q*L7`SG6HYT|O#PsXWhRUHezp2gAG^V(t>Jkvxk_ z+OFmxoC*Xhgn99s29IK7*1#zuX zr#=Y!0Mq792TQ)F%f3b+~8VpgkA*gY7t1 zAlvnCzp_3oYFo}zvD36ct5kkLB1i71!mDC2-|X*{{);zUH_j=LyIZ9&d$7|A1BG6F z4m3_Cl5^iS?Gk#+Z} z*_^r@r0+;LW?c-62jv)J!b*AI@(W-KxD3zyHGy- zEaruE72$2bRxFa{{89vNZnO3MbPjIM*>?}HeKM-EuM$H6>QWDVVH^m-r7=IAi*>8I z2n#B^qe8BzL`hJOnyZ@~5~d%|e=VN`yCO}Ios_kx&-GaBmkRxd3>!-6H4Ju5u3DUQ zBCjb-WL-0b)|$u$-(-G4X=twOD?sT`)Rd?y^~WGY%9G9Aih91316JdD1OJb{4e zXg}C!-{~j%VMY$Fv0QlLnLzwM!3fWaETE*wd^oeEn7kWmu!>bGBvXN_f}_?DI71_% zftnAJA|r!-tt$-8xk8nP#p+bt1Xu>}U6d%1tVqmSAh0_F->~ZhMy!XdGJT^nGL=m2^R^?)BmhGapORY2wf}Bx*$mhV0V34Uys(_xOnre0m zT)G<1wUcr*4?w@Tr_&hn4S$X!)glKZOd|25#=w9$&&^&!AU> zJWTdwV8D)GN3wRD`6AWX>4mCt#BLo~tIly4_bKN#PbletMmWFvZtW6;`JF|jg@T~{ z&DSHQOGvPfne38R9^zwC%;Z6byoKmz1FrS-eXM;S@<+PfE~2JBLnc}zJGSDUNrS!% z8Z!<(!zd@#-Ds*#WOfRG_2mYd4Do#gZr8Pwb=!}H?Pw;Ll2@0y!I8tZW0s z=HGZteKm$}X0LY1Rq)Gf?;4P?XJ|NKBR!=V>LFjD7|>irOTp1Hi_^3mJ%tj*x@09v z-1MHHNVIpvu>I=v9D42Sw1?_aR*{5;&eM_be6O5BRs*5wLWSRGH7Rre4oTvSct zv>!c0bOG80c+32~@YnlUjSp1#6ms#|j@KF7R!gZuTrng*UWt$)k?G6c8=e^$a6X#= zuO`43r_6jYT2u|7(_=z49C!i?90Xp=R}rn_LKMCOd}l?fV~P$46iu4scruw$9$U53 zpE|9|MC5O>pB=)`!Eb!B9Sc}r`-4k0y2?yLOGRFf=cq-t9tQ8Ib^V&m`(Of{`h)nT z3$gEu@9fX@B&ztjr4x*?^Tqe^7f-)s6KUl8(lfu2Jm4B^JFu|AgKg530qC(y2jzWd z#eHYPXR4x3cz{}lB(%V97k;P!H{8$#X1F8gQ^-SSlwJV@Th|aQ=0Sas?vRaM*9s6; z>j0rA{9d0MS+90=AiM?m)Zl!SO3YaQG31i%x3?^~`;0F*h1RS4w499(wg{RU$$^Ww z;j(`E9+)6M6E6ao63C2H@&sV!!Yc<`<}BLZwA)6YyQqaltjUxwj#rGiDWeeJ z|AKeLV^T~u=6=4-p6DcK?-U$$GLCGw0ZtZoV;ot>R zt%Yba8$yFSdWat0z~Q$gi2*?|?R|S9Ruf9gsG!8BM&E?IRDL>YbaFE4{46u-rx&v8 zpM<-&KZoO+7*yNqV?EK+nFyV4Fx>OQ8xseaMPHiHP3rUJ2Okon9WsWBFUnz3hhaO5 z72=ZeZKgr1iEvNyuZlQ|I8FJ{%+KXv*f;AO4N+%^m_8Nkk^m^ST$W*{#1zXgdNxoB zQ3bKe6~VFQRGua&bX|h^Y!WlA9L zZ*lLQ+Vrq-2CzsY>EHDx#zXv^e99y7KT4RE!?54j7 z%x5Fre9>=QEtbYa@9RaWpm~g_h(-J1@g+Kb83GFu?3#rN5jW^sp-Q0IldGD@Nh)V6 zJORtI$Jn7?bA@z(&h*cDyD}Mnzw;-UmC;1PzYJLWQLE0Fh@(w3j`?#)ff#2(SNozf 
zD2nN>%MQ_@hVhe9Zf;W|;L}ik8-MqA7u*CsC<#{(ee#uh7;vGvD7@h_#4_bm?@+)H zf3k+t$OKGQpc)kBsfomChKK4>s5p7BAnL}GDjq}3Z^&xKAdLXWU-?)j-5uTn8m>t% z>b)J`pX&uuV*pk)#rG1WcfN#zEJqXNhyWFG;Q-B?qNH7YZ&i%_o!UB3mjcG3FecFV zlv?ZgZrAF=4wuU9cj_pXi|>j%$aSJEsCRe90xaVtfMP7=;5{s@ zV^#inh9H%Kv&BktsuiY-A2oICz=xhVvc2peSbb(Gm5x5C%RU0=FC&izcoBbAtjn70zDxRlOsp$l5&dV zg}GD#UC6uK41d7dP99CJmm_TJ*H`(TmJ{Bg6yQsoHhT=do9(vH4zhKvK_Tzc6>emgrT>9FsV@8!3m`<&%(IL|ozrIdY)ssJ4}~kGFdg zxN>s(2Ggilg0vxT@A*61~_G;*Jb$6GAtYTLjfm-E)Z`{r2s3_#6+ z@5teRezh|!{;K|ZIW&>&+SEMk_w)JlmgBB7*xK8Nly#RxUbrO7Y2`!|1`Y1n zXewFzM`PVM-tDFv3{K~&J#16B)8sx}f{dk!GGqeNs;Q|NqNCr)G}4LN|(%Q1hmPAzU7cnAM*#1`ga!{~d`Sud8#y zFm-Z1!tY$K+?}#Rd5cQR??4}n=ZX8cFEws4yG4D{4uq(QJwD((*G&k1ODx5$5$LIA zbq^|enN(8#B{t7c__x`!nv$%t;!_|Ilgb|KnOX5$^`TZcsx+r<~vIeSa=Yi zi3dLSDc8$adIO8aD-|#7w@c!y^%xkO@6enAS( zi<_licWYga?fk&cp~AM)IE=%GEa$ilbNkd2e_3yqJ+t||UB#EkfAp)>4UwDAwgseB6L8AW6EU-cw^ky%E zJ=A^&xPeWo{`TX*5E59(12uHcBK!rbMh333fhiTTfFT119xY>v&+j1ml;#U^RH=Ll z01`HYs4eY3h7M?_v-TDMb7vlgOneTTe|dVw$EYiOLqU>nzB*DOi|KS$1c6d7UXU)= zG>}-1)>G+whcxKIGbF2iLdjUWUyNH-IH)c%!*|)=XeNHG=DfR!%1$Uz-NVB#I9;GD z&eo{T=7`k+!wN8)LuQ1i1t%j0v|-09lCXc#p1mxa+dr{?RM+j)Yx~8XZ5~PM;?wXu zg6aFw8@r5bNy=nrTF^8gJ)sj(j^hV zG3@E--IO<(Cw))v2M5*WWQ-ImxDOd?TjTj6Zb}P#!GlUOO5e#7iWwfrF*}3!Yv}%H z@VLB{>E_vMy!nv+)A4$Lf)K5NU%vXwmFM;K=UImMPa0B|eBIg{7Iq7kOG5qablwcu zFxc6iD=nAjlVW?_kPBW%%k{ku)Kgwt7`s&|xz7RkbeK}bFZ9QN&QGidHQ(0+Q&5&O9Z+IwYUg1Ae@Sx9P&C)E1`nUmW2Z--cS@L5XpP=?)Mj84Wxtw-G`ODjXJ}3 z^cZ7?PQ8-eod5M<8(`>s{3bo%e8-JP^;rGFBnX5d=skX=3;m)H6_--pp*}?+;e->y z8N9D(&kIbb3}o1lpyywnJ-3p%2HVrel;!j5*zqx!^VF4{>&+kfuo(MIWLf+UkEOA% z@fAn3#^B7!xN`4nB`=6-R!@`}xO1DOL}MpNk;YI&AQ>3nVv)sDH3hM3BdC5?n4s&8 zT=L#EK|2W=G&Ii;3Pr&XMYckIDEf+fZ^fr>4%x9iui-5k!DkoEj`kkuG|6LgBkc*O zojSxbTtgfD>DgE~qYI(urKDtBdL^=n1(_`N#-Eop1-hGE9A=BNLFlDUoojg*H63te z0(b64W_kvhiP`NEFiQ4Ox`r!C+s5@TL?|!i@28=1_&cyEb&ZcWa@x#cx;4-33@z9_ zSx(o*mid8=$%z_CM7^Y+PCK_vKGq2PlR~%L|W}_-w$aD$?54M zAPAZeB_8hnWHjJkWZWIntCVlgULvQZAUU6sGj&jk#k?iMht<)<=Khf85_xsTvPvx2 zpg`htn8efz5MjNr<-5(AJjiAtlUMcczGHtdoXAYjC#v0VOzDiJkX*_`-@iCK2!6rk z1emq7SGel2+rjfYD8!XF9}6w2sN)AL<7h*uG4ozM1X`t+3yO>gf+LhthZFmcFq!2p6-^GiOjBiVau~tU+D!O~ER+qtUz6+a8YrCHe z9zG#1PjNX4C%qdU-bdj2H&uCPgHhcqlk4eT5ac&?Svvj8@)~|kD4ELQ^6VSk6qDsE z|5koVf29G^>g2epbbfZNU!JgmTERZIeDa3w6nu6KaFW>m!4P9<)O&5*?RRjlUo0op zA|6s(0{430*BG-+^cEDc^}YN-pg>wWXcS{|?#eQbNu0>~L1~b1d@x%A(WorbMm#LS zZdK2rw}U%nDt|ZL`Vsdbq1w!3G^o6nFHngr~7g9 zU6d99-@UAeh4-%o*!A&5LOwE8l1YD#X-nhhQRzLr-k}1Di4SAm%j){aRmQUC(qtJl zHTJCFr8AoOdy(k6Z5q(GG*h?;p9~{OpAVIX75rJ1!E9jNpcYmBRNt1S-x(>FzO3Cm z{TWAOiD~`uWHG@Er|&XhHdbc1|=9+=B&cG26pjM^()t_8+NDosA4f1!l~@lGQEVg`~C&Lz*wyF zbjsu$zQbNOABf+C#Cf4yd42LPSYtC24D-6_aw7&zwii4ryR};IrR^?ezl+wi#WVYlj_S*R6qE2l83D7f0!5Zw$H@ zpqmY#M~jm+9i22>Rtvj_Nz1%Wd!0##C`>p#K4kB$ z^kHTC#H7a}oU6<{ zJY$#c&0eXFY4o~&`#XtPV+vPww0gD}PB`+cVCEiBI7&5?XOThDm%00zdplfUkw(PoesTE!@6H8)`WD+YM1L|rQs+tGFN;anMtWiFzY+U z%I#iebO+nQRL=q^Mg*CUQVrS(KyvY(Q#Y(?PbCRMTxd6fxb!Hw&mq@aiZpS;is#cG zg&cb-{Y4b+fuYEyhhT|C>>Y87GyPY5oXPw;ooi0Zy6INiMT5 zDCqltP(v|(B@OfJX$i|8dE@?i?%&VRUoHETLvk70qOlHt=mGk=!LFS`H-Hbgw>UVy zv!Hy40SeZ8u$t4L+maQfh~cg5T==~)DXN}x2TADo_Ws6491-S#@V1$eShlfzgTpot z^+4R{FXEs$9Vw3I;XiY7nb2kK?iT=K=JkWP3hif!5LO zoleIYLM-!pHlg@Sto^F%_d+$!Bi?Y7)btM7!#Bb>UIUxMn<&%7p>6FETw+TQcY3w& zNb4MT`c$qclbbA6U>FLr#gyezx%S%NANXeJR52jwB1zXHIWX?M zea`tS{=J1R!9eJBAu8C3kOA|~0Z1>_d9m{2$#LZN+f^Sbud-f$rqx;ano->nQN@5` zmQdUea0d&jA$rzJ7z0>2VyS*VD7*SOL%K?L)mJzmv^q{8{TXk|>upiw}Xo#>4veb-|4T!VVKooisGBX zsBt^>yvQ!nEqA*2>)aN1sCGF|RpoK{X4+P9HhW9FkBW6ucqak3(-NP%!LYxWOo)r# zkt+1wgTur8zDRd8*;+DE@Z<(nIO%escBdIP?b8V4Y7iL@A$*Vzz+onctWurvr-S{& 
z+4bHi>|RgFyw%y(d7Q{F+@eg}ziUrg|C;AI6eBwrou&D;gFhr~9$v)@O8=dc^jL}h zgf|pdZ4k_L@Z=a*aO|3;Z5IG40Wb?0!0}rd$9KDs!5}RhM8U?0Bb+6X<@!>Mkcm92 z&5Enf_O#ws{vwpm^YlxWddg@$4{Tp*|LK9}EOhU{+sM1%AP#*H)?`fAura+(rW>t!`uvt|R$Z|mS^PxtF zGu_uT=(*d#;noxTMQ0CNtc}i+shisDKHsBM7g)f(F*QmE@1UGM-P=e(s1NijrQX|E)-KveY?l_bA z?Z)@Q`L=Z#p|YZQZc z^i+34QV!kzRo2hy<;6?=f)n*w)mJxlWzTdsBhZ#9*hb^}XS(7`5arpC%EYg@%bbh&j1D^5WF!s+hNeIA1mLCm4^(Sei{O zbu*hIE8;4Z%YFYsh}NpZWRWLnhhW-Vuh#yp5#ng7wfyle<-DftNYK=W+2wH&{nAXf z^RMP~+%O;4=NlEBTuTg={rp=LQ#QBdzio6DWmZ^M4Jp4g~AMmyx1YN?4?AN#v)8_4oNMRf8d$%K4E9E3o{ zTdp6{WL{!UhSsj8|5Shzs{!cjlh7oJ;5KV+mG>RPEtvH0JGfC1vD~LwfJy|O@)BWQ z5_T0q{q=3X2MM~5yI-1k$|&^ayc_525s#`fAi&cR4zU7-dpmnybrZvn;g?Rk-FRx0 zqD6x&=X17Xc|0eiS-!zC__fwR$6`0eHZ_qbA(oC?u*~ad)#Q15a0IQU{Q9*x>>4{6 z9nFNb5q;+o;Jh(vq98_(ge=S9^t2MDq*RdOWp|5u`>-g+^LiJZ68LX;5}V0U(eYUc zfwrrY*Zc9#jIC#^=vc<4T;yvXN`bIf@gGg;jDYUJpN(D{r#WLzGsxnmDiN$L6 zNYP#t~sywM2P97s!Wv8-{5OOln5f@pe+@SpS zto-lqxpl={Lase^&AKZWEEj)y>o(MtFR^*f3bGzz;>^Drz8S5zNbdLt4~UG z=^jpffgXNiQnGC8F=52)FZ&^2g@eL*~;gM~>ZOG#l8-6Tn(l9{s-UnJU| z8Ps?U{@^9=-B&I%Si9snNzdKM5-xJosD<-;j8*Z_60h&b^bGEn zGfrunICeyM5N&)<}B_k?naPun|ew=Or(ct`e%7ILX_igFscq;Lm1eIq%GTfv3lbJDa94|^V%`w9)P1UxfO z%Z7UdmDG6pNf6akr&=J4EfP0|*!x4IwWpOm@7{ZrJw*3^*;|UId#_pBdnsll=|1@G zYe~~h)}p@ClE#_0qzrM{UzbYtl#QhCmT6nC69XhYDRnWUiXPy%0}cJ8lpQ&p%^9ZA?wFpX{je$o2ltQw{v=bU)kHGpzT!=ZSm}ylg~?x1 zQO2H8COGc83+#iP`xW~+=pK}-EK@2+k|C){msZ(Q=m~`+j}Y!jrva_bp#^%AMxMhUg(CQ+<7GI-fu5_FtX2_U5}z8aeTh%*ZJ-a1X<- z<7A;0)?;ICEA5+Mwqci2698yKDwX2}lxW^+06dk-2|T_Mbnr^AHhj`;ll8v7yhX3W zG@MUO!}*GJ;@vH~w^Kp30=%|j#KL0F8!T9^WyULx+i;xAi-v&+pWZ@Ka0~)9+P4F- zi!KbCJ)B_gO7y=RCAy9uI*1rmejN3~DBTxdu&?bT$0)b#R4yXt(fxlLdplCPcyK6TO?Hz{1cByPbXyd|YnrN|88|M!U=_uyP)D~N7L}CNY9erWlZIz@k zQ)AG(!r_$R8P@s8d8eG>b)I~|B953U+1qbbTE;cTQ{Bztsn=a{+Jbc}mx{OIJag=| zoJZIK)@TBwwGlIywh_agI0DSwob0h`0@9d7qoZ~X!`Th#8^RyA^CUe;X7oB+(cfQ!1NHw*$EsVA% z>Qjl3c*5yxkMBI(U$%72n$s?zH2;L(%>EhH!edt$b!%YCLnyQaM1dBudj$qRSf2Wk zK@#?nPu?amR{4tGOpKzkkH%%*RF?5gdN1IQZ)yoNg)hNz@KF3hEKy!uuxplsxCfN(r7`J$TK*b&!WmOC~tpsFrQ^ zz@5c2xRh1aIjIj#>yE(#vC^~^%>;5fv2ZvftQzeYmm+ZzJcq4}j%}P?bvp^Xul!4h z&P!Cadgfuqum5`eEsI)`oMG>lx~fWtLpcoT{-D-F#E88R@%)X}u+pTSxpDQ$=E0T| zKsFxXj#RS`#+m)UB@2`fmCsgpZu&L%!3RJ5IE&R2J$~+eY%}?qYX==sscHzmZ}{el z7XD|yW}dc&h57x!{tvqh^r^rsVYK59W;~~$ zHD**>Q3`|lmKIJOsZk?Lo(pAr1U!1A9b70hFQp|ev^s#UPnW6XemH+w6=}E!9=eYF z(zowD>U)0#c(R4|g$&|J8hmn(U)AZRWRFAJvgJv+LYsiRtFkl%WCffUJg`S(?Ld!6 ztJ@PC+#|Adq!8RZNY`D!-MWu_q0N`BJ+HzSdghX|&wtj{IC{h)htC=B>NvvAlQTBT z&s~4UMrrd6OO8Ha-Wi9lX4#VtQ~P!a%&pvL?lDG>r>!%DOS`~L29T%S2t>E)k!t}& zDu2ig+}=vAA=fHrix%bVfjj&>p1B?_w*#)?7z5*B0>rO%o^hqs#>gJGJ=B|&oN3u> z##{&3AFqe@PqKg_w5DX89{)fVfy+XQDsO&Up+k82PzLi_SkQnB*7pvadUYoxYXYkW zTxEN}0Ld2kHf*?U4`%?% zUkN;TO5;Hda|@HJL7iHy^``D|25PVwwG-&iqy2XVkp50DAlf0suPr5hbjX=Lwe_dL z4s-{Up;vd*AR1r?jywRNFt&ISYYLS(!w}%07WYa@iVBq!cm9<59ZHqe!&_sN(YqN& zXaw94XOhfFW;FASnlYEMX=!9q8j+Sefi!KSQYp~a=luhlN6;aAq4Z_BoV%#FY+yqO=_k%pI}@H!xZUHCEw*U~{)gzn1v~#EP>e1I9SC)*?@SB{Cips5eTe|Bsh>qnm0 zU1~2jG>tm7U#F}C)0j89g5@+&6|_@1_0WWBOzox?R(q=zLfmRB3h=!xu<^u>HYW*G zt~_V-e7id_wsDD*IN6x8H}}|6nS+7XNk z(z9E1F8n#VYC0GDPtC=gq_$j}i*sNu_Me!G3xM9x$yBPqZ_spDr-}mA`7G9rqVsWM z=h1_9NAnaX**AapUUpf5 zPV02X(Ro!d>km$+vLtHKe7O^k)Y}jNg=n5ljlyA`hsWuv9cnxzjxFXfHV}r4qK1)8Q^2y4I&{XHBd7Mcm)4U+*|$ zdfOEZ{-kd3s#h($bivqU(@_^L7@I)-NY{Ra&aBC+3%lkIUcI@vdVNx|r)7iKCppv# zYmG%7jYWCZXm_X_GHa-~xBzSw35&qrrIY?Tv^%s5CjKt`E=*LMxMj=U_i(bKF?6cV zk^8IeN;*gEp}OGU9P%W4f~w*U4|w1;g{ex;lw4aBD4VCO>LAa40kn%CPeJaqEm zHs?9BrY=6)5UH&mVbF(at49u|`^$5WIcetnqmG-|lxk~8q%%~%J;5;I4fM>m3wJ}I z|JtBNWmdZx*Jd?g&oXndXPG;RWM^a^yG=V?Y5~VU_c2@sNxNcGxbwLq5=uuWIV+)D 
zH;!ye^!rCB@4iH4zBW@3q%W0Ue4(891vyN4gdI!%!~#Y>oP{s{;KY*Vc&uxvx1qJl`?yN^(m#Rr$*Y}M+9vNG zZw+-v3SIM*&;NPTE!ZwE|BBixj~u>l_YhB|(7WYi(DS%!j{oTnbDF9zcG3RtHJL-7 z@4!ZL%4*dlXzk{7VQLPFbFQY=;OFM7nuKCe<9~6^Xg7wL^!EkRp-o%#Yr%uUyhl;1 z@w46h0I+%g`;#tMmtiJ8sl>n(=AO}v3#ZN9tZCOXf0pW~Vl71#TRgq;-&~5CqZ(!< zSuJZdu#Smp`U-e}ec#RodBr)QFZNZI0(Y1m48ba(*76ogQH zeh0`P$C#P9!*ss04EJ#B!4?mCJF6}~CmPKI(f3ec)m>%ECFlqZ>-MYC&GJi)Hz zP<e%(x=9VwR`bLSQC-v_F=oZ4UaMh+Pj$n zR}KJ9t=U`NgH;J7c=Nx|`As&PXbG%T<&ao(GkZ9_%~_^Grb16R4X~CYV3lo7tyxZK zpE%br!1aLMaeo;WxeoM`6HgcfTsh{{{X6++Vf z7xD&$b!*)&ByY3o83()*ddZ@}FJ)w*@s{T>k}h{YmD9Vb+gpo~WuCTK?Y=N={-2dlUB7t99gge^otI74>*_JV|?f zlxKg2mcyuupCN1OFpA@6w^VJZGT|wWZvt$i>XHb4XTyP1E>B9Kv|Iu9G@L@05{df1 zCUUA7{u$D^H1qtk@}^MJO_&t(Cigji71$=m1-2dof-W6LQe^DbEt^T}#mduLwmhx; zo}5Cmr0ZVgJ<@ou@}jckUQ(yLkFq*p|GoU*g}*XoOf8dOuG7Bfq@L5hCoznBHJL1) zwB536ed#wM)&k*bdW*wSBc8m~N@|%W(9x}_Z7k{eICMk}j@ILWCgKMf3_IkCgxX@o zWJL$rhxKSK^~-8hPmz`|L}NOqSVf3h_pF`X%eZ0_)=p@tIJBK~d@^f8>!S015%YKs z%_uK_nqAd6bCZr2$$rbU(aWx>oL%i|k1V+^G(%kc_@oc7A-57cai)@2d^G8a*K@1gNG(_VK5v*v+ocLQB|M4MAR>X>u6J=sD!mlI2y zBeUpp(w=wahm^Lw+xiiC1RM&drmocK@@DBb8p*ii;a7|v>#$2^=c)$nz(os<1rt_W_b6C*fcc-{*G-dO2QIq& zzq;tWsD@Bs-FdgiAHZ8!IQEShe&GevHax%YFIu!=)<3nxhNY@|E1=GK^CNUNCU#{1rN8R4 zFAL*5w23|NjUKS_75Z5eck9+&H2k^yMb*jsqoqr=INXkF^U;g7x#U*ID;)ev$_>m<_74t$vp7y2+sbg$efSNqPen(q%b{ORS6wSgsN)7u)SSnZFN zd)C@4$#~)j?+5&>zwLPO!P6g~)T!*e>Wxj6Wgq)0r;nXQ*!8P7A2M#oEMTJaUWSC>}4qEDqZ;4oxq%Gu6QZrUGlP{LYFCcX8t}@E>j4uv)}=Crn2Ax zCe1%7s-u%q;>rSAg-$LLxcIE(G7vpD9zCdS>3=o;2&mGH=jWGdUK`iFkX zh=FQ&Ig975y0cQsAvOMoB0gM}^gok$%PyRzcHk2pvK|F~APvXdv#(0q@@d2L18;9Ex%`KD@ zO+z{HZ*7Wn>3+bM{|3Fw04TjzvKuZy;B@dUv2x3neHo3?mtvGzrSBQyB)TUu%sA%R ze_LCFlIa<#=7`56dy-m?c>EADPx!BDa4PU^+3w*5{y#K=2T4r-XJUTILDJSgZ!3)l zrm{gtx%clBu%~4cGlCh-EM#(V#96gzxkcAU@3AQjWeTE2J>agcR8KY-N7l5Z<*d+0 zznfmItzzVC9B329ek7WHmh9>g%wZ^{e&ei-Xs6G#W6VRzM$+oPa##Q3_ve`Gk>=!d zqw-HSu=}yeYa2SQnsv{{#*m_Ok33@h2@}F2w;a9f`gm9Fgj;SR8B1NZdU+(eu5&3n z_cOsAOm)pAqkePmh&gkowmZbzdK%;LMrHR0cb$D+EcQgplo?aT&VA|^qsO$@{&o?u z1XrwDhnTuUY3Ggy`e0uD5-qP@tL4?n@OgFHz}I8c?|yhtI-UQ?bUJziIfK>?v?QTk zTbfW;8$)@AIqf@KLS1b&d(Uqwhn)P?y39WI{Lp;5+I05w7m8}IteiKV2QR!`92#*U~#JoyTMy#{5(;#Ql57#xzl*yhwpdz=mk0*jcD z(|E7;H@&aAT+utn7Mq*C(2pSTgim$0aJED1Z>sEn|55&=%V=v;V&%V7zVF{uw`A$L z#rZxWd;;^WWFiO7w;bl{Q|If$`KqzPe|NqGIi+CjAQL7Fw8>Uz@NAellMOgu@^uL= z!+BpP?1On%fiHBxylb)ZtU7PndA2Tzp3uKd=OQ}#X=4AS2#1dN35s+5DhleqmQtOk zbAJZxsVXB;rt7Dr1YOZcTK0?BdW}f2q+Ewf!LJ?s=|!ZA^G8#5kTv`s%vDid>}b1& z6B^T&E&Uhb^jRO;EEZ(&?(evM?qi`{PQI- zF{H&e!dEn(zG#?vSPni9>%nNDrLDO^S#{c4R1-})qj!n|NGJ8_Ku#TL;};&&a_Ywp z%!-}@H>l^&^{IhLnh!on%fBQcvv$8Vqo4;#yxDm->CWt zywq7I9qq2RZTCz}o^;}}GnAJ%Pf|Wso+jnLJLB399+*OEk{eZ?qZ0ZC{21(+__5Zd z*JyHKmtOj%JYn!*Y64*(q|aR1EzmX)o^b(%UGxjEdSa>z!=?a?6*}8VMmtsyXx%%C zft(@DBWb73;*oqcQm94^nViAwRNE=4H>yF6sRjPDIbDxv^95=!R#@SX!uVD3>bIZB zVcZF^tQ2ra?viekUfOC>jrRks4G|FUM%7Nq1~IHkwrX zOe$afk#?E%@E(0LH{0%p*QDe$u>37h7JR~1(FRax!I3GTAtwA2Ywt8T# zAQA4tYjwE;Ly~l*Bg3w>X3#pR{XSr^u|exRQ#0R#mpi|>Qnp{ynCf~T686G+y(M|@ zvOyh32lC)U*INVYjg+jn+%8oNUT=9tNi)#{Muz8YL9PA_gSn!@&HWUYx!qdeWBCB5 z)pc(Lonc|Z;77ltt-A)+?M9Um1eHsGdzzzldeE4dw#HG2RRhtUZ? 
zLs0K9+TrY<+(4z8V&8y#0j@&&ZUaoB>WqQhcd6EptTgv+J0SP{|BDJiOg5MxXex&i zWj9#M7YEZ|6#}y%gy6Sq*>_lR{ahRjD+l=F0q#v+*6eRVaI&cxOw0fWsu$B{dIWQ*?Z+k7(Ult;DCb`RgKWvGNN?H+v`>8)rop+|5s|OC9!`c8uF{CbHFPzKM0SBE-8EvR4hUJTpETZ9km?0!>#7n^A@W| zOF7a8pdz{^*6fhPV?;ejjAr!z)OPN%ZB=(1Kj*$2`#SdZ`}&on&9inJC-~PER%JqET77MYY1K4>5ReUqhCrG$ zp;nq;q^mT>s%4wH+B?5<&b@IGI|(f)%84Jj*FL}VJMZ85d@1rWo;GWNmrp*1H_;sq zO1*r1!XaK>mJy8j?fkz<@~@*8mdk&=Xu)*(mKf1^Y;=k?q zU1o-j;;XK92IF+zmxJ(rt(QF>C1n4uWc6Li&PX;au8Zv%5A<~HUR zSwzz3<~v}Mf06kP)Qu$iO&zO5VB!-utuSf&bz{DfQuUlAVK~vdpS7Kcx7d61MLnKe ziJ91ps)3cf_J;jp8+AId%#3eJ>Q9)o;B^S635Rf+ycxnNb?SzgQ~WXW>w36T<)A&8 z7OJJnxcDs9Zd#45^XLk_ribC>249^**9&5fDjl}hqSr<8{iQjso*g@Uam!oRI3%3m z*1e=lQtd!%k@PhJq)@O&DQ6)}dZjR&MhMb0MAHaOS3_DPuhcLXD3-mjI$w~1Oe3q_ zAaK}gG@Z7pSXlfrA8!G{wdmJd*)$rCJTW1$pbcYp&C%HD$Z99f;&B{5oS7In{d7OD z*f&qC^6jxaSM`fJT*nj>CMX@81`^F@(yG~k_ov0WU zsNjzkA=M>rEOM5#<18)QS<1v&B2hX>T#>tQk8bQS1Kg?Wy38pahjP4nRr!bf8maZ2 zKwu2oWbBA7g$5`-%VS4B0<6E)D677R#KJy+D!)){&K?5l#63EvoC7N|ATa$Df$0HY zwqLVa0PE@3N~$05j6O(3R7O~D>9hwRpl;yvlYwF9!l--}78#Az9Cc%>rb-P4-gSIv z5byb@|9D=wI`4$bjnK%$1MmJ7@9f%rc=fI|Z*_j5bN}=0!dBde6L^!?&5yl!R6L5M zmrIHwURPYNT!6N6qIy;SwX}{RYJ&?2=a;crS3c3+aisl2&c2t=`j+lTWTw^U}|KXdGmg_F@|&&gOS{wR7hd0_vy!G0|ENkwit(>Po&`l;YEEtL}8Gog6{#AO9^AZxWE4v5vB=P*CT{B(pNeGWkO;ui?oTIaqD$d|; za*xi1_YjyeEC`}f1z)PqA||n^;;%oHpSkTKxm3mJ$|seN*m^0+lemL^9~Jdc&r?i( zL(1!-hq}sc=6`_Ychgfj3J3Z^F3p;}4$sT6lWzf?laM&dKPOf9bNIxU<=UjXBvWR?qnGf9!nY(3X=!o@s67(>SpEm#_h2>`)4lZr zE`q77EyC`+6pB1Ev2Q#aivHk%uTBWBj~{&e$p^+CIevmt12Iqiy<+|M?Va(=$Mp zvC^bd43nW{Mk@&!V9JR!RUozbUTVn&Ph}uFN77;PwocPHO;eCEG9rkFl7FqCs+3&e zbcUb(1GVRH1hCKZS%<9mMQs7#s8|7+PcML$57cM2ykRRvlEDZ4QRehHgke0=btHad z%{Rp?&it`=+}zf4U++C9f)XBFQ<;wA!LG_1LTva;+56Wf%me0c-|Z3b>(fW4waeHW z@XctU=e`jnPrZjv1hfzR8um|lp+4b|cv5L?=`Mqo)Ua5=`> zjJdOAS@CP3ju^!y7ZlJl;oMCS@tEsRN->YSDO+x0Z#CsO2NiZwH1bb=*6GBZbNy88 zgB==VCkW>x(=;l9=+_E9x?~ll^8rmO1vH>6k+c?Z6X?Zd$<^!&^v=fshyn-&gF-_% zU(f=UH$1vltwCuJ`mFxIssKE`K26%fA@cezs#}^wm-rr4 zOi#id6{we%q)zI_`BJEhNZ771xl-H=u za>&GYaec#>p6fF^xN7dC6iQ|L4eog*9F6yqs$J;Lf#zYG3ft0!HjPa-TwG#hyy8DaG|PG|Z5ai%Z#DhCJg;3y?wyt8j3|GW&>=w{@>uOG~I5N~j}mwfSKKraix; zdpCCbhGjUmTz!~dw2dkVEo;}PQ`SbkvMxbcFDz+)uF%2a=GqKPbzdpA_Zx;`G%oEv z@{km6!E_4#X3BYG_3862=W<(jJ1gj$DAuK_=GQ)LovmMq{}15jTgU)-+GAj3U|;}Z zakgWR;`wd9GRSi`8Gf60Gl?1Ai^49pB13``(V07nuG4FCXm+GAj3VBkpk zyB0_?{FnT%unl9E$7gH1ql%Ozb2p^n6q{tDZ5JPCP2!bRK zx)>t6bw(e?^5HxGI(zT6_c`ldTaRP=0stx?kF*)=Sp7h{F2+q7!+IJ)iIs{l-$RyK zhwIvl27TB(As5=I7bU9SH0uxW>ttl;decZlsHG7VFL{fEEMc=yBvc6jp;$OC>(?q{ z`l(9RjN%aa;G!Bd$1KJ+E;PMd1SjXBocF*cI>4Vjp|(*cw0&=bI7E!+Yw7r5BoUJ%%CC7X1g+q>Nu5 z!8f{w6bizkqu3;xOP5igPs2k&Ga*c?uSis3lS&U@*|F7uSXl=lN9Mcj9DPCZ%aEwg zNIincr#hVF0ouN`KTzMz7pu$2Rd<0O=-kO_0P znWjONU>%hD|3;sDTOoC!#jd?QMy)Dc2bDuSap_^)(IIr}HjMEWGe~#Qr8nTd`fP^O zBqqs$Fdc)%&(Xv`(P*7S8=Wu{k-E=AOs)y=Qurc1L}B|74c8+(;zK2l{|j{^lW$;$ zcu_qE@q!&l;zF#Dy1VAX@*_$9*DXF2MWd#W77a~eO`Vvb;*!n}+50Sg~X0m~$o4J;Q}Ua%&xE?|AbmcVw0 zJ&pYgha5*6rx<4*=N!&0oHsZ>aHVmJaQkuJ72|@-!w}i`tH;AZ-1c+=9c_ykS8YenK^qd%nn2T7PSdF-e_%n$DNj1qCQhZVu zq;sVA$;im`$Q+QJCdVN+P2NTRmx7zZEyWbYCrV;U5lTHumz0^5=O|xM@lcrngzr?B zshO#*Q2U}Drv6JqLF0^Op5`RY1Df}=UTAY^2WTJA5z(pAS)gmCo21*IyGi$pUXosy zzK{MLgI$IqhEax#4Br{$80|CGGA=RUFiA4mXDVabV0yre!OX*Knz@Sk2@3~{J(f0> z@2rZfCRyuPFS3!dnP;nHTV{LDF39ea{WJ$VM+wIhP7|CJoY%Rqxg@z9a8+_mab4m1 z$<4`ak$aH)2@eO4J)Sw9@4RHZ?s&&|Kk&)&`Qz*7$K{vi&)~nn|6hPcKoJhu5YQJe zCtzE^g@8|iB7s(cd4V$muLW@gSpi{GP(x5dutV^R;4dLlLiNH_!UX_9F`Fg;0RR91 z>;Ogp1poj5000620RRF3761SN00Bn;0001Z+J#ZEP69y`oCCxV(ZZ;e#TH7XCm=?n ziHR6BB!mKqG&To!2=TZzcZbG`U$8WOk^YCZA7JI|-m099Ty}Tfytl)=w*XeH3@oVM 
z?4EE5&T5q495#M%p64dBR<6M47$t)(R=Eaem>C+JoxwSN-*^sN@D0xM#_%lT$Vo;%JVsTNQQ>Z5 zs@E0S5E1K@nqbc6D(k8m*U`_NT~@bQ)#Hk}1WxrGmkI$DJR(7zh`aIoQ}h~Bud}e zBJ+0|YEET6$Cx({vBU2{e>Qb~wQSah^u0spvzif^CK~MF3@u!8-!_{6PqI$V*QlXi zH&N~~+v60!-@c;NI&?8%)FOY+Q(#}`_UO5a1MYj8F)#TAd5e470001Z+HKKCY*RrH zfZ=}<$4;Eyd!u*!p6wL6O>BDaoj?-XAtb@UKCWTbe zNGF3#vdAWfT=K}LfI^BWri4=3(Vh-;q!XR#LRY%cogVb07rk+yq2t7bn=;C&ppriH zr62tnz(5A!!OLK(@Zo0&Lm9?!Mlh05jAjgD8OL}gFp)`2W(rf8#&l*dlUdAW4s)p% zD@WMCMy|7u4{Tx=+u6@Cj*5+Kykk9kC063tDR#DSi}w=G0gm&7HhyxLlicSnrRn$C0`2IA%#*T#Zn@r)<8pRb5OHI8bc0;qe`hOD@{EP zrHXr1-0M`jm4Dys#z&>5)Rj)9OX*gYDa(}=%76JhrkcvDY4*BMAQG;vscs3HSGwQS z?YHj9r_yGaP0CzG000000aAPT2><|i-(kxD literal 0 HcmV?d00001 diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-Italic-webfont.woff b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Italic-webfont.woff new file mode 100755 index 0000000000000000000000000000000000000000..67804e1e4dab09537a8e2d37895734bf480aa9c0 GIT binary patch literal 29104 zcmY&<1CS;?)b2aBZQHhXY}>Yt9b?C~ZQI_lZJRsx{PX>{Zr!>`J(Zj%=cJSF>dpz; zU0zHK00j66j&%T(|I#JE|B?U8|NlWuOhxvmu@16tsB zpjB}NdF3DO761T`0s!DEcn9f&#FbTl0RWIbKRUucP-x(6QkGX{VE*Cyeq{9@7_`2> z#~RxjIsgDrm_PQ3Ke*$FS%o!rb0Gu(psRj#bp8W!0a~(|gSjmL00a4>Qx5@VG_oKt~19srR%ngel z@`pS8k%@mm3Lg%}X<_T)0RUM3)CC+20Dv^n$cbXx*c<=oS^xA4X!-}6wRUSpwuT-* z*R}nxZG`^;7#b+b&d}EMhx_R-P~nfQs)vFzv4g#{3jkpMqXYE*!vVJs6p%VNnf};1 z;{Mo0{-6U?0F%Sr)aa+pj{n_f@c-~aqU8OP{_j(oX5zr7p_ z^Z8BL#ipQ0!_AO?+K7lVr9i=Cq8dht>6w_YqwgTXr4|LCfr&HsLkU&-;p)Uti-@&> z+=0jf{hMoq-w{PvzYM)Tx_MmJ8&D%}CZ4@^x}USWr`=QAlPRdwODiI~zSI$V6p>67 zNm#(>wzu4)I#aoedo5u8aTIOdYI*Ke>h4@)BamCP;?7;G{xXw*k)o2?ew(c8UFV$1<)L7FX+>wnfAFGTpZ}6VHY~`vvkU$+c+@iQ; z^{piT0bghD8ohL?#R&-?AiU?m32a|jhigj}9fDGy2}%%@xQEJ#Mn5oO^aAaJ)&s5| zCVg9vN*`oDz}yC06MWOJT8Hb~hiSd5)dpWKRq-e3Lq6T?9RP zE846ezfq(2ROppi+tVWc)6U6~;;5A0i+1^LFwNRY!g@Em#*NpNy5-{4BzmEzaG5nr zEpeJL1>>p_3m4v7HNVtRipgU$u&tm4@oj~A(|!v{14ma{ZuQHk@MON*VC4u5-UIX2 z4&H11HrMZ1^&{T5n;biG;P*kI_lGe=x?m{rNsA%w-z-YYM0HLyvQwIr9*?dI?v>tS zej%rI_9}jWYw6s@f1$GpfgnKO$M@lV@wlyj`hQVVCZV2cahL zXtq$F|BbvV7)^D6HknbHY*HzFVw!>{_tK}~er%eev#)~Z^{7za8tp>IRmuEmAFM+m zb=IletB%;ae*V28x5gm&f-O)(3}r%Nt0Fw3XoOW$nN@Y+A-Kdk)XXX-$54Q;^n@+7 zyIv3NGR9uP0AVf6X$H#8c6xCXKD*}W@_~zejlVVecmrHbj0GR31jWb3sl)~H%P@A; z*L!NbI_d7Ds-o?8a16^=P-(6Kw6BZC=gmm-M?TiSWn3mRwoScf-DFG8?F!o)q~-Vd zL#pw`s}o$yH%VMNEjO39aODVyc+jK4zm?119+&1D7tB>w3azQXKkQ$NWh`w=SaY`2 zcf4h<_}^D+THopH^w)$(u^PqM;`5rnRpEBJMVZ}rh4{oTK*oa$3BS{uF5&z=)zgGXq)EA zWOIYTowUUti9FYE>TEn{HRzvLSO0yD=;YeUIBTyCP%W>nV`~AiGr}9<5Uqp{H_d+{fiB?a<@8nhlej-8ZO#U*mc8?OGpx^nNWjws8vMGD9=u=%4 z>stTihD2*7dZds%cw-y`*Wk!LX0||L<#~AtvGvsxeN*kGKIlyLx!AybDcr`|7~}h@ zKJ`{M-wS9NHCwZq-dno7?Z)D}_*x&S<6XsRer~&=no=zLg0)e37es-+97}S;@4;xR z)|JwMvIB_r9jx@Gb;J-(^6l#Q7V`wyT!Un{3%D%3maV{vwBhOGrEDV7A0GS0)0cc9 z$4&jt=W)->A(T%Ig5(I(%L3{a3V8FUeE!o@)#poC14Nh~kf*TXPYefK?TROH4!$+& zP01-aC`iN+i)1ypW!$uWZsx%39#@}a4|LTQDMR*UNf?8fresp<(FwA>Q zXZ*f7ZdFQ1Ch~!si55sytAbg~4}-M%*HKg_e$x`T!YExy;GG%bg57R8vPT7Qjxq}{ z@7>f19%bfG1!AV)j!AY;Km$@X1hL#yVzwJn70CO9AWx}*bg4m&a$T}A;|!HYf->U{ z)#_g=OtE&MLtao5uBC=-E4nPJHEj3>7nIRCN`+n2C~h!$M`*F^K&EIATbxr&M=w!U z1d%MOcDZJsQZ3A>@mt)}sQgsy^A(2eMlm&Wyo~A^_!1ufp7nu%k zL#?SIWtOoQ+pG+$JhRa=OtInVm-BHg>u|YG3oL(N+pTr3;p*(CGRCNHs-Q=w(1|A* zr*2X6_RWYf8~V2yio|XDL>gUkkg-BsxS;oQIQnHn5zTNfAsOQ@ppPzdtp7W18O0gT z8XCAb-a{Gg8W;dYFn)Z`01^x*$7JL|BbWp|1i8=|r+|ijDP)A~k%6ED3B(PL?t%c= z9Kf%C#xr9BGkU|UzP`!60q)49w15Cb{9i2kCKAR*1_l5_NRPh02LXQ}BJ5EWz`w2g z`vojatTf-oy5di1V}o4@b7(aKgRGmG(Y`*T;SQjx7$`s}9z65^)@xv3=5K%nE(VV1 z0dmL`_(}_l;ok%l1RO^OFkxbHV7_HuWco0NHLEh<{_7$hAT1;!Bqby#Bq<~-q~9+^ 
zq-PjiFb@*CL;L+5G!=NNV4%^53A)suN0>*LPYAMP2({7=x!6D1_wYUQUHhH;De%_& z()%q?(rZtU<>m0upAP{+aOqw5-hT{HiQmLm*WstK2X+BdW-SREWkXooAh=p(@SIO& zU|?W;vU|9HymxeffryHXj)aDi8kdxi5}%x?uAri%rl_jCGP^jxJh!yK!N|(Y&cw#j zTG!OjQs3O@?%?9&=IHADeEV?s^zZQ=0TLD(9tsX7_D@tqOn7vpyo8LDoZ;!^`B^5r z_g+KsgnErev)R`ABM!UmW|!$k%oUIO&3f-mpHS%hLsS;Tl61s`fCND5 z$MdWPh=3t^tm`Uv%4rKBZowgd1PrYba;>8_rpNvD&If}UT8cI@;R!i{wB&G#k8}hg z5DXB3Td&2+%P*411Si6f@cG40gv?_7r+}4*r_p&joke;)zQX;?dPez#G;UxEe2V>3 zPNKOH@LY6p5gpYO1fgYP_Au`H{sjY`IhVv^1vuifr$WGzh)%Wk>BwL1_7zrT^Hfvsdec24AUXIjgF?prnKSQSQUPzg zv^A_2@Swt89A4iA&F6nAyD<10C>DC_wf=K{fdD|?-v9)U^`YNT1}aOB&z<&}zjv6^ zZ4xD!CCSI)(n%8~e*r@h^FTrdj>Zt?0aO3DxrBk8%>{qnm8zGX%>vtShN#;X@#eHT zSJmd0D?ThE<3`-4f7_EMA`14_etxxh-XBjf-fTNh^Syuag_?#k!}7?dO*%xylqW)= z*h-(hWb}@hD84k&4c9x2kK*c~-#STAEaO{BBX!}sgW??ti91kg7bL~gjsG&dL+HgF z?;Y-wXJ+C=g+~6|LX&~65hbOkNAc|)`l|#CV?WFp|DYItnXGy58&0)$5y!x1@$@eVL++-vQeK@!7YVR# zW2?hOP_-<}{eGn0p@T_W%X>+|MMdrsMoP?_WXi~)*HUf!?8rBS4x4(KpZDWyv1+PY zYFwEJ_@tLaN27bf(889RlZ-wvvz&dXETzolK1KBuIrBu0vj5~l4=-qu))WzAC!#ES zFYktpvVr9p)+2Tc02pxSS+&@smx$Z~Cp z@mB1L4rlZ;R#N5Mk7%28epgoQVoJbQL1PywlEPG!RWW)R4;~NFff_={#Nw$Vwn_a> zh@^M_)wFQ2AkK|0LY+B9P`5 zl3nvBuBH#;BM(|YqJM-eQ;I!Aq;fD_rr&)xL~;X9E_Gb8v$BACV$7jZROn>=-A!T{H9s(7hdT%8y2Z3h_trES@OplfqC&bw{ih^(dEZi<#bsL|~- zxF!o%cIW##&74NyAO9}7cza)&%5_(q_>Ob5=LDd-Y* z?kwB$Yd^uDt)V8rzPV<`@9fO1ei4=9mewNq|CX-q#+bo;bHEM0_Qmt^1 z-iAxymvII)2JL?^-lXYi7OiWQKP4KkipK2xq`K^82O#peuPD7;2e)vW6bHxIJVujk zVA5i^?M9bu2z;JvtJVY_cMXpB7}3}~zT5jqZ9QBiU>?h$yfDmcG6`72MZkTvDoo7P zxN1>A=_$MZd~}LqjjapTX5)fi+K!&PU9zu)UQV6>7a1WXDNb&#&R#3QbcFCZIb_%M zsPauBS7b+;xz}HNxWJ^GFQ})7gRxX}TAQslQK}jswOejS(RL#PXD7cY)kv$Jz0G6v zzEzC|NR*2(o_Hvxta_S`Xm0*IwLX09lZAVAGp#MNC+j%2^O|4tp%C0``VMzZ36oP< zOWc(};XKQ|+4*X}6Xfdo{%(rl-Q&Dseo}{vgov)pTIV!*V=e*#`mHN-UG(OKY-KI0 zDvetP#Uw5}!EJk_PLLF7Ek4WS-xzt_94gzXB{B(XhGk(&S9rgW^*pR#Jo;W{r zjZ)edVWc+D+32q)Y43h+W zB-!fJtK|O0#j=VWircMiNenT}a@*QQPVs?@BRAA7_rfKHkAaaIzCi+Iz7=M)T*0cv66nc0!Rr@on1S?cLg(+x>V@x%$ z%QMi=qsNy2x4QvIrLCXdnUkFHAmg8n-@iWC3XrHLR_jB-8|+KV{=QCPOFsO69Z{ zFiy$|BpfOhnAEIhsusKETo~hhSQyCy>i?SV@X9y-72Gjpf9tgTsW#0GSi&s2r`YuU zAVHS*3uZfzFGwtik>zMj`)Ej`Tkk;oqI(f6it+^cvdumR%70Jf%NLj%d7blx%udx< z%BkSk&bx1WKV8~1IG5(abm)%T@lkoNh#4BgmD_YX44W8Q7rs&M-JLJgcVt*_VvZy8U3+>h{(s;~p?o9r`{LW$v1n`U-F<+f7oSyjx2(1_rLxBTk3!P znpdsLeiD$;asYU6C}=&no6Usl>lIX?2Yiir!hUMr0!$+jrc`)^m@CvUpv_U6VqO9} zh#5TH*K{&3k{nr9ygF7tNM`t6Eb`qi;Ku1=*xqXmt8o?*)pgy!#T?b2Pk3s+&f!g) z9Vn9b@(B^-``Rkv>61C>UD}`>bnG}CF?QQFJwqi*hgCEI>^p(L_+uUhPH&{i7lWg#X%uNmKwP!zTnNasV`;EeV9f|`{dGRBVlJLnieq)^ z%YzTN8JL&64nSz$APoec?lm-17eWxlu3oajSa7smYzP0=o?78~Lu*Xd1MQU^E*&wj z#+1Z!jJO=0rc(4O2aA}pTqV@bljGWsjZz_W4Rsg$dv4WM=!RQixA;`nxDvw%kk@H8SG>)=ovOL;(ZuBmqR zHAwKYS`!3Y1>_~+aut$}lcs{NRP<+oS39w-cR@2H?7B)Bprs;qL_MYc2X`~45*Q>8 zxLVgzyARq2{(uQ=B&S@eg|so7NKKot%CcYYh8179jPR`dYr*s}ccruvWDUa>G&P9Z zdO2A-k?o(>{Ox+*(bCmXqSmx`iH5~CtJQ>T@llxHVr_EuRx*5*`IHUyy(KI3`1rIs-)yZPYG~ zRss%Kh-1jf)zoO<{GSM6*iFltM0#i@cz&bM6^wM}jD5=+3*g;I5aUL^tpaM-JFM>OSUsbA4}&wfkWo48*WA5+mxup2baFWy9%hsI z_22&}t0zQX(<0D)60< zMA@tSC9Vmf_RcV@^R|6b0_S&_e8NBLBzy?MFX=1{6FUWml!24r=}{RuhZr zhWdcSMuB*h&4TRZB(^3pcMm~MgvM3C9m7gg^s4H6uI+fD_#MIb2Jhi>b4$0&ftgmB zo)n|-P87)q0D(#jLdoEQFFe@JKl7S<= zYwrs6t|R%3=Rt~d-JQDWchhafiyq>g3;zs0(h!tHfjxKxWn^54W@DhB#MK9?nkKIgXUX{v1NO}F{ZRX3-;(5x_Iqe7xtPDzgeh=ws{{UgMZJdNP+2Fa$j>;2Mb zcE0u`y3;MrrUGD10%KK)M`KOG%4+uK(?zTUG)G>Z4G^=4z(z%}r`m961A~Q#uA%jf zzJleE!ouImbi->vs;F8>0e=x#QH0~Kz?6UM{NvcT?q*PN*eISt8p-K9Elxb?=(asr zS8MN#O14+MOZ02L7+gHoXNb<_bN!5HEZt!}DS%O*onnWy%l-0Wjz6iyFI zOM*MG{xbo!;7f&`OmfT28PSeb&P(llngSm=UTU?@`8lPiTA_8N#Is6zLPXUIvt6{I 
zwPE;)Fk}op?Y#UgsPX+85O7s(oBo*lE~oq50$KIVJqcO_9ckffRj15MK^80gxRWJQ z7rZB;0?>jNY}9s?Q^|Itz@ateT*d<3It!{zVl-8W1zyuxY&$!iBH2kKB9)42+!s&J zzA|o4fh(NB+ws8Os}VQ|B#-GWEz6V1`VyIHDoy&_p)r@sXh}Q`<(s#{{CE&KqC3KI ziz}`6^#$2$*VaY)UVCnin;9*vlB=h5exj<)VRIZ$0^vz7EUM?4cI|c9BgI4d$|0^C z4U&>+S8W%N&m}C3m9^J9;M?V*|(r)Aq(;AdhoIz9= zB(4E=X6N>oPXJYBg1NokzRDKlM#1a=no?-2Ni ze7T>VD6Q8L^qmLs`YiONF|<8Ivi)C;jLwrkQl48Ic$8q>P_#Qjfw2-!GO)KPPFV(H z1@@%(iX{wTBCGLvQ3m&|TA5~9{RF5U?~_Y-DYz^MK9_?eFIj&E?ah&ce@sq%5ZQxB zMSn2MsR(k^YZpF8MDNPU(w-!x44tvDa2qkM%#fq#t|w?#Bx3tvDPF9Q&!q>x8XRyD&T= z{}BCpzIZM|DBr9?Tn+a#BNv1_KcPKRGKATUA928eRKa96Pz^$Bq@oOUo95l0k)P)X zw(P(qd>i;F50d_uj5csn#dY@E6sIdb2DSqE6k@BmYFj$Xc{Td0KV^QAj#0phQCks)+y5OZO}OC7@B-mow?Pzn*0#d?$_}ImZ6p4hSdzR zvvF_^DvLIsvu2Qv)fB(p7VB3`H|{-2E-V>#dQVQ|>T*u^QC6p_kqcL^@L78 zBHypt>80oB5=Xf48JY9m*L3T&B+W#8X)&)4iS&=M!;)h~coVXt{ivds@*)jEkq;H6 zoU1>0AB9rb5FL*VK>YI{mJi@zEGUZ?aDtG2xq0}Oi8RdY52ynbsk0;mEIYk672O@G zo}bV1y`66AuFZVRmo1r~mLsaMIqYAyM0w|3Vk*RR$ z~R|4BPflfFZ1W*?-u)P(zDzCrT7Qjpwr) z-1;|e4W{de?4)?M+8D!Dcp4uO8x8_Bw`Lh38I}&>t|UuqdPJO)9C6I2=DxPLw{qCq_?|twCr*&QF4dBy zw1bZAvXb0&b?Z*>`SlTx(OULpg?9T!Rw3Rj#aaaty}(QZ$mK%O9nZ=U8h=R{`yQ?BRfsDy!KLq7K9_OX2WJ*A54^nl3s1Ekgx&Mn+ z*rit(`s;0F*g7BxXH9jBZOil$Zfk<2VT{b63z$U)hoSULbX>m*iNuDVvYnu(1LcOr-!M3KmmmHZ?UCOyd;5~|-V!+I(UYP$ z6r^l#6uSNLoQ|;qI3Im@dC6HdHVU1Q8F}ygai_M(bw!3As3*IH$40~4b=4`5GR~;{rxn=JEjujtt{Yx_61h6tWV(mZr+}~ z+zZW_Hk3ZynE;uzg<`|5Scn!;X6P?a%osIL1IVCow%+d*X*&N6IIDouGA%;aeTX^@ zcbc0>f&owgVxJ>~gI6u5w7;7ZhF3KZ8+%3rli4!k5lYDnR?CGlMNp7q?HDi@2`e#% z7;Nb2Ng`MM>dw(28xd?9<~+#Xq%IX9Zsldt?ER`F)u@=V*8>F(cEx$mXyYTu40rXg zuNo=xEM}PAPnh5ij3N%daFAVI-&NELCE3u_N388shX+Z>an*b)dF6Ao?)y$9&7*S!?wp(sudZ3-w0Ykm z2JM+N^)HZjJHdRahck|wh8lglbFM;Hrm+4q9(c9 zT+@I)e>!iLp4_zZv{X7dOk1iS{t2K4aSo>F*JEYwU16>5r8cI*7J{a(EJsx-KxI{= zDg)+LTdPkiq{ABvjn{^p7FMLfEbY%CNx|Qxo2?8?7_A7OZ~`rc_9n`)^*iVbvRaTr zY!Z>gVBnd3cM8-hYbd3u#1@LG_y_sczefvYg*~sSq`9?t64`7^$XV-#c`=i%c**!yZPRnx z5_{{`8}C6=LM6`7d4UjV4@w2c@G0ftbe5J}zbs8qUY&)F!;Z?i9D7!BNI)8c?#N%gltI;Io=E{8m8)q#T^1U|9VR zke;Qne!j?aH@Heq#Ku|dm1m%F<4_VQg$-UohZ^2?y0D4Nrd5+y5J6R(r4irqtnhiX zL~N^K6%7};mQAkYejdS;iT5^;0mqQDZA1V0T&t(UIM6kM7)6KYoV=x=@Oy*B7-!Nn zY<{LXT?!u+yK<%qtI5d8^lW!@SNSh3uCHF#mAZT_x3R6TwyuZEU|XbIY?5m*1Ux~b z#mjtXi%JA7ma}c(dJ}?B&C2ZNhy*!VffxRqM`>skL6CU7V`{hZy^3mi&3k(GT~m1M1@getl-#!c1juXOn6~r)DK`Uy$Hl zJ2Z4#gNzZRedx7_d8lp3`LzvCntO3*fYUFdsvl6m=3b6Q1Np$!811C?SAAR^C@T6; zQYyG*yvmA-&Tlz2`R%;dbOuY`5+Gh0tq>U|EP?Sfx{{10BWid-Gs4RhkMo0$ z2uWaCuhU`#^Kc_3&-`kzqMk62(n(-vJ{WU&e#JPf-rTz99q2Bit3M2F*@Px7m)Q&^Aiz8)fnm#qLW)p%J)h6EKub{mPe-1;^Faq+3M@d%A!SP&?f7<$jjaP324Y>=y_TF4<_lT0KO zlG%rFl}p*$+0vc#jbFJk3B%C;Yrp0-w%6`naW_uGvqrZsHVdnb0>E2!LS_ zS%S7ojD$C{KD{EbSi|TqHq+M$YeTgFOo0+P8&H-}7@!g%uly?tl6kUM>e)#uyH)zRh%Nch)Hshr;AwAQ8HvJIYv$R8(! 
z{Dk>*!)4mi1v5Hk7cOHc3LvFi$b@#!6_krdUuM;{^2<%)ua6i%3f1*m0|7IE(k?ht zFR!RHufHut%&JWdYv!cHX4yD{+;(7^-UpN7R-z-Nw;%+r>hUL)k|OzB(S`1Kx})2o zJP`ML99XKo3yGVdM2JzIqL;Z5EjDYw2QN8(F?cXfDaKMujxJ8TEpx}Loax@!Sq7*( z-;euYA^J4mL)_bQwe3eA8YQ8YhT8^8LW>@(bZB849gxB_nb|tdtES-+;hf7{7RDMh zveKdfolh1b@=IN)&Bm;eSlce~@(>uR2+(vLnoQRbLSLN)q1o)vhUwu{4Bs5NFogTU6v^aoc36 zyg?pTNcL*_x@4mCZeF?MQH*He7nU~CmhN<1RzkMr$VQV0@zwL^kU`Y1mw3Yz99SEa zl^s}eusfPpV~HD4?3P25FSz)cUIsymx*o)>JWsj>xPHpOf#Y}YA8goGzNvPmaMR{e zIyIK8J~RY9$<&{<(G#x6@;g$C9#kKs>QO?FDyzm}6iUh&%WF3OrIt2t-J+I|W0lKQ zd9}m1VWmT?s|+WyVx=&XMP^zobfHyBBOvLM`HBrqhN?K$8%;I8y*Qq*&~Bf>PK4+- zMistkcHhyp)X*2+!tbn<*JLBTXP){{{mBlSze=%AD(MdE+{8_AzQ>9)WDDBo^xV#% zRjGS|&-a4IH0eAs%8i8GR_LcY25SQ-6XiN4FlZp~J+)L!4zf^jI|sn-z3SD!$nByL z`I8oTU1XJg{M|AxvYi|yl`1ug9@7IuXyy(tFM|4vt*CE%!PYAfoX@EC_--)3uZKfBr# zdHmTlb5@eAXZNCT?n-RU|_Dv<2BMMm9%h1t2s zWP&g9q?Pob4GD`We2!n<*;AjS5cuKf+d!~;oqn_6YT}$R1^NoT~q48`2ll8=>cE- zGEeD>zl3PxfnGHf;Lw7!g{in<~wYU&!KoQqVdMLA>h9)I?caU10>$-2@@lQ z$Ri1zvL}i@qO=edEa4TE&FjZvyXA*9jH+U!LcGhzn=6=5%pQnuv4I1NxDtCYPl}z3 zJPSu)N~10x1`oT+h)m`4vCBA^$S`@=`&!HsSi)_vXptt*r>VXWi4Gs?Wrt3$zC_F+ zRT!|GJ*|sGwGuLz9&nhIOG%~+B#mncJ8!;{(rP*HD?~Y}x11>as$-yt zTXo${s8WE^t)sT|kR1k#{?k!9YA)aDBj<53OfoVWm?*a1wd>;PeHo7MTOnnT%;G== zY1aB5n_Zz|W?VzG{^|+Gd;4R^1D53vz#m1Xvm%OW+8{YuNE_o9mRqdz&E2rf%hkhuX@;(|WkIY7!UF|j-YKOH` z128b^f#!A`wkQv#-e*OBWrie0331%DQGRQ3btmNyS51Ci){+D zQirLy-G{Z%m$zg$%~*`9*><#63*2WaL*kallm=P-M0EeWOUoqu?_8;tI36b>k@|A& z-d#Vj9xyItP6w_S8dI^nBBwJd>ykSGh4WqaB%2g;c~X9{zO4bV)3T>>?Y-T}Dj6 zANY`fB3#9k%7bkIRSK_=jmpUkln+NW>7FRGQnW=?nDu@7m~>93tzo5Lk(Ww2 zmzb#rbjt8owj^&99&G7feiiJO%f2e6RIV!LPp(p*&!o@P@^l@iCkat>JtQnkf1ytf z2CjKEv)ci!4IXpCB7sI~q3%zfdN}+XJ&ZN9$x2M0Mv^G(@U92v_0%Sq*Y1mNoUJj7 z)EZ|#NogSLN`VuDZF*ePWUtJw4kRgULVHHqBp5}DKi-m$CVR|v9?y_}7GU(PNpm*= z^HfrZ=p>-np?2#i@iH+NQFD^hO4W())Zk>BC$X8)lZV8r+&dn(W4f75t;hzvL0JpA zFEi~_JfFF=OAEU;wSln~ebt-9C_iEW#zoH{&kaCbyD7ua8&Kt>mwt!2X)MuqBB(Xv z(03Fzz<_fTN-NnC2xVtmt<`{>Jef@GVa$4-{cQNnHMak9%6~NE&d$k8VWr?Y;j_+^ z9xujh__MV8*pAl#gj#m!G=2pTT42 z`)qpI;S3@E0g@_51-aT$T7Plux(Egyjqh~GlII6Fd^Q*K6Sz-P6K~Y`Y>G+A@3};3 zY(^D8T-b@chG$cd)Q*Cu`_&|-Sl8&9LRA0yY~5nHwK-fg*}|K#tE%rk zw_qgdAk+>bumPv6*$yvI0LSb^qQD~zO*^my6N_oGz@Z* z#a`&m-f1g!?=R@IIZKjT$^Q@XcESz<+hFP{KxT^WAT`-BJp(5cQiI1~q-i`;1-0<1 z#t~D-6_k$dwbQIW{?BKF zcVyqILGeP_ERu$To9y@Y2{gtKkmX#Cy42Q*l1(SOnL33yGjqvuJEO)?43t`?g5d}~ zpX)?jo%o;L=e}E*af~YGC@h!d^}JmcjA`o(JT)_@+^nt5yVWe3AK#j5eQUW2@#!Qy z1q1(%Hj|sO$LfR1N(cO5t9tG^E`{xB&x~}5(aI5PRYVzezuCC3QBQQaA1u$S{eB*xkQL^J_xPq+u#` z$U)|~++NcAO=l5jRRNb?{n!gP9$t{rG?Hs;>1 zjPScG)>!cAZeN*aHMHa@mWU9C1JOmF2=R+U(3yutztPc6_C|4K&c;9aCa(bYSft@u zgV$VtuX({M)TEN+b|n#R&tv_=V7kpCuf<2J3`xvZwaq?EwyfKujK#(-bDOpBQm*HS z+-_4vQ3l3X{;K4u9^GXD64MyMnJdvM-<93akD6daJ=}1ga8rqPB@q1BnlCS4!?5T| zQOl!J^&T_YVcjA%qHa^{NK|$iE;QDvQMBpGOWfD=H%w?!v>a8atHkcD+ddUEdyDyn zMGng1oD_zmaqXM5RiXwSF8hDC%4Qc_?)?Qk zyB#-)@A%xtjAw{<-NF;+ayNC_*3uO?5YaMn;}gB-sf}WjRPfMx^1^MFR*egK>FVpt z>@Btt0(Gwe&VQ%re`V;pK3G4Y8*<)0gOMMJW~_;*D$lVKrw8mR>tjyX1s}nklOGk7 zCFNeBkJ$uIUG`scJH(7z!?U5-39UVhFpYr;C;vm1HENe<#Cv}85C@o zjhANfE`Ij1mquQ%GO;sP>%+6T+nLwFP2l~iwF<6+U~uTY{@>OZaWvppI#cu zy9(;Fmzyb8{n4lircBmI0^xA4#Zu5QU_ctasDfwiuygT2?38=#H1pT#o@w>p@>5zm zPN1uCBf@0WGQIG7Z9)?2BkheVRj2DU7}=W0W>>A<@EZ5eSi7O*Zc`vruVvn?0ij_t zPs#WVo~^<7er`R}!PHhyZddAt09lrkXEZHs)!dgQ)|fLnbw}f-f5!GA2FJzVw3GAO zQv2>LT)j0p;bfJjqgI2vh}(Ylk8)eYU~LTnpWARA*nujy4#d~YpkP>*7U3&WbDxYS zrJQxZO7}uv1d!5GixTZU-B!t5;aX7G*|ym+)U7?lz|BV5Rku|@QLk2>Nil64T3oVlX&C1NsiGrU-3kbGAW%Ghou zwaEUp(mFnDa3`8_?ANcLs7sWvib9PQIk+Nub$udySuI;@*WcW$Bd09Zh1qnbj4d#r7wp`%YUV)+ldz zYw~5FVfP8wZCHuyB+IpB_WT-7 
zH7kca4C+XK>MO|j#7Y&0vnj?IqUfAm4A$$R;3Au~N9CHs;;E6?y@-Lm_STpVRy+-= z?O`})dg4dwheQ!M(PIB{`r%z^b*+Eff2^NeE8+8g3V~6hW~ou`BkyK#PvW zn#-nNU1sSz+>5)g(r1>z5w7)4l05V~`i|qs``@sz@S>I(wuhRy=Mz22k*KLX!i-P1 z$1?wSr>-W*p!*n@XIPP-x?3#h3$oV>C<%BwVO}Z>Zqv@`S_}fbyvjFL8OU@|*i3c9 zgtr8}!}*@G%=FKXNr6pBu@7#u?g}H-QW(1Lx)5?cnB`P*bOr79CfX5KD~Mnz zrPlz~8#t;fsF)P@+j5cPm4F(0Rt&1YAw0WewZh8*bn%?#Qmop{qQWkHi&?Sqj~7dP ziWo2Wn%o|u#tsH;ZV|4^+XsWgyqanE+Y9fxK^J0i*3?SGrD{#o@mMPVX; z3u*}Og1W%Fx3*6IVi$QLm2Ns#Ue6Pxqing@5=YP((G){i~NFw`TFYtkA!TDVlu1%uCSR8B)+0un}b49Ck zC8NmhFA*d;E6xq}*nv2_&W+wa(O?yF7D9E%P0A%$?SfyX`m;w$5$>wQB;vVGRMi8s zPhH_rZC`iK zP6LW-)y9~5d8>lGqXv&67Y!5V>Pl1}{c{Mc}RoE9(+S)Fjwp04o8*b(n^1dT=F{ZC< zMfnn*BPaM9Lvr*z3wP`LnXSlM_5$zKE{_Rx7sHO2gC5@+__YJg1Qy1MXvM${mL-t%2#sOxh{wu>AXmC7DQ?T zrFK*^tYt>knl68hWeURKl9_utrK|8~((zb0a=GsEC5>xrJIvpV$=q7qjhz0Lgd5}M za_@!y#pxt&{q`ZL52^J1YP3$apIK+VPWQy<;=O)SaVxXYkM2*iNtA zZa2>`h`q6LNBv9o&aGS8WjuBCU9<0E7kI`%7n z*X7`Vt94PSe}R1Ee@RH z=WsJZt@DQF^tHpg*UzbE)y@DD5!gI3Z*^eYL=hd^jljo1V*fb)#t2Epp1acho1E8A zRbtIg@8X9Eqoe-`;V>T0ulXb&j>$bqQm}R1R`!$bTM2t!cLkg)f2KG^?9p$O?^#a6 z8@z?-2VgMGv81Pys6U`VQ*WR_Rll4}TlND(xSgR5`#Ia^R2_-|hI8BV+r-PH$nbtSS5U;zh>#aMpRU~PA2)yYMu%t2 zEzjN>bmJ$k^6r;@wT~?Gj=m*O?`s@waZS3eB<_h1Yi75uS~+dz%+`*l9-qA_9NRok ze(5iJF1u~jv~y;(43Qt3yRjm+Y5tBKZ*zke&ullk4c=KDa~5ZPdfuh%o-hn^FOW-ZXQec-NMXR2}^m+sTQ{!f)-abloM{b%)5@I0i3P}7R9XgPm3?`*Q<3gND?@|t3&2G^ZcZ~< zavH>Py2HFW&Psftil#&__(1r=TfVzCEHT?k>SCi!SFnzIR?J_R-(k{o*RzBxU(kN$ zl>5zP_2mnE{w2vXxGgKrZyq&rP_tQFxaQ`ol~pL!6+}t!Iyk@vczd6jM z+;HwjMo*FHI#_~%ZRT|jf&UsdDqpYKcwNsaIReX>T&Y~7tRz=uPr;VlOICAdae1(E zCeYZA*CD=}us;LsCKw`_GOoB+^Wd^7^6djVPSzrh$VT@Tu=f^_#(NssYj3y+IBfa4=t$y{R`gm*K4>LW<@p>@GB`44xfgTtgjJZ&|21+>q~?ov_b7MVyzy_BUgEIbf1&c>CM>;%q@5FG^xKos0kT*|lAK$?cVisz+tEmmB zWHgm;+Mhki5i4C*tPtabm_6(qk5IF29oP{c_Y(Ua=p~e^xQeVuAvz3E*=EZ05~2Mt z!f*IB`;q1hHWwCp;0H5sB7M+?h|hcPj7f_;zPrvkW68CaXtZja*-~2@Ii2rX_OopZ zSKM&>xgDd%wGJ9{y1KqQ7!UV|fR@L`Y|X3=4Cc%>MC}#9mp8;>DPF)8%3ENCT4O1J z;^zuFc&At4&2)P^^)!!~w&-;bep%I|sx^vXEu3YtfE~35cWR~6v}i@(8jC$`u;^(| zfL?9!wBdCwEgIB~73%u#AQUJji^tee%-&br{ivVR4(S|1%KXZw@aoyRmEEz8+^#Iz zrpzICqyM^%-O3&mUSsl^QFL~BKx2JWwwwB5a)OR>Z1?=x=82 z_bxgAQjd8|bG+7)_t5G^>$r=BL|aGW(B!z%flcw&<{EL~vP%{ZoSv6AYskebQ1}0x zf1bSq=EpNNTJJZb^?nPPycJ5ih^^>3o>BliKK}mJ?^xvk|2#2@$`OVf{Y06?eL?f3 zJDHS)q0JR32S#6FrkcxQhB}V5T#ve2oXLbAefRxnyRVIE8&!4>xmFg-ru(gGsS)E4 z&co6;1Y4%y*t=ya#TneR9-zMV)B{As2sR_Mg%$JH>Ip5;a$@f?Kt-6VAd%gdRW?o- zvZ&(T-TR(4)}A}CZbaT$ScdJs;k8Rg59fl1Uma6lUlLRr)cleJOZXv*OIWV%yVAf~ zP1(IxW|*NM(zF-aIq<%NB;*Q}vQmUO`VuiKzuU9$lqY||efsIYe;Ci|iJtzh%$`ji z`BkD5h`Z$nFSx_TIf}{_u5WB-0JL#?Dk@?yHkfd#TkWQ9)RN^Vr8` zLCzbDNp)9)8ZIQ_szipH?6s#EjWt8!Kl8*I?4G+WQ^uSdk2RL*HnbQ|AHQ%3OUL^^ z>>X?z^s2y|t!Y)(g!@bAR+bDLtI!L>H>X1mj4?Jm2pkw=@Glsd5psZhWg**wiIS;H zlym>Heu1Cp?uVP+$zAR{@&VPUe+HkkiejP$m z%jd2i+CI<8vlrc!dhw=IWvU%AkuQ9;|j! 
zOxzc5yc66^<;}->PPdZH8- zk_%B(B}W|fpltPm4f43DG6a1sSz@wIPhnO?psNt0IaGeHhMJEUz%KCHo!N|PZ#MJ5 zW&zl2KsM{UF#KMsa=~S%nJ6bBMx?Dy3#@Yo9^r$u+l9#}uZt$5QoO;ZrlRx~tHbT} z<))&rD3jtu9cFJ`j+D{l(E!>>9%^Q{OKsfnIKW*A(dZSriU2Hy6oyk-~iZ1x_Q*6;{L@|6u@5D5zGm(fuCL49C7U_9KCe%99LfJ__j+t zU2O0lcinpr`6~Qon7j6s^I|imZMg8P={L=~=)&c5#b53Ekgt60HHzV{;?C(p6nDtB`;hX_%sBW3x)M4LdtYpz zUI=)O>>!-lAX2R6bN4$8>JIi92S>fbULzq}R98_O%RpT_9gCY8e1frfvSaaTW9ekA zHYbg@jzg40c^rb&u&&RD9uf$D@#DgMlW*xgs-3l%}o)0Y`WyqWITcOjZk2EVTdAUzI!X?zurg+fdS|Aa-4L)I_a+E^yTr`)w8f0oc!!zp zQIMSA6(JHH#yYtuLNYJV&M+iQ4=!F1s6&FT@Hp-zE?C@sWGqP)D-)-%w&L!(Hs#Vk zlX0YJsUS$(l-jM`|J+IzDYvsO_Bs~z`#NP9e-`-J1xzjTbM=`-ofVWv8WcAz!*v#E z;YM9%lYQE!)5Bt-@nDqh*U}(qGF9M=J8q*0-MB&0YRiB*T5UZY1$qou=oXlKOx>1+ zA;dFPIGkyn)8i}4c^-k95Jx1(9F@t{<&qPlAZjKe2e1A=1%!x;;#e?5Wec?? z8X}@DME88F<1L|9ydf7H7!&$>N5;-}r40r}$?Gc&oLdt3UuxAk}v2c4Z% zr=Ai`=48+0Gcu-*rJ9tE*Nq#FZ&G7a`NHHVh(3C zsZqx^A;%fWk&%fY>q{x_O8~oCEt^Y6#bDGP`lj{^8H`q^ z%a<4EQ%+&DUB9kij6T;X1CPr_2u5EMH>@gHbrqTU{tt~g2)WgpZo6sDdj;>YcVEx0 zS8h=bW?^#o5?=9LpV436fnSPV=_kiUCG6iT?FCaPd1Bb&kmH>Ge*-Lph#VxsOuIAv zgaR!~1-N-$wmIqc^4@@kq#rN1FUTP&>(>v?MLrbI?(GP3%+*J~<}cDUQyD8peM)y# zJ2D7)dE)-e(-3o|DVqzls=2^STg*t@NW$i+;Gq5 z-H{6^rd{=A2x)f@=r#@34g}`kDzxEZV)(fG$lKj}mBVjwzeS~&nQJ4RYsqou8jNNe zMgRZLm7pm<@nlIO)u|_(FZ-K}vdzqSBAs*Dapqhe&1TB}7xOs^~Z(FG@7B;LL;vPQpRO*e$_Ss=$!uplax#N|S;jlmd(ofvQauZfasR8LHq z3p8VUVrm$u6(-C;=8xPv{BnG@hYPGWeNHB*_Ppl-TC@-K4Mvp$s|55mo@iXO2J#(w}{GKXN&1uU)QH7}(ur~`)k)<1b z`kGu06e`PkNN5dhxp4B>63pztc(ts&CI@n~?-fnf@Bx^Zz?XuKl(n#4Ma!Wn03Y_b zxztBns8Js6dZ-BNoMJH{YR|T|BQpVl*cgtkZz(SrpCGmSCTxz+y!eh%hi81V^7e1r zm()yID;Py~v2XTiE1xN!Q0X5rVD$?X6LflE+AF7hxRKmMT*TEBzwS@Nw=Y&MQ2wTT zTU2xvTX1nX%iYr4wC7o6sq&cLx90_NYpC5rVtZdl|D%(>*^+aj; zv5dYRk$pCgfc{Y^0MzcRs;(}g<_ zB$HEidyY3%G*$h~m7P!R(=oFZntI|-b-wK|-~8jxH_+dF3;v_?O^G&VQe~+pPo1yV zcf9##%v68;eIaX{j-B_+j6veGc^j!L`|&w-{YtYlDtY6w3lyqPtr;FmhQNP`44p1S zyu~7nNrTDlpA@Q^?T-@oYRdIkIxN$|8sI<0nbZ|92FBVi(xmO=NR#&eN2JLB3*wuL zHu=65s^(x$a{tPSS!;oT-%!ka53@>(5Y5>LvD0LsIR}dG=TX7)x&4`@jy~Y|zCOs< zy}|R-7JBrU)h8dAES}y^KEM>M$mh13)j}N5^2&~<%c_*ayI*Oa@yfhgy?LXSz9LA< zX9u2n^N-3KF?T%(%IfZpO~m;6$gITn=kUFimXF1E#q0Pint=?5Zur z{tXt#>w)q&9~yYI|I~u1@uBk_uHO{6uD3gz<3lei_?RE`*FQda_ocgsOjf?${M0>_ zMRyiN=8vCA?3?GjHG2BI`))Y=GOo2&nCC*_kF=*Y3jPIs+FYuLQIRH6cA_FFcXCxl zc1uL6>_KB3fZes3ATeLXvCcYzS;$4Luh#m1)mNJ?DlMzQqOvz1pR8lAt&7sMJXZwd z3(y{}lQq^h9Mf2ffszWh=uY;4E#6-`-=Y^(sKwQneqpc6;4IESn}k@4ix#Osi(QW^ z#vvE9P>4n8#<7zTTallq+9;Ktox*37o&%U?egZw~`=e*r4Yl z2mHTNlxDJi0!a_3rtf4#<)&oyib~hb!VyZ>2HZ#gqv_h%A6=XNqjc>=Iju!|0F-S& z%5K-&>!Pvve?VF@_Vg#vcT|s=Jt>W^%!vb$#?Ol;O5+aZ`yWkXDH+AR3+c945~9PX zNrizphUkb=(?}wnO=`?I_`jkmfCH&$TL;~2=}ciSKAbf=D1<~+9%V#NN4}iIqJBqG zis6FziHzwd)cgrNYVRlRJUO@SIVSc*p8ZX z#{b?jiE0X{PE|n1@aCs36V=!H2^^@)xL_xx`YqHYQ)#_WY@@VJFsCr@{&-q9Mmt;8 z+VkLOXH=^_5ABcS!~RpmQr+|r_)A)Vvbq}_az(g1=W966#V?f2kgH3Q->|H zJx~0fKRkX-sN?68?!B$r-@Pw%+NzOjPAO^JzGU95;r5-ET)W|~j>eI(Ya#>gKmBrc z4o9Yn`LW64){+6gczEoE%g-Nc7jJuF(7=I%lpj7`^F&u-z`E$z8IwmZNZmMj>iGD9 zl|&SZuD<0_)khz%4CBrLI$^E-8m-n|uGQMf32N=xr@b`)Q|MJ%cb%}bj+mY)14fL! 
zi|ggN#dQ^Xxc}W|#}?OB0OE%W`fP-z*u#%L*0;8^uz zSmU#WKQUgWh$)3tu42$wGPoH9u(*8&H=^SD5IF$?fijdBi(AEnF*qc9GJ}Zbr8u`e z!oU)Z^;mDD5Vwy$wl#6ya8R_qxa}GKID!R%a*nLLca?O$O5m$DPO5V91`xOohxvvv%w(c;7S6bYDn?$C$}6 zR70@4UwD(&bSE5x$@=49SWi$~cg0PZ?5p0lXWs&lW(v9bW+tD9n4WI|4g1#C$&ab6 z_qunl+Pb=B+DGuFb?SB#xGn#ZCVc9B9~QnirmjwYOkMq$Irm#v$9ZRj567`8D_wLf zUuOF>a4dfi(ppV@PS0rQPIN3 zsSHQJb*ZAqYCNXkO3tR6s-#q%Ou^SFaAJNSTp6jV&H^X)ey?UyxH@fWxD7_l&4Z!P zg%foI(r_n}bAXjq&H)Kk#Q~Uz;CsZS$X6ch%V|S1H@y zoTB`X@-Feee%xDn|t;J_FZywjH7T-~I@bP{MY#s4~`*RCy zuAf|BLveGeh^K7gm|DCBcA6?hsMGD~ul=V|nA5yr3+8U{HKJvwNT8@Kg-TrLX&D@o0?=_w0FNDwf*5P~2v!@RKzc5d#W=}ZLH&l<{cvEFps7g7*(<6QB z@4e>QQ-A;8;AuvkpK!jR9xvj!bG|E^a?$23fo|wid(WBokF34_S4eVf?fnD<>SxiP zkXGLQQXt2r+H<*DfTwhOM%aC9%{`ZL$E&&juZWLoa!*JRF_V%wE)|~0V2hl??Bt%~ zAJhDX4NNcee7d<)kDH?XQlX!YIkQ^;0@}Y7*VI;H|jH2 zOn8Ocf#6qqgVEHxjST@8(6_*@y>m*rSZTKu-V?5bKX%qHf2qvgwTr9RwTn^f{TFd> z@Goff{+lxOehpz*4sx{KpDFfd(5kb=ek(2ZXD(-o{jUG}V!w>Feta+vJ``9j_5Q)DEf(N?{p3Pc^(B@g zPAd58F2=_^+IPpa*lWi$1s>=riURLD&FWVEoITPRz^0cDEI0u^s^&5;{rrTEvxGVkh+QtLE}h4Qd3uC5mNukAEtfe z>iqY>{PP(3Wb^l*eE#|Wf%((@;5d7?HhbIvDx2`}2eeLp5;|aqK);08ry9>=NWp#6 z(<7kL;|rii0rS+!=uvobdKCQ!=#d6_@K8K7v>HH?POI0KAHYH$C5rlB*%K2bV{Q8- z%zz#%d#s%JvauD?W*@{>|EINck8SI!~9cx;~SeJ;b0#Z$?7|)d;AyL7`q%t%FmFT*MZ|(5MGSUdOUFiht7}X>y zjZGA>(m#q+O{$=EIKOj_ukF}Q+O3gtY{$8;-}#;Q?|i;cR&?%k9qR;OkQFv9KW-vI zMzyip)-+ZwV%Xusx5-|_VX(z4PpITVVf`koMH`p8#`2XKm0DBv*s%tbTZ_w8fQzg` zuGMXFZQCu7Yl(+9!q@Uq;^lQIw({Q6n0@)p;-qkp=!A80S1Wk`aHCdx3H1-HpkxxB zWEuD`0xSXFa-w>dvn`rjlB#R=0oo}>b#!1|Z{FzxOmX@I1ikRHeXTrq;> ziC`W6FDe26v&ixYw^>2FAXD&vXBBIw!50)7DKZ5+0)pG1Gr78!TKqrhVq~|eXC}`) zp8fjW&(0ZT`2Xid0sh}T^6=1PsyKPN%Tz= zcKijQnCPa}>H?o{3gne01JOPnLrGSvZN|D1-FENBW9myqoPmGj{ zk=|2GC40+o@2MTXcySlW3*bu+qykKehmuU(3WPO0-cYPPxibErZY^sKR_vvt^M7)M z)_~){reQStj=BF%{(RL2nj6V`rw31+yWaQkcY1g9evleZ?|;3A-;4%O1Z{M?*ru8$ z1&tx`?-yi;I43**{bxG<*t>jL0?mOAJo?zcJxR%zP$j}-Pm4kN(zzznO5=}%Mq$9) z4wf2#IoaV1?0zSfdiCnPm6pn2L-dmcf&NTsL4dxa9j`;X+>1L8rapc2!6QHG4d3`%eCyO_zq={9u`vDMk5XG+c_eq_ z?U6?x`gy0^HT>v9&&Fl`+i0g09vt8DJ>H`ugc3W)w%uR6cK%y0oFucQJI1z6prN6^ zy#HH${;wC0=(BB4{`45O7mdl1ITs#zXx#T}qOyR}?lCO|mlQAVbGNJ=qUF1V5G~6A z-Hj#%%2b_Ttx`Fv4A}D6duA#^wnT&0{0Q}l7lj?tKR73sCfc@=S>PGW#{~^wL3DFY ze3wlnG&;#AyLm=^<%!LU<#8?Iu8HpnyQRyx?cKnEik6H=u&UgQN3eoIcBtaI0u&r! z1RH>JtIbq^K#)tQ&pPADYxO3Ku2;_wRWjnZY6RTJ`0gA(Cw^H-;O~MfxpNh?xpOID zD#g#iA^4cH{G2qf<}qip;$YT3WgQ?sO^iiJQw$oRc+ZWtA37aEz3n->k60_9|O2i}2i{s0k5v{d=mfPcF{e^Xer* z(Ps8smMD)fiN>4@+>@0B=SazFCVA#XV!M~CELgH5zkFgj$-9tFuK)Rp>$~jh13Igp zBHx`zLfk|v%`Ullmi-2^1&Fe5Ry zr46GXiR6z0P`M?bat91TIJHTpxvRR(nsh7y;w0x^k^mOT_d1{-OHvt9&`k(L{rF>u zq+yb_K}zA04$^aR_@ljS_ag42I{bl9xGh?ZgCr~fE{>nQYsAtdHblWrxB*m5>GVlS z#k7OMn%Vj$i5uZZ(L`)E`pwv^ltHnR>nDwDYNCJ7(U^p0V#S}t(BrY~_Q|ou9 zJ=sT$!ihAGE+78!;ouddX`bg*-=CfilQ%41VuD-r#ADz-C*@xoDfupG=lFPWBRW;QgV&2sAoRl<|3pK4@eJC6im!stLa^DeI1FuLR_oB< z{r}EDED0{_7m2Q}&Rt+@TfvYHYg?eMn-_iINFumGVAn~&X-r@j%o@T)NXKt=mGCh^ z8C5Bdt?Mc8R;Y)4ObK2~4}l1_H{gc}ib$Bq@!qs9FHkJMu<%s$sbKfJFAmR+Zc6XE zBV^4FI)9}kIfvqz8w}?>0I7!Y|}YryEkiXw?dg%$ynhC z{)1b(l=mx64_Pu?(~fHx(H8W*9Y!oTy$Z1+w~D=RMcYWr80{iroHdfhRGm`*d&!?7 z_R4T3(0hqKXE2l*FcVD8d0zCjrLzX}xIAhN_v2E{RGo8iBj?6#N3I)p&|w7giIuj? z;I|nSggGq}zcp1@U9k-Ow#%@CAy;)rU~xy6@JVvLH|?YNn0z=Cg*q5gd~iOU>BS%W zA%z&f7P}^N66OI3gt7mSXvtt-~R&_&_z@L0C?JCU}Rum0OBZBrAhJpHeVU! 
zIT%3T+`<)$VDx{be==+@*y|XW7&w3!2mm?B3@88q0C?JCU}Rw6NdLQ*fq}j5ztX=w zY%ds67(^M6!CL^PwFl1t0C?JMl3hqtQ51#OIeTA=k|HAnEu+9NL9KxyNn*$*%7_#s zE7T@!{7HH!Gt#m@%!&|%D1ylFEf6wAjcQ8mLB)~OP%#QhB0?pbs1WPrwC|WOmIpr8 z`P-L!*4p|D+>oJKF*~AWvDa!C4xNfNW+YP=a;%jY=i}I_QqZRN;)YH)_h}g(&^r{X z>!v~P#!)pPHTAOJg>%%U5=CPKw~!>12$e#=kSUa_Q8?6VGf4SJqaK{4GP&o1FKivW zyvO)C1x|M26lcLnBf_YxFQJed;Z)PuPR|g}gpG6uPOBE1_)+k@ly{2OqKprVEGwEA%G}d#e5bo`P$$+<1$K$1Qx*1U z5AtNbT!>OVh*y1PJ)K94J+~gqTRRagq*15w+cmma>T{5wy|^svT=JlX$I(b0GbXf< z+x!xb)6^5h=~UCg9-IhzKP2ZbP(YuN$ZrtIi*ZD?nyWh-m2f;N90-~~q5;>DB=8zs7v<||r3g8nul-Kz5N%(X*+I6An(5KLpqydQSA8n1fiE*dDQG;tb+);&&uwNUBIal8TTzC*3C_C6gg@M%F?$M@~m>oxFqm z2ZdD%e-x)FX(+8xx}zMW+@pL+g-K zmNtX7jdqkyicXu(DP1ewKHYzMDthbmZt4BeH_|WBzh}T^u+PxX$iirY(HCPY<2K_j zCN3uHOf^gom`RuwnO!h9F)uOSWd6&-&Ek?}h2=Y|46A3>Z8jV>8a5|vt!y{h>Dg_v z=dmxaf8x;NXyN$E>4_#{Xm$PWjM3Caqp2$~eMCg@Vow_vH@sNh+_ zcS86=!hoif zCX%FztA2n>@t^c7T>Am;J()}&>BbCq=G@1eoO=hrhL(Yb_?zF8Sc0<>B{+wzDbDj0 z%xbv;y)$eptg-4&;S4jj!r3{T}myt zOSsi43a_H0y(zo~UHew}6PuZm!e1~lJL9%*`H?T`zUR1J=sKbinU4cobX}|Gd(jw| z@Zlk1s}4+_CXvn96J8NuK4F;;*b&RFmFd86aeat49(`kmRI-s3S z@9eX>%_@r)(FE%9jzb2I3@(u%Pslyi5|P)G*&)vW4~+TfEhy4cUOE$ZGvr_TvY(KQ z12V-b6CJVgi1AoOlQ}!3WTdnOQcGWUf6nQctV_J~oiZbf(}?F}$T>#5d4dYpi(EQ+ zW?VY!1M03&`8;RHND~eAaDf)Cx$h~?|0mgyZ<_FR8M8RM$%|JdM~D9&(Q7gO%v0b< z<_@W}hGV(cX7Be0MqGFr0C?JM&_`@jK@fo9e-g(|oZfq-cfV&lg>GY;-g_sI#C8Zt za4-;BK=djQ5;qX-0*GD(0uF%aO%wqK&UA4IT*2^`8!N3o&F;)-G(^F#_QY%@90z@iCNP3}*x*8O3PE zFqUzQX95$M#AK#0m1#_81~Zw(Z00bRc~pyqBWz$J*V)GhHnEHC?B^IqC6;ZxV?BE% zPOR*dc(!nh_maQ?j`M?desY+T+~+Q*n9l-sNA~rQTJG_Hr##{@Pxwe3FL=gtPUGi0 zdw9)DUJ>APWRF{^C&)q;(Lf^yX<{+WglHkm5|*-zPb_Bzt*m4c|K{tFLZznEm3E~= z=~R|0E0mSWfBjsBn(C`**1DiS9ICCUZV4Dqy3f$*GkL?IraysK&Eaz!b41jYcBSJt zXyd1|0C?Ih&7ls$FcgL1cHO#ej07=DQUvN^cmm8iGeb~q;5o2hknk+0D8kE)8*<(G z%=tfhen3Cf1b2*C>sQL7rMST=P3TWN?r9B6m#AQTfT01U4{zfO`lDi$TY+U%XF<)d z2Z3YMdqKl+J3-56NU&hEa;?!xaOuNWb+=g+l+AdhA)%)q10XbS0003}d-x&%0MM=U Ax&QzG literal 0 HcmV?d00001 diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-Light-webfont.woff b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Light-webfont.woff new file mode 100755 index 0000000000000000000000000000000000000000..c414478ca52ec23f6a0155331ba2788c25e7896e GIT binary patch literal 25204 zcmY&<18^=)(C!=S#I|i)C$??dws~UP_K9uV&WUY1x%vM8R^7Tg_0)7f+ubv@T{}Hf zwd*D)DhdDs{9K040ObGrR?z>||C9g!OH@=@1^@sm`(bhZgWimGhnTR4=npseqf`EX z2KWVNSxjC|>4&=k0Kj7a0JsY7R@!1QC6(U*0HptqjnEI|>)9F=<&@}|ez^a9Reqq~ zlKLKOWMg0t06_i32TJ_G9hiL4tdXlT0RRA9_hZBL9}w7~N=)s|Y<{?lADh1b0I-Qt zBYmlvfzyxgue=`{$o~rjGiwjiAMR()(pdmNQd&+1E0MX0fiVD}A^c;*_yaaz>dbrd zAM%HD|IzV(KmvyaE^cn){G)56{ls?$06^;KCgiTH?Tmixw154KJMn{cRQp?h8w2;B zadlRI{0RO7FceUxt$~fn5BKt8)BfX|<>hboYH#QC6GyM{hkN*m`6^1{acb{q^5bjp z-!mQf;RWr2|G~PM82-%J(E5i1{|_(xie5kc|72;Bw_xT+1`OUH(*4&@=a+T4uN&zb z>+A1;7@~qB=o{*r>KnVm4buC^MeLFeSP^7`(fou~Y6kt!(S5x*)x%m@1!cI0ePKSo zv7rfWr@NRS&>uQdpK(wbl!XqGC_Hp<(0uU(pM+}84?3`bA`%9%IFh2?!kAW76Q-5A zK$xln8tzgEA@wEf^!E8`(PBc8rkj2{ll}Rh(-r4-*YgynN5Y*;yXSaEz4f5DNRrT> zNt6e7x9!{Bu13oH=x&5u!o#r2!;p#1)vpXlGK&)3MQ>WA;$;t|&Qr~!sR!LBRrg%4 z$A8=T=H})n>$BPU3_G^785V3~2hqBV748TMEK)8^on(iZJL!E`;I6$Te8gGyOfLuo^+^(Nb+YQs_+pgMU2R{AZ~2eg~)d404Z+#ohh1gH0_hPRI6S1?4zZd6=m z0UK2TPl@2^wTc@x(5*m6^Y?JP?-pC)=c1bIRI!~Y`}@ROE3J~Tnr@xig)NtqU3REd zjwmXj6n$m*EP^66q@q$lw6KlBg5+<34vg=SIV4ys3mqZJWo~TGYe+nP0mp6bHGNJ^ zuIub{PrSmLnXGNUrUPnw)poVjrJWs2BiTjbjcw+uWHOp08rUqBNiR*+M~{hGG3WxM zvUM$%4BmM+!CMAo-EVGp_WMSHVj=LY{$I<*Wy#VWIgxhN4df7;Zw9UjuH3OyhvFSZ zEwJk!UsE`&Qsk^uo~BCfxbUvOADbrU&ixT!s@@XyfKrC26x^~)@wjq**3{4|@qJS3 
zh#~41q*&F}7zSLhWN3^cOge4V+Q_UT@fMV27LYS)$RCqTPdIsN(K!_8E!fNoMVqo> zxKLI_pYlHvb{3C#@zx3&oUzD#Qe>Lwqou1;vgcwZL{CBKGc$bNaK8*<*MYS3tu9z| zdW55ceF}=EUf^?{dRaaW>Ylb^tX_?{_iQ)}r#)NSPT#MrXRIq*e8kkRXW}Y3HeCIm z#W&RM%;y{nnAHMHg%KofyjQ&+-|$l}n|$a7wrJO%9AdtGe0krMvwbo~Pp$B}njLz4 zELS!@Q|jz4clRgR<5|Bi%c#*Nsw$Vj^su&DwgOCDEtcsa0QeD^F0A{a)N9yEHjFQvDHG2v}+xNUh zi}$g08e?vNfyq3w8lBw2v00erT+UE#ntMKv$e~(ZTU*aBe6_Cj?r^=xXM0=shF|SN z&RMXN%8%M6?&^3J=XsaCiFG<6$3NuxIGY}4TLpdH}hSwhNE(X z-uti4tSa)^9+li%Pj#k5?j5BDYFc!k_v*dziDK~s&U)!h9~;_whHnYi7p-iWZWo2T z3=ru*QtJn0vn7zmmB-yPe;lBF7J#=1-{lN+_6Uwx5yW>U9x|Ic{`p%+_4aK|!(1V4qL}M7foR-4+X)?Sf2E zaEDP%w}@x7q|y?YSS~nx?~Tn7qG^nE%MavOCTxmuzu=j&ngi070d`j=WqQve<`hv4 zJjf~s6K+6Au}nm3@^na0jQRn8B7LbC^bIeFs3J`}gDs$~6;O)XyFj&(`0QowEtTD(AW`Qr7t9)Ps_>hjMX(GU*2G>IjX-0mjV@29q7A zlND%?ge^KvED)nayHdW~SDsR-Kp@5*#IprlAlsLwbfa9VKS_zP@|kA^gG(%18?FnE zSgP5rR6SdcCfZQ!4)-wb;^`uvTycwEhUVls)CecshyLNU8^nXR<-j}sQvc8}krPH)!#$M2j=nxnB*Vw|3?RW^a!gtdG?G!kU4RpfVG3yQw|qv(E*J=M5Psay=ngD^ zRR{e1e`1(Un=KmrR6c|3AB5-aG_YV=DibtJl|4 z0J=!5!p) zG4Pe<7rK8VP!Mn&DZrSK(VppsX_4{6?3ZbkKG(3bSb&tEu%M)%te}LTjG$hhB%!WB zbiq7G=r+ywchFSeiM+mgF9zsRUmigoK|TScV z|8S2TUY580L0=yP1m1;D-FaUL1T}smUtOo4@Gj@MQ<;?naFn&cuP2%~Q=krb86Xf4 zYHV%@a&&fhdi);$ytumXvb5IVqNK*Cs;thyf`t0Wij4Nql9c9{nw;){{P@2S<>_r9 z#mP<4)!AJ^g^3M+Dlf{6Yjgbs^yTfv&DGt7jg_6Ht+l;>8!9phiJ7@wFKWP~ z&}lRqzK$pG{Ixpn?s|u3NGd8e+pg8rPI|8*3d^^G6QTqWG9_Jg z@o@Q7`NUF+=D+&-2(PHQZidI{`8k~3!ESeH%HbWgds;X`ugj#o-90k9yt2N{>@a@- zwP3~|oyqP;H*kwQBp|~o@C(pYA7%KH6;Lj)T7WsA0$>6#16Tko0agHOfDOPF;0e$H zxc@X8fFr;Z;0*`_gaY&ct^j9%5kMFW@xOfPD0Ikb2_kI4!h-}1t`cyrqco()4SVH- zK@BcN8ya(m970;KJI4QU0Kyjt5Qbf^#mvhu63+xDL>Kq|%}0pDY!zO>!p+^_G@Z^Y zH6CB#c4{@FbW9T0zXd+U_9-jg)Bt!cIzNw&Y7BzVv^M>D$KJnSz%%EPxGe#PymplE zm@?b}nvJDjb05&5?;rU0+!&243lEE2P^W~TN0|;%LD!g5?Gx~CflIrF3=BnSC3|%I zC5m4yEXS4;bxrYeok7fquVX9%N>(c2>k`$EQ?Rg{`0 zU)K9{>KjhRorb zdjup04`x8ns4sKo{Yx_7jfbX&#T*V)$dldkyP)YTys{IWubzCN=db2ZwtsfLK;Pd0 zc=z=o9|(OElcyK&g=SMXGbz`Fi4p*z#;OctzFRVxK2YETT7@%`Cpr*Jc%Tl@--$_6 z!9$**K%JS$Lmpn<+JnV+t*cI<(Hp<(cT@IE@Xb%GjIFT?(<84#ug|VscUy#!Tjrul zjsRu{6Rh3eT?J%}`yF*;Qn`1JLcP&u&_9+Cz*3GCrBOE9Cp zU83!0sbUZjb`JIuSEez^Rr^ zROJ_Xnb}P)WOz8{*md$J44NP;Y%}NTMR+VO5h02%gX#^>uyN&xaUj`BQdFFwPS*hY z3LoagI_vrp5DnnRNfdsI-x;6oizO+~*KJl;+957D3SE3q(#$9olrfrM@xm$~YiKI! 
z&Xs5c2-DG`PQUF`(WB11@z^gP&s*CsUs=rFiASu{e!6ex@S8bxBc0!NZ+$sn!7nUs}dznr@5)B-=$?qp!z zo*NI>CWdU6&&2pKL(IT@YIphzw4)6EHdS?%)%fFQ+2c&|TVxP9I5%9t&s*>f`C&(n z)O+#!UeNEwqIVJK8-u~5cP=q#`>pCb>C5k$zv;W62mTA9%?P?YSN3Y5bZ5rdXAs2) ztqsI)hi5j%zG21%k!+mZo^%tBZXJ=L6itL>ZWG~>f{2dWwJu3mM?iQ9d2XH-RzZy` z$CVOoXL_2D^)M*~tZM8qN09{>LW6D!?BohznMa_mpzbW6Md{Ok781WSv`H~9k{X6% zRkheN$+I#oM{B26QO1_FHgaKJWJqVGxCnd~#~<&uM6`d4YWz>+gt&+YyYYzi9!v^4 z*u6;1%gPM6W^9_}YN@8fK~n)b}s`Yqp%5U{s5u`fyXfnr={ zn_>-lOVFQ`otld?^YBQ2DB2zKa2I+CL?y|_EBmt5ll(n%0?XRC@$7jVK>I+k2WBl% z&{4kuCQeFQqa()!Kzj}QUl*MJGMOpxf||vl$&Hk%pak1Tzr|Jul`SoEX@w*Vg?&=8 zPRI)YN4URSV~`r+{tp$UqbG7GEc|U^|GhV#*YVyEw;PRSz17*tu8>$X9-H%n%g2or z_W78}MNlz!X;eP`FKk*k?7v?TduQ!!^3Bwu7rpRwFqogQPk$o9@@6E^<77 zZ4cf?@b&m@AI}_!E6-J&BHYj9aKz1i54hn z4{XZdL#2<9qJ^HI6s{oK8g2cPQQU$u`q`Scrx2ECi&|qfhc*k4z|u)Y6u4E(%dg6~ zc`GZR>r_;JZaI`BJyazy>}TEC`am6G_Cnl*C&rEQ6%he%?!lvRiv#l}7p)!;;O&rp zWEEIW4zrd_gl={??Pse+ZT=pf5XnY|KXH0VDHn>B_Btz6Vb^8fOVNGXN(rR=R8w{C z;M}z6AEmAwt8YIpooK+r@VefIP?KBT^lQT-`oznr-z}`{S!H^-i!HFi!`O`d=u6?g zRsBl#?Fg+s=c#@VU&FTlofJqZoLEbpHLsXavoTez-1@RN zE?$96H>P#~nB-zil2D~SwfmOzvxqSP_al1L&PGQ@Yxs6O6M&X*1 z=P`++-4s=$>RMB+LHOH-e_q6|mcA;+exQ{%kPHt*h9lJSw~{dN6SxXANWDUWgDQf3 z@PH6eo;=8*Ij@LIb&DYCfWjYC{)V(2D^(agO-`xb{Hg~5Z&UY&l!7K08toa2XB%qw zQYrCn4|Ed`+ILf+Mq=c@d;ma*b8v){@{!&gLvrL<^ zuKVc5+;wGI!$NM*o17qgE)W3HKva<;R*E=iXH@Q%IT819Y;|VGWBX9S-Qt>D%u@EhO1IkdL9;6#lfC93vFp4}PN)+$ zaQ&0Pt$q=1f?r6z1TMXY;{M_0WP$r&_MPeO)7W)MgNhfpOoA>YX#g+Mq$(Z(@Tfg8 zufX4!zi|PjEX@8&kiu#=g*XHrW*qr1sBY5rW=C#$k_&V|t=a~tO9-zj6)V-nhw|do z$SJ~5)^wf%K(w-X{uxMlXk<`-o~o+4uG*)pmaEDub-HifL^@xNJ|XndSUqkxpYb_v zIM=8yy)U}8p09J9R@yJ0@m9%r-ng*2N+TRQO??Z?WGiz;MUmwHZtcAes* z%Sg^sPz@9{Ke52`5}LG|a0D{=cmlWdb1GFf!+35!gB-Dlzp}2c(t9)E-Z-6Rl5okk zH`jf)ka)~O;Fvn4r6yKL9htb$n2I9et(25eV;PF}wgAZx;S}>!f#Cx3kRHO@k;;cJ zzc_mGFd#<1JVdw7YH|G>CWS#Ok-;8}omb*^t7+TIG4p$T=vEt}vY5f8=X2Q^`nwbv z*VK5cohm8FG2(N_{i#>W%x{9++XL;+l1+VdWIqQmB+z0r{xh|S;Dio3o8%+~aD^C; z#gv713WYIP@&-7Gxz9`hl%{CfLCm9GP?kXLQH(*ZU$gY<);0kJZIYliAFwwenQK|V`eG!9PG(nr zufAk-TRvLWC-nr1hbskifZKCn+n*f=?(_t19Sc>N0`xR)x{Ljn_IPr^>NKM|I$?$^ z0d-XSe027Tf+^7j&~RIHOEIO~eGa8@rV!3e?~ zK>wt+GIVMTUG+>7)>2oO#?qchaP=Bq*UMLOI*)5sQe`z-ANNOMytP~0mhODqXtW%( zzK%E>nzh**Op(_bciUoc7n5f?@2_fAEr+smwEXV6o@M{Je}A&;exDS2uMKZpuhw17 z@{Z~i0-+E5`pOs)sdm^BA@v~CCyKK{1t0_ENA&cLj^HPrOirG`FYh0eU-k z2q}O22Ue>Gh*PRq0>J!L!#{>!F=nM#=4chQ(`CxmVnu8E43dAX&XrzOeBYdtzgw5z zxn%O2-*1t2)OemmFKwhuuIKm+WO%T{%=n5*KG;3#Jyg1U(sOFCwx#Fxba`9|@4apb z^=va7=4^X^5O0Wa2h1rBUa^=etQ*VZVG`xj8k8Uj{VJpcQjF0=NeLDq?46mG$?UB` zc*?Wsb5iP^R2am|)1oy8%j|_-Htvi=g(c|(UKOw4l;(XyGfjZiEAcN+YM`{GiD=n1 zF`(D(U%KUozV27yj7#A&ynBtfM*33FN#{xb;JwcJ!a>83yHUt69H|V*liXa(v2=uBm z37t&N*7_h<>u*3SlK}YY%K3(=ytXbdy`56d*b$oJ+^<2C67w7BiY~$o$tFKbF3WEm zG-!tfdB_ttR?;eyQa1lD`34>Hm_km0>?!M2B89$WXhqb{i!znJ^8A#n$RX9%cKKhp zJ4(-lP!OXigW)rzA9L7te%`wmblqHv?ys$g_a!Ryq-*7orfz2?Ip zBa9$!q;4|YgDQT=k;!#wXw3v>I@o;eWxk*k4s6W?&MuE=%ifV1^ z)Rv7q;JM`$$aUiz9&4k6>1 zVuRUXUOe%BltXRyyc;ro!#GbcbL!&-~z!dqOXR&F--GOzxtnt1!mS*JE7R&zatA7xgYYY% z1uaf(rONihc8d2PyVgYF(*$L$a^~3Mi+i;xOn}fw;8TL%JqhO5*a0Y%w2MBSq&|Ot zIhe722M=^UvWmRAOZ}%&(6%+5%5T~vsFC!#R|BcT0kL%1ie=8JYvVcUxPiPwr*so| zTmAZB%1I+EvRb0ps|qB?a2oHUpb%uVYxf>qzs%s3?MC0|FY1Ig_W_v=zm!E}ax}iU zV?Un_sq5uUpZ%19hLG&c=4%0uz^_>t0{r?`%24MU08h>@6lgsDRqI9fzd<@Sr9j*r zm|!AoU{hd#R&xT8rr>f;8)z#|fMf;Cs@`O8w*GT(KBYig?wfS)h2Z!oU&v`pc&=qa z$adG;UTLR&wev>K2A(IRZjlw(SHrR1dx~3}R=BVu-@CH!M?fv#;<4}D(&yt{m|lYP z0dHP}IYfp!2mH!VUO1~PBD^N)KldzP4&Z7;ssV3l9e^U|(D7d2P~MJbq9fid$2~lW z+V>}Z_q*)prQOmsiAs-@oxAAwA74Az(}&~ZFL5rSe{tw|Vejh#{q-H1dv2%!@Fe5c 
z=HJ7dTYJV=e6V2Zy-bSL21n1>(|k@3Ycqs`=GxQGmfjKDwB=6NJyjOot)XLnowbs? zmu44@s?RLUQ_IV|(b(&nn>TU#c3CseHr{x4Ps5wTw->sVKl|44UhK9_NxBT&<7hSv zA~f*{(E&Dn$vPjWe*(-aC^039&5~d%QI0%DiPmvNn2}T<4WRxSi&0clIpEhNPa2{? z&1!L0txIOxe(%%~dL-L!&s}m|AG2p&8gE_)eQ@}=U)5jmxZ5 zd@tVGdi9pF`=^X6iOQi;jIr}|^aI6~Oe85N5gK5TGFHf7HY{Sm8|a_NEd0ImGP7`r z42U<$fb^!1XlgY!y7&|%%SZqj_logiq51t3^84#OC$xjR;(}a2x`1)A)vvp zC&ks$^qUsE19q|g7Dp0qYM3q*h7RtlLq+!S;k4{m5xCyJU>}4_*%vT<*|VXvbQKgm zNqY6=wolFLsnm)}$%W=CXMP;8ORg8I_{ zfQgw0=@}>_TTlfgcDN1rraJE z_PaF06DDjpL#GJX_->$agct**pm*5W%dUTaX;c@8Xd_8~6Ur6Uz9yL$$=^43VWmC- zfKW~Z7$Iwj(O=jG#Tlt56yjX{O2hcnGJPlcN7;O$QLr3C|EU9otG1v`s>F>eZHv^$ z^uh-Xrtk{p1b6UOL*5q-Gsz)0iT(KD=!hu<*#6+rKe;E%|$?UCow z^}@j1v^P3~@C!y6y7wib<3_@I3|IMkfxYP%Y$jC$U`X>}ogm08(V9a#C%}Cfjv8+2 zfi4SfLgB&v*!Pa3qR`BPAMpX*j^bjy5Ri_{u+xr=)ANBPq3W98A|_b28^F*KTmlJ* zstf7IX~-q->d;mp;&;wyzID_Uz_UdTlbU0B%aV1;O$j2M<-Yi%b{Q0szdwN-cZ%K6 zraN6NJ*UrbzMBu361-jRAA^pLA)wi`+g&DfqF3%pue+~G zxb<_T0lq$nCko?B&A2ei2=&tP3hB@}6TkAN6wwrMOBw1*T}97fOpM`>4=oI$J}mhB zII2S8+&Nj!NX&u9)t7Lc3vp4^K z-b@qMc0P_2&d_N|2M;}O+kbv(Eu!nbU%mXj%pCK5jeob(YPegj_&0;^eiPJGnG+}M z@x0(hlj$dAQrm*ZbGO{zMxU7;MG?Sz3)j?SP&z(#i>?Bxw4%-z(utpJUZe_KjGs)3 z)iWfnmlzho8G^J6vL~*vKfPhTP!6wrxofUg}{AVpRDaC9=&khPf6F`f!NsIHm);gmeB*9y{(Vh2pF4_<- zGCl_xs1w{suHReOm%FNBwh5dHC(tMdZ8_OX-hy0iKLp#0F0UNuQDK@48i&00Bi+y` zWkmn$CT<&dAd~of1%-FG-3?-=gP2F3wFBj(4&f(Fr#_Qb7Tr*4RzG>2xKN7|U!mgw z^6i2D0LCqV4?r!bh{F=E|J?58co5^K_u>S>d0nmY#=UZ8#(e1fskZ_g>dPUai;9-R2bE!P;j)tLfnl+L{BoJAOpeNW>eVelt^gvwuW%f$TH!W8LSB+Q;cgr134iQ^iaxY=>d&7nO(XKY&+#pr}hP z@JJaPV z562RUVIWEH7pP9hDGCiUSp(PvU9^9XtiW*VoKOpo3m&(gw3OivNHiC}1;boCTo&_9OuU$B;G#rO?13%dLDLu}@#(%bkA7 z+AR%=fSX~=Nn5c8^+iD*p0H+Uo#J`(G!;Md#Asvlk30=3ukKs}%3 zg?Kj(x9Uuv%lh=?v0t9T@dJq#GQ2tTBEzJh^ZE-vYsrfI>Tu-9n9jUZ*65)V4HR*B z7ED7eQgLV8cyTyIjd~X{jbT;Ycl^-2Q^FVX*^vc-9raLr9mII6T{=WfiCBS$`?Ddn z`yNIWOHdcF^Yne5O*Bw7f(R~n3ygLJY!ha+1pv);pEAl#Y^X}sVAjkR%XhY&p)<#Q z_gImthY8@JK~6%;3l@jY;qWmS#Qz;HS>a#FM;iwROf{jBo1P2=4DBm}BySH#s^mjJ z%S?@jBQ+r%UI07^yk6N$1>CWks3u()_#4k!i~r5Tl+@CxL=`jsA?MaxKRih-Np0So zundkL0|s@B6b1Dr+X5fw^RYgo+zS16MS8<;Luomw2~BFHqttps$!iLg`D&8)?;Bb| zN3HMqyo^;v1kHq0Whu|A_`VzOQL0-{qy0k`gjuz(VDwLcPq8A=mCy{?`h=~O6j+#eTBR!T6MP7uMG zs*kY?JvD}5!aPJ4izsN;H+$KX-6F?vvzYiS_it^j=Ab&G&h_%G30duEJYYCeGC{?y(NL(@H382iX>fTAD_~xmsHE{891 zH4aS)D25N5mW+8jC>>WEhEWzKe*QNLdufOuXd>(jk8FfLwxB(5tp=*Ta1at1H%PDu z@+>ixtgIddhR%+UQY_-9g3D`Ed2GqiT#EU)VVxVUlFEn4RP7uZfy? 
zwYW}vy^sB*N{{DNu;bQk`5Y_=`v-t$k>lDXtM=j_f+9!<1pV(fTHRmqFLR^Q5--%g zXSJZe?BIycfv7KV7>EAEyCfF0p%jIZf~`qd6o^`)QIIsdqw(Dz{Ow}7`qP2$QF6Py z!4|cxzT9-44HM(to$%3UJGZxWu(ql-_`0A)J+l3n$)}?Bh$||65L$1uz9B983`QV2 zUYwQNk?^Nv9yg#}0sH_NAgDrwMGmy#AB_oFUfviu6vmiiuc%_ovLyBjPdml3Si%$u z0;A&+Oge>RA(^6i)o~o^lCGj5Yd6w3A=dM#T79!S*#Wn-LBnjaP%-2-#iK>!J^FN^ z?32a-xXA=VyI~?$OoyN`3>{)5ajR2AJQ>PzG>hD^R@ZC(vu+Ev>kb;HIvHY~uFp2& zTCG=(o}BdStf0-Jo)u)1?Z&ndj#6e$D%8!d*kG|a9YRSx&GPJ42)Ty7X~vUq{W_(h zv!>~jt%h=g?P_Pi<$O7|8usYtI|*M957+Bl@v|4k_vtL}wAiM6YjD?!8(4PO~`?-deO8f$ATzFwlNAjL%aJ%nt9Vv@~E#>p2ds+ zn<>m@M{TNrL0BgCg(B-(+AZGO67I93sj`1@@Ox&5!I;;6a~wfE#}OMTvYhWA#RIyN zQ+=N1*^Zm!V&E3pSx)r_S zaYf=1h>&N{7LlVo-V7qzQIS^+_ZK1ubTMyBpaMu3B1pCnWa)HG#YtS?F;!c-=qC@N z)qEB));dkUI}axVA!0RUEFQjg?g25i{_h)YUovbp!A*{V(TVb>ff%UPEQ7)dK7apK zpQ}w1>bRYUjizKi5=$NQu8kdtY;9reD`ENddq#lLF}DS$&$&|A+hyZ$e4@W(R`2vo zmN@$58x*$AGlqt20M>pti8bN~WYOcKJ_!yZ3TDg|=J2*YLaUDTvC$o$>+*Cu53l)Qc=I9bSPmaUmBp>i%UL?lG^1eE3O>MAUuQiyQPaRZyZ4=XdhCN?@%6~@`dAslI%U$(6 z|C8fuTdC7}p!^M61?6(+UYIaJ&O(w8agcxZdB&E@3;5L`ADC#8=FZU{=wU7_>P!hrQtq13F-9 z@wYNWvat{oUCz%k)zY-cR$Ajcp`tva*H%T~cdC+ioV)dIwujAZcuX(i{T4Uz0FUq9 z=83ZHG8sjG2CFgo42qF;yXuB?WJ3wtxIRAmlAjJ)$+9|2QDZqA>#M|gIMfzX(4^)W zXtDOed$zc&Ap-o4h^KbNlX&u$H)r|5;e?7`2{(iW}$frigROduTrP& z8;g1II*E$5G6;hftzS;UhDYI)IJMQNh!?>oNOIoAWR_M+v{SUmlTv|Kr702p&|3+U zb`4%b|XLAHAK*7CXcl zrM_v`6RQB<;sOchD|2Tby{Z)@7yNHD(fd!U7Pdi)aoT>|g1bO^GpF)al=N;ECME@%FBy%i3ns1fv+W*1U=mDCY!G_+qs>lTr55z$+ ziAf76&H1t2<%-=?88$OAdcCe~{4z&d1s%)(A&(-LAXluY(0h`~Np!*M72d1|E2>oO zor&1vYxENM3df5h6`Lm%Xp}b$P>!9DAX*xXTwZn;>7TEp6F9b%<+_Nyf#HgdmA*> zdH)M;SiyGa@qJ_Bqfv8g(}qx;GBG{9rm_4ka0AWc;aAA3mbVZ>wP+c~@rOpW<`LQ> zp%I$5n-7+GKkS`6n-Gg`@AOp7==56vR=-<(Ti;?3n$Hfg?>ftk&CAMe@0Sh8zNRWd_;8KDRH=PY2lH30 z3)tR>41b(-*#O4&hcXgC+HBsCCXkH10zKwAR4dC)v9d@a?+u;5M4X{;ru;FJ$dQ>V zEHn7Bb+*%Kh7_?{h^G#(#Yk0tlMu0y)VsC>-TJd6Tlut1PxrxHq^-+BI&8DT zwxID2`GcRnTY>iq$H2A4vZRIN3ol+_wB!ErV^e?V{>Y5F1u^OFQFa3qaS;(6{(iyg zc#)|HOYS1VRI#nIZ4BaYUkMUK7Mew~UXuJ0SYmY%j%sY23@@FtN8hrNjRCEWg#Z;= zUM&okuG0&G3><=*f>m!>IJj21#)g)XHF>gYH|p&@UTcHx{VwV4=roV6yR*zzHhkRA zmNPWmqUU|`{;4ZsHpY1iCWrrvQ2?}v{K@93fLAM1y(cAw6!6)vfGGq6=Oh$GobD(Y zab^m3{?ERPR*8-I)%wN$$CplHuF1cH&<2hM7+V9ye8%zg8^BF{u&_xF55Nf_#IcmSY!B8JTc=BT1SZ zNbqeQ7I}0F@yUgDqAy?1AMv3WJm_ z$W9~k`g6kVgWAqHR0nJPA8X{_l_h}k0tu@@i*FD-!wTwD;oMFTOi?e@jpVQ9#{p;B zjc;NFoZo{ zQ|d;m-r0HWKM+>B6^H+i^|{Y%`%8Kczs>8opOf12c@9sb-7s&+$c3iE@TxsPwRWlv z<)%%~@%9?K@pNdY(nOx55Tw^}KcWuB)F_hSUKCk2t)B`_DxruaCH9*CW#*l9#pfp0 zG_WM+UG$8s3$!^cw{qvon62r^dGNh-C(qwg-(8?^5v(mGLA_{DF;1~SWXD&^%|#&9`eq*ZUN$ojf7VHv zaF;f?cTsxZ$TDinw6OR!jMm=l*jJ73}OCA3rBTLe3)liMe%v7p<8KXF`hz>`Hjgql8dU>-CRZuQRLQQOAq zV#lL>P>wtu;T!)Z8t#nhe?z6agteSRJ9cZcr)zQTrhU)gKMUiexT<7mUF7;y3Bn)fvm>8Io2pQwcYsv!Q^(s3m zLJN|A;2dp-(^x{D-nW>i`8{_X+vl}$^ZYiQf9+~@x1^6WO!K$vI-VbE@i>#!vXvu%^E=#s_1v2!V@3 z2L+khq^Cw%Q+s}6@dZ6bTOOz@H5N`=ZI93*ED;h$AQYh-WVKAcsx;VDm@8?dz0TJl#r zy-xcBYWd(`*l7_F4Qo$krR1Jfrvpi5so%l*tCEwr zG(MjgzGw_vOr-%U;Qp2SHGiI3o6C)L3mpgoR;k@TdWvq{&SFHwI9Qqc5kWq5Eluwd z=r%cGDN@<`6@*0(kAaJEisCUXK4+0T)11-;$uGx_kI;wf@r3ne9<#I2W-hCJP7U{9 zjN)tBN_>6CLFWy&r=W+&<+x&KU`39DPA90dZ+lQKNf-Rx8rJM#k}JW>bP0}7#*st} zgE9yA7EUv2*ic{%-)<4~UYlB3-eN9Bql&ra+s-B}dtwG-&Fp%6@&H%H2US_0e-$S_;==0$61Vp-)Qq zOq>y*A)IwEm~FgSsW2Z3BW6eAf-gw;qd$ST<)$a7r$v$@I@F&1L@D?mi>sqdgL-aU z_~RUa2PW@Dy`n#rB6%P?p+H{bXrwncHWUnj6OQ1`F#Z&CE@TQp z8NSsJha2L<%IrOFb1N}LQs5MA2cdNg=@DF zcfFFvNqM7^EW{)=T|*O-G0k**C*pGiw1U=0)Vh4(`f91ctK7p!Z=#HGg(mO0JE13A z+21;G@5BG|vC*A8A30f7<_S`eG;e>L9$lQc^kv4kmmj&s>FYPByFDiF!L?psy02r(+hjBFl}1?J^$xR#vnVOF?4 zeMSf$VDL5X-{#*jb<#P-?oO6+)qJO)RWxvI%hffSy$<}Z-*so`3Zw4Pn?TN|%90nR 
zwU7eSb_$9Km_Tl3qBO4Kz7H0`q|fx$Y*!h78Y_03G%sa+1sV4G#psP=WnlVrXRdIq?6Q0121Lsm9WcKOg_fq6K_k6RdLKoo~ zA1ftc!BdxijRK6$32Hg-fF?V~9Bb7hbCTSzWVr~1Rvp!1)pn!opL4CJ{KaD#aAQg^ zpt_n>$wyQ6cs^xl3tdq*g{{?k3Je7eb6k9uGaVwN_;|rEI<{pfz4h4ajQYiHFsHG{ zjnCZqL4;YDYqj6RF1#JsIV)x5 z)Y65qgJeLY!^>yU;x5IGhHbi*t2HEETJ*Qu|HuH40Ycl2!cz9d?&JNP(u%l*A@ z@b6T|Av^Urx%3v#aYZsaZU40H=}klJALQ${u*dg>pQQt#(}VkX&v#3f-2(cO%w@1c zA|O7zqH#(7%(f+eaj77GG2NWPrTvW5>_UqX6DQ6&x4?4ESSu7w)v=V8lBueij3-Hn$DJMzzPtG*?#(Xi*IcFd&a~Ifb!-)F%ejcyg1c>O%K_j;RS^$VAI1 zI6Vf1<}aT8iHydPIl zVd$$LqP4p9#fQ?(PE$a#hi|BrbO$sW<<|S^vJE*%0}PzRT|`*^gqKIUm+(XNv7tFP z&dr4|!o&Wb0G%OX-a2CWQXIj)b`k7sfnY}h!45#MBY|KiAlQztcs|9!5bSWq5o}Mv z<2IY#F*OBBNbbb|^3J(4ku;>UeN7WgdJvCrH38T-1(s z78hyWaPDx~v4_uL$3Ad-qW9c~F$XM%iCXM5 z1x{hfFE}gM_LoYlEzx3fdl}q$zC4fdW58dT!hg#DrSW+W`HPnOF6X$F1Qxo zh1QbVdM$C)oQ#&`akU&@QU8QaX6um+jdAS-C}yXcBBhz8Xn%S2b_-`Ni8-16uq5k` zrU4d7D`yF;vN~er*ahesK zDv34~a#&!EcB~RnT*xNQI?5lX|7;8?;JGK@x<3M+;B`ABUDvoMhCZ=Iw)sUd%4Q>Q z(I=4TQ|b(ApZ+F3`+(_`VPD;9syi^L5Fq%xsI`;iA=6*j`%z;KX4A=>_Q6mgJGHl zykQznveQ7egEo&u9aO42u;Ir})8cl{f>r?C_C`T63%(^ro#ZK=#IO@&oxYpn6HoLz zYuU^bV%Gn~t2O-o#OM=VZG6g@e$x0}jWKtnknY1AZf~n;1!`Idr`+Lq8)uWzY0NG_ z1)sxIdX5VjIhM+POKqBKfHK`~!&ofFKzvyG;7o%f5!;dkb5-)312s)ibE;G~&Hp(w zfw59s%syz#UTm6&(6NQ7*nB=hGh31QBy?VasXL`xF7_<9F)<;pt?Kan_)KQeMrSf# zU}ysM8(U9K7=gR*LgN#J8&Gz~*INERzETd$R*;I6*VV)-qGgpaTHGP>S^cGg?i z)_RMQ)|-0~xQ$DP)@z>0WW@y+tr|*zYq@S(0n}>UnA^^WJ<&O&8=Igt@R1gq;euE= znY=wj0}mO-eDfeTt1V1`6w8fhSs+#O5^X9;h{$0AHiynNj|G{-6l_8SHm&4h#k!~j zY!^dnrWjiT+YQv?1p)lfs0AV1cG-!_KK(C=b=@~G&SzT(ojLar+}&6p{ckKMnk1ksq88B_~fI%yQL76v|Zn^6I z$F>*E=ye#3Zz+ztUOjI4VxTskVJ|O#^D~3r-!*f8h=^j3UY~w^_M7Z1L}tHvMZ~D5 z!Wk1_e<)lk*QfQA2{jeW5Pw^PCIUvWeAMao8A`s@un8(OlO|3)gwnFr*a`VkqyL(% zC5Nu1XIm35{^NyEiI_b4q6S4_tCdK74cQl8OF671z3sJRw6&JZrnRKDiCx;9a2*xnny%?BwnX$ED?(Wd~7rOsB;!eW1BgxY5ADt zNyXkBr`+!1+5Syh`)=5{trvbXvv1iv2~&Wxa1P9VHlHZr#EBA^bwM-3Wek-7lgm-n z&R7W+v}~F!VPein(|`%#$0YycPPq47bUv1k891Td)?w1sxajyIxdH18#imKI#ZEdNGnd`H-fGG-U*EfX!Ko2f>0j@@<6tOnc23t7Q>Kyh z?Gry4J#on+_h2^(@+zIBAbkmTP9fKY`-x3pR{aq+yRM_PA?iv@Qz*-J#iuEBrSKO4 zj76=dDHPFENmNYljMEgntaU2fVwysrOjOfPNSYs~Dfp76DfnFVsXjk@Y)e{tz=Ivq z{-jkm<1JLG zJ=&#%G2lWNos>IawD^(IqMXw&mKGKNHZ8jS@6rOR`a756*hu#=ph#ILTyBu0OV|Gv zk|fmSg~?(B7A{PfwQ;SQOqX)5FUfB~m+sBz(mPHUwJf9+gZt2x7RnN}AEiwgXT$ zAMKE?5iJkwr7qzlLYFpez;a4jY||}lMcVqjLW~I(FsrICPS=tox_Vt|1WDV4*;-vf zu?P!HRs1gzxk~{)z98GhY5+^yrt!s46|+-KRKx&hyObP#a7}`COYc7q`S#p*-UQ;T`gt+6*X&CP~hu@#!#SvzmC2;eR56hB3?Xx*NokTkvk?l za=h`58+Tv5p}cda+m|kRq`dP}n|Iw&H+{EW78no?tf{O)2n{%SNl#1$#blPcW{nXc2Y)<4Yhjzv80Jsx4lkmjrTT7 z=Lu{kl=Kyy2^HifU1%y)K|52S3XQ2yF)_uPL)!xXgSk-D@NaiGlt6Wa)S(@dY58UI9@zV)N*bYrWdKf+#XEi*|?z`hwCQ_SMi16>d+2c z9h2e8FHFD{EciF!N|>MR!j@rJo$W&R*QOYbXux+8;7jN7xgzeyBnSg&j}$~SKiDI| zFasDLWRSrC&Om^R698v~>-|==Khl}~VWk0E8Zam_Wty9UwV_xMvpG88fT48Qk11Y} zcGeUrs_WvgL2FYOMo{&)M#ijzdyLs;+wN}z6=LMR`x>h$13PKn^L36RPU+3&`AL$< zPhKGD*$wxIqgKMraHbCqu%a`0;F@nruhMH4wtda^5}wz z?<+s6AHAzYbdBus&#vDTj4mncb8YCo@GFz1{rQO_zpvZBaqV3s=jQoa_K;T_i1)JI z;+m7g9vOZhuQGLh*6_03!>_N?k9;!jANp&={l?xe8c$YRiY!BCe{IZb!kJd!=XpS? 
z>}+SM6?kV_x)r=9r$ahga0-)3f%pKsopPl!Ij7mY88};YW+^CpA2Umefh`G@`+6L^XY6)@s z5rbCa*;ce*{7c#y*Cu8rM+V+V9^NJzxOgd%7hRY{y)2Zxc@carmzo<+B4+cL zWx`VNStDmUJDxL*^HIL};&P@>nnlHDO(acKi;*(byx1F_uV~p_;azlVCTsI4a;(eu ze2G&;WrzR*yHpqV9XsZ-(H|cx9bM4p>dQxutt;&;lipc_iniW-S9WroyL z?;Wr8dut6M*6q6e#Xj$Et=?Bk1c%4>$-TPdbONGB-%w`cOzDg{!^gybuv$U3&R72n>%?4Uz2f!J zhhN$iEtO`L&bv;UYHc|mEtiVbRtpurP2^kIT7L&?&EzJuyVk6B)|%Y{r%}a^BXv}} zTMMLJaLEZ7+hW^^5vaaRIeJD=SBAOT5fWl3HWX1uh_X;Rn z{RykC2`Vi9%|MGHz$8i_6w6=|1$!0)egIdIF9J9wQCh%u8OoE^fc0U5glBWffOmZ0 zr##%wWU`F5CX;3UPfsR`Sz*JZ8;|&7)V;uzGMY7RZ&aC`HWjy?S0<_n-rS{65f;$7 zzj^U}gx)7h;Zox<+lm$2Dhb6OKZEmJd2cw3@so68*e87k4UWZPW687q?T;nL?!jh* zK6llXUN_#D^I2QlH0e_gG~PDiil#xI_)5)0G_NTX{qKCjZ0-ZB$W>R0+3=^lo#uztaLM@e9r#_<iAUX{W>o$3Yi$e_TH72E{E9HmVhRzZ)as$~Iwh`R zoJHVBruwrmgjm`&(N@c>fc68fP1Y2T20=XLI_~46%d~Lhu^EqI&|-Zx4O%Q*R1f=e z$&(+~?%uX}_ns|VcJoh=@7FJyc>{0H{hoxkEL*zu%!jknp8Baz)m8n5Zd|%_BkA(P zGmVYUC~5oRQCQ7O*H4)LzL|F1W885zPkM%^9W-?XjMUr7&@y zl6bTkI`IzJR04(VL8nL{XW;ut5R1rW z9uWG_C>(SEe@O`+pE%?jJ$FF47&=0F=qI0G>pcH;MB_Rw9;7Ucj?~Q+LnQoUOn<|; z&bboqpM<~Y>-cF%aCB+ij1P(Rtrzug@tN3g$uH!E45FI6FkHZF2rqzy0iVtm ziisj;6-$h__{b_V9iC#m;S7UT^DMSi4g&&E`uyiPMl#_;b&u-)?$+(_#7# z;hDxACdUPplfkAUswpmp?Ce=gcpZt0M`?{QThrKpt^8(lwZ_7Aj?g(9iR<8$4R)!f z>yV@g8)I7x)u7iI26lW`(EBA=CsmAq=7O@v&KA`c!Kr|tw__YBwx&>f9!F$mz->f& z!XKd>MZR#oqu7cwGdqXWPNy_p4u`R9PxA%U-M@5}Ty#z;1a^9N1j>;~URUr3q)7tKM!)@qlgN zs&~2^U|+$nNeO2-hY4qJD$u^N*;$)BeHxR-<x=ie3E1MR6`j5P)=E0hRzmuTLxzOeL)5#+E zRXO=rMVaSxk^FxsG(Rcq6Khtg;JH3^FH|nGax0m9NOJ|NpEl2)Sfe^zVb>f`KnI%aOU>MpUugd6Ubb@ zdOLjp%-Ir}x}g11!dL^Qr7dByvYn$@q?S*tsZX<7aD)8k0OBAP?);#%e2@*FQrE__ zY|fU@$9Mp>02XOFg(bvAxRVSTt+Y#o!J`&N5F|A_+#ITUC9PxE0zU;U>Isxk?kvAp zz;MQ7dRQx^^F*ViW%yZX34E=r6c5XA{#U6B9~P!X0R9RbI~j*x>rx6wh4>V&h@$1w zheV~|u+@AFey}pFcJPz;Z{Up_YLh2gBCbS z0F3+wasYVRZIeq#R8btpzjOZQPEyUHkQRbs+JlrV7lvpwNfTquB8~7dMmk~*W(;W( zgl?=9B`YV4CLE;4L=hO42)eLU4-_E?iXJ<2QCMKBprq-$SF0&p{O)(oz2|@K|9;=O z`ZK&K0MsL1)-Ga?)gAQdjc7L8Q6f(evI-C~wK%A}IIg#$O&20nRwB!2_*DRV)mP-H zFA303QRUa;>tEiJ!_+$!Q(uq^D9zItHEv6LNrVw$n z31JgPTt1PHoZmy6d5XB2#i)#<)Rggl6mjb|2F)a&H=xm}yF_iqEt85GSwJp(?=c0) zwtk?{Y(=rjM7DfDg_>s1G$FMKM54HP_MZ zcppWF45LYAP{Q>g<3^5(u>UlA*?_1OV-E(cl8KOv;-u}pbpy4M&TJmiBd_0@Y3~b{5IlS^UI+e8)M*_hqn#u*b=QIV7WEi76Fh98IgH@ z+ut1ie}6YyL08D{j3>|6w2sBP3n zs{J1o5l~*D41`(!jEXm4Q`{ zbpo3hTN2wN_7L_S4k3;+w$$5D{<@s1w*C=p$GsxIyrq5SNgRP@d2sp*uoEyWHgA-(7UFu zqu-+cz#zrolED|lGQ&MaOh!IN%Z#2F>liOGNio$j{bu&ae4oW8%M{C3RzX(#to5un zS>Lf?voW(Nv3X^iWxLN#$S%w7gMFNRpM!uyio-5PF~=q+4yQDyf6jF-5-u4olU#1# zfX`gMxH7p)xf;2KxmLN(alPgy;O6C4<+ce3uetql`{lmHL&ihHW0J=Y&t+ad0NUP| zaR2}T0002&07d`>0000000IC300ICO000310Z{+|004N}g;L8-!ax)~Er>zUpfQ@b zniaah@=!EBVq%CI6T$+CY}^zm4~5pWMdONpaB2J?{SnuGfD2ED;ZZj*nLFpcPS4x{ zz?QZR4Z&~L>hc`nh(seCMQVa$Ja?GYqH%iX*i~3#RZC%=nFoaFk14#4v_9_r#HN0x z@E5XrWms|Qj_-(~V>d0^vznsRZFhW8vD$;4>57_FZ#lL<#1$N>_aV@P$@l*?efOWheNpXKxz zDdC29xI`JZw0nwk-Xu$t7o0!yX>E;>I%hooXXNC!s>}`Ol^K8LDR3ln1>Pu+W7-0% zChzg4bfgoV=|We!(VZUj zq!+!>&@ph}#6=n9R8UDD`qGd73}7IGaN}VxRe13+grN*$I3pOzC`L1ev5aFp6PU;( zCNqVpOk+ATn8_?=Gl#iUi^7)Qm%Hr}zGy%H;N>=Zj&xW#*k z=K#m~$q#;Un3LS+E~l8se0E1(>mjw=;{i{3#ABZDkvd-RjOU!j&v*9lnwPvHz-PX) zm3o3KU?B}Oa*!q#(M*UI!YpP9OZmhymea}#R-tblu zBvFziSyCib(j;9nBvY~^TXH0qbDZZAm$}UaE^>#>oRBs%f)>O9y%p=2R>hf8<;ZW1x#H;r3dCWZ`8cK)K`3Gn3qd$1sJVqnPPzW=ekUh6_b%Q?5+^Mo zYmw$coU=SOWI7@zOUPg1*k=PWAjNtq5_nM3?qmhTBX$2*k)lv zxPMzHn=eKcu%3(>P`xthK<&b?f%<{b0PK*_1RC=n1hx1(r`T-m-q_6bc!R`6&si&a ppu1IUgWg!t2ir5n4(Lx6yI^PXXGXsK0D50qoB#j;QhWG5005LX)`0*3 literal 0 HcmV?d00001 diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-LightItalic-webfont.woff 
b/modules/ui/composer/webapp/src/assets/RobotoCondensed-LightItalic-webfont.woff new file mode 100755 index 0000000000000000000000000000000000000000..5536e16c5d67ec03304ec1e55cd560fea221e49a GIT binary patch literal 29796 zcmY&;1CVAt(C$07?b)$y+qP|6JLZmU+qP$RY}>ZA<2&E~->O?Tr=IHMsqW4>rz+_r z9d~&#F#r(Yr?{8^Q2vu!i2vLEum1lRF)ux&p&kpCBm<~E*YKU^OGpeFzTw6oN;4j)*U8kztA);}tMpZ)=Cz=&B0 z7C+<<$MVzu^#c-kWUw&{TNjTXPUFXaAOHYThw369Wn*vrqi4JM)9=I&)-@z)YHSTX ze)_fl@4pcK2Vf|mcsoN|(;sg1N2mA4w)M1r+tR__*#!V_{PV*-|M+|r!}L6Ja5DX| zb@uX5;a>KI z`TVBFX87HnV!}Xw=m-PGK^0III!K~Fp@V}Ki>Cx6RC9jNfdv#1Fi6D_6b+Upv}#%~ zZPW!KRGrZ9SHg&?|H97hUal7{CzNP<=yx*NU$&jEId{5Wrm#E{?p-^)#ycBq1|>w3 zgd-OHG@ z;1Rw(0g_ z|0)LhN85;pZ3WkX4If0jE5QLBGrU2o5!2i>Jm7RmHb8mH$blvo^l*Czo`tXzY~K&& z@QV+m+^!Q*`WB z)pZuIQ5Eo<2%cW6v{3`y26R0C0MGkwxg~KSro~Pb+m&)~K)kimCKapY-lbF6dPUi7 zk5=V`sv1hsUq-+pC|W}*CJjUj*CZlH{ub!S_#Tl%g1xfP8IoM)&i1l~%;Ohu((X~y z@7(OR&QABtE3%o%+73G%P}`@zr=ua`;%FAZE*fuQJ6|Q6(Ja}>X1PpyWu`HDLez#y z7a*OjXSrnf&btZTIw0q9d$)VgKN1uRL16v=o?JYZES-^488^K^4)OUG;F{pdT`LVJ z-eI%?`@iFBibqvSoRuomRLPx}-VG09)8yQ_KNXm&w?uuQlp!ib_v})9u3Vorb&N^^ zpVT^Hh=v7eRt44*f=FT>b%AZ-Kd zOV*rTk?3Hbf}*K^2szJvEFXt;&pR>Jug2W_wj4&&UTy7X@7FdnHkGYD;u<$Iag`h! zZvHP48|wEKb4~@!>Vc&qh?2M7tKLs<1gTfeK6C?HwCm4~G2cGEyzeU6J{hBD*7)5m zj=ersD;u9Fb@o?#2b1jatlw8<)aVmcl}ljy*gLIT0cLKN%lEpOhR&Co9C)nzEJO9- z+9A~quFqJ+`1O*T(wnDr=&eD|IluMnGB36H*qd`!d>i<+hTK9s?QQ28*4)6yS;|pt z+lQ-Z$UEOV2AZcXN#EPo*0Hl{gS8iG8;RMQ>%6Aa*0nZ(+3OIHyZ95MXeWGLNlCC%15I7p6IvGgO-AUM|9OsFv5(*7FNrZK}OH-7fRl-qyVl zR{K$M7VM?-qjre9J6|Mt-sNs%osY>0j(9%Kr^gvr`g)vSG)E%ZtJ^k~%RjX=%b%L4 zuZUKo@sh=r@q1%E**5X(DA^K`7^!zn_z$OFKzsab-J88@O=`s4LVaK{%wOhazAM&n zRgW?Hw(HERB3>NO$i4N|W=iDWQEQ;4#SVC{-x5&k2!eo(es0%=@%+`aQB0XpXa_=^bLEf#7jN!W(}>2Oe?f z@M_>eR(Y5|hQBD5iD*rqj|hv=J`hf2u9Skl5d@J`WoTz`1a!0mO7Z#@=yo|-AN<9y z!8Soml|AH)P);+g=Ghh4{`O|)Y=W@LyRD0sHN83aqGs)*UY?>(x+)#_2Mo-IcgYb^GFe;jx9a#cvKxFaY-clH`;f){DRKjgTAe#&lPkXAtOk{d~? zgkiQS$;zNSzS& z>+2b86s%0I=IZ=cF(ZQm2_qOa1H<%-`A-b3H`)TIjer7Fr{PNf$FG5bnZE%RxEMI1 z2go5~;42L*hJO=K5O5qRz=V;}f$5fMk@3SE)~w2aYuH6RKw3~lP)blvP*PA&{W{5f`LXKCg@Us9$_9~J|W1GA=FAgsITXPP)OpiTr? 
zAP^909Bv4540Z&1f?of;xVk@OX>GwpNlj5zSzUnz2@Mey86BY|DJ?NIIXwaS@%7>5 z>FpuK$<5K#+1){fiH(t!nVn&!sjacKxqbo$3Jww$8Xh7hDlRfMI^IG?N=(Ay=I%F( znlLGJnk`1J<4e{nRocCd10_@1t@a!B)+asP?+6S@MaAYjwOTq!?^Q%$`PT5llpw-p zq^qu;uCSHQET!oFtFKQ8N?PmY_?%v!!`Yqe_E%;c-cfsJg(LKOOe#A)BeTmZ>pRSj z^M_Ci<_t2K?0$3ucPK*wvaABIfbNDUqn{NFC>K~QzyeSKFa?+cECE(O3ppErEx-=o z4bTO+0b&4p03(1sz~QIv1@Hp|0m1=_0BwKgB4ImB(0EmDg zd93RycFJiBB5uJUfCLP#5^}DiHm1i7|IP=48eEDtGT{z6g0y6JijQ;zA`l1=fm^S| z%F8d3$OQj|A>l2=_Y0ZX`cDB1H+Q4+bUL&2czlKXne~kF2}#`FE$}I}PdSO^M!-wa z#YJ>fQxJrfjoHuGd;fv~&zwu*wgMdS+EXH6$#Ms1HI;tNeL#o4e-J!yV>YoYJT7uU zo&5qm&UBOxy1|<2m_T?7T-q~YU?@r}*{9<#QTl3SIkB3kYmT4m3Svfj9b*wtwzdV$ z_*(q6^x%jp9XC-@&z6?f@I`{sA2Q~ms@y#JZ@pi)q4Crph-xYTVs${x6BZvHfedxN zm1LX-4(Zvbau5Sg6LSm|~q1ZFsG~kCy?=u2P{zrGQe%X*s*Q+8tsm-qN zjB1cxz8jnO6A_(i?bDG$?)DW{W%HC%?|RccBOp2WFav_d{h2fGUs3^YJTx^d7Vx0L zUhH1q1$Uz{_yPfdzP|wo9_xcX5C*8O&)?pqHaDzmi&JE4!AYc@ zmeMeUgt`%r+rk)Nz!Dgi&_rx1i0_8EYtct-%7sQRAD&)LWu@*pM|5sSw=oHO-`nnH z?47^zzOLP0Ug!ocWf~qndtQ2OeDq#>3^hs!2I?9^7-2^&(xt)~^6?pI_3vFZ<{|yg zkq$ICOiAGBVLUiXmXdPLkcd5ajb1h)KivHI+4Sj1-VenLC(Fa$?s|=PsnhLXg}*5N zJpNu`U~E$(b3zKgttP^WfuiwV6!}vI6Mk4W=%p=6_FbmCzZ={fuEldwT}?!g(4^Ky z0#zh4uc1xKONnCb6ZRXAk1xwH70V;Oe#leI;cx54zLwkpjWMa7wkGBCGg67QY7t$* zQ5I5-c7I9WC5Bp;!ztqHpWp8?q<9AeuU>8`WI;%n#Lh6pOusOdm?xOMdSv!p_85?4 zf5mgI)BM9E&?kdt3lC9rh*9%MDf(qPC3X}#M|QRwx;;mgp1j8@BIg}5(70z1w!B{AMG)GOgSI)bLHU{fE#nci&|3;LvMAjhmZz)$}eb#}~bY*RS`^={qzl~G_ci_d)DK4+oQ#BBtw&zHk_nJ`Td&6E(^%Do>ocR>Fr z7T}E(O^#esT}cbCme8RGij^+i6tKQ85Fl20=dl+Qq9K`AlQ5|l-|5x=!JQQ|cVWMg zcEsI9?5FlMeB?>ZOmCajxv_`q`PAdub7}r^I6b-LMcsZ5VGl1}MPP%bn z-=>wzf4g`!p{}!4x}zM1Z&lCx5OswYXCLr-e3|+o`!_Nwy*@}qg<&h*K1~B_R)J~cj>348Z&biU=a}NU146G!xF+)zu<7V9Jr|Ua@ zB_*@f!AW#U2=!)%r?Uf;vnB7$Q~P+TM!ASBW#86yvgIZTQU})6zmF}9B9+c2CdWtb zL8Qq`tXtIFAL71m>Zd~S=arZ6V@9!J#Ww6&YgLLSKykdt&l=$&pwJQ7gVI?^=N%sctto*2~?}CoEqYY&s^gPOc$Gu~lKnm&#@A#O~FzNS#0$(?)Z z8Ck67QvnW%1WYy?pY!X68`o$vT_gH>u3qdf(}&5{Bk-ABRs(6wIHC64`a0a+@7??B zl+-KHczm|6>yCX)vne0}=D4?2{^g*)PliToeYF~VOx0=D(O7Yoig>26r<_!t+0~<@ zV;4ZLlfYO}=o@&k4{rK$ni}y!s9jR71uENd6s|}xb6z0*NuDe5gqz@R9(R1j{Q@hr z+^o&T%QTkO3u2|V$8Ef}8RNb!q&b#BwVTd&qyLg1_SxGp?64iA`P*etviP5sjolpl zbhG%OkUsgXV8wf*spu^w_3g4I(jCK7|xu(7isNdKPQd_2X;`{w>a|2B_!_(>{u@@#^@ zO|&vF{=K7+HSkNwV2#X^^XaVgbNiz3$lII3II6{Z``~p-^O0f(QiD6Fa%OP#6d7>R zTHad%k6h!mL6~>QG+1}&$f)gf(GqC1!q`}l%2=J{VJfP>BDhDPdj(3@XNsFNv_5^w z?nWy4YP$E)sRrpufLr$Dh5sidhNIIYSF>kLE(2?!riWwtdraJJWz-s5lN#%iD7E2o zKXUA{1>G}Ob$j*MkU0O~cT#r(4oJuLUs&@#ks!Cj{v<77B6pSj0BknbHQx_}Fd;Yw zew)bA4Uznicnu(^0CAGwLFYOh7w}pAHodOAv&gPMWor__lftDPgf!-a!?Pp)>YVyp zdXS!7akiRY6NlC-?0R&5vzac*J9YqR!I7rGwTbRwexQ^`tEQraX-Kh(Eh+L1MQS2BgFNJ-t%t0XW{(Ev*%i5C|`U0;3gUCj3_{YX=bc{!y zGHX2iyAGtESww9XOVvl9bU&>8U$ZZR8g-Y+aFVL>C~mFy`=jc@+C*w%>tW5rk@r2u zc<#O=6TV!k9i) z6ZZ6SV(conVe>m>HRMVJKjguShG=*;5gq;q;rE+HDZEWx?a<2I+f={AdYMz6t$=tax+3Jfdh`&*?{Y5?_dT zM~BSi>q|p8M*A?yhN&%BhxZR5ttafEepwYD_bZ40aYQX}sOq7i#HGizS2C-nVNon2 zxh;gxSwxEm@C?gc$Mb7Q!Ui(9eUwLrSS3LS6mE*;RmX-`@Js>5i7w+^I+oL>Lu2RD zANvZc;*#Ka5|}V&Ur4CVFB8@xOX!ruk*&0GAjz7#Q#?M`rKmlfKbbOrUuLPkc%;_# z&=YL^R&KdSsXUaW+1TJqp4xuDZ?eAjI7gpiCg-!z`@X7nlFyGHKeO27jQQEr#e^sC zws~Nxv&Bq+9XOdDwCP~IHJCNlli)Z6X~M8jNgFVg<1RDx2Q(O@MFC<&$fz=|j09C& zI0q+LqEtfv{mF4n0}_=qC>E=o)abIg3y7L*zfenmRZYEe^BktlcAf41GxhKAIrHU& zV`Vyvbu@j)>qJ&QA^W$l>kFK_4l~n1OmnAQ=ZTf<<)^VceZsg@j@J9z90DfQjDW{A zS)3n;u_?Z(k=J3H+#ZKRPdekdW*VZsP#5fkvbBJP4NUl3VFG-KnY!oK$6c~Y(UZym zaLtLu< zMipF0tTz19`ox+} z!p6*SQ$kTE)(Sc1bFECg5G=?rJmA>^5|ZQ!io*RtXUlWimm&+xaP3B#z7s-xV+Whu z3X5o_Smr^aDd-Apx~s%-V~9yD3^dQqvgm>uXNEdJ*Sb`kPjvFGlfv=TaqCjvesjn1-1Fa7WNd&kzKMoyDobMYAh+n1Z0 
zEh}bvEGl2eTfL(@iL>R#`rf|oAJr3NN?5Om%R@VYVltCtWF-Tt`OymRgp6f}Q@<45 zQGJ}o3J?oVdhFq;1_68QO@Fi&eH^3Fd7ZLpYgVxQaUE>P#bNCV020`|S~5IJK(ps% zNzAu|9D4;3SH@U!H~_@skJ;&*20v1Q+?-Y(bEdQV76yZU;8z0M+6n5~t^}QFJt@$L z+!2600+4*Sj6W1364|j>jafj}g(EUr-%{RAHAD@#qNXdHq-vRbkgtHC8eIk9aR+K& zmWn{Ri+f2{>{!Q1M7(e}sbA0MdXQ+(g_(SMlH&7L{%7oVe5-?~v9sIse(^)@Y9g!W z^I#`aW65NiH~CuE1N)FJe&_xY#@FrQuz19HuHO4|dAXa4DFh}=n`MRXP3YB?UFhENQ|v%uRo zl5^2n;TKInh;zU^!@E49A}|6X&0MXegmH*-i}DeF?$aZ{+1CPOl^pTC=Rvx$CFt^f zQJ%;&G;$Xu8euzh%ITJlM3b4{S$MD9q4#)+?KcQ0cw8TRUQ(Vfh$fU=u9DYKFyz~t zX&Md87NRuj82R7n?W+#OqRDe!_=&;pG48QEt3K#o!+2_5=U!Unc3wtWzw)aL{Z~!M z_2my_l(~k}5#cO}695D(dk>(QNu$VF_a3qrwg`;fE+1AT#NwzT;Q;7gx^VtwTF^yp z-H;`#jeejWt&un2tAeqs64GU~@hrC*ygbWs{_&V>z;%RYyO!AA8TdJ7myop0EJj5G ze~{>r{kuhAtixM}IQ33wj1Pi~H5!@g!o!rHIrPJT$OOp!8UtU`?k25j0)C?~a2?nX z4px$CwWeB(XoEe={wlm0!AeaC;=7L(02c18u}Q zznW!WBU(W;q*5leYaSW4bzy1UPuy;QXXNz0KLmYUZKmd6c=USvuOox7oLH@8edKtm zO$GnlSR4x6a7fH(V*U_u0o>EtEIIZb6Om#i`}|C2QA%_4VDnEB+~X9KG6dw5>g2#L z{JVdM6{PHl$tw(evzTNM4)>?cr`Q0~;-;B(a)m-ii*0!qzmFd@# z>w{-DvPNdQZqnHiu3n!?GR79zX9zZ2wvM!0FLm7Bwm~3JJ@v@lG4#;ffU4h^TA3NC zr&S5!&x6$%>1jfRmC}nuFS9LOE zGw@VsuCK%F&#(fUuGsGc0o{>gdCZ5Gao_Z@&409wLe?vW`+0q3clq=W@-@vn;;0eg zMQbOob4`~Z?ZpP<^$)I5bAxWqYmT?cbFiiI_g$7u#YN>>%SnsO3ew)Bvk45+k$;zL zj~09~L};=bsrk0|enPu%%WU%9mU&yu4mf8gf=w0IT)1U>(JQ3W4+2W2o2uWBzIrzndulHK}qlzcrgB8#Uefi*xl z2S`?~^IN+C>HAmUg&M|qXzIo&iVtg`hZShMqTXP`rMXBr2AnCT9)htO0*G`zd1hKdPO!uB%9!oq-${kq{g@PUUM8pZa0`P3)eT3hcHreYMIkJzsJ-#Ym5O?%GG#k6r+P`}D z$d1=qUtz(P)SIFACw(V6#MC#_Z?d--x(d!;#IX_dw6%R5JwVe(uGTelxr(IiMNf2G zpQU?%T*RT~$>REeHGx#nG~3^pz%JKZ7Xu^u3llINn^wq<0&}L_TOwZB#%yQ`)A$Yb zg2=C>Li{91)nQP~|4dW|-=dhSgdBPq;@ts7A#5}6os(-X7&@hzJT&bU9bM|BeJOtZq@FM}=?#v&Xt9vW{ z_Sn^{aVvhUKU3XA_@aSG3G!_KUI0o0TolpDx77@`9_=Y<$NL-R__D_@G~%kr_tJO< zdS4lb0F&v>^40d$ftd15dty~kUA`UvU(`bf=&`BEab@E?x!KSFHsa}cGN_1Di2_r{ zqLa6tkr_`m7JRO{2p{ikFwqPQmrv*D0;Run!AD=-T5sEio!;be`})`Xpx!b1Aeb!y zdkb&gzj=-xM+QD#W?fOrx6$?i;WwPJ@2b`o;UEk&!h%tpjtp<<_1S%nhUw6dptsIW z+m|DofX=HzTaJPij3(K>yQ%vAzP{^PmjU^bmvpm3KO6FsL;N_`fjv=^d&_qhb07q$I7fPx_Ekz}RmeJ)S zjQ)iD8CFYO)pE31kjb0;@w4w0RisL8tZHMaF6i!AA)eFA|Xuu>u@p&aW?oFVEGb^<&1QW)@!=mU3(2uL-tEq0610&}RVgj1845+@`@g_2pT4c$wIIr;Y9znJE3`CgE+9==6hfF~n( zeu!#XBmuYe&+m)9RP45wcT2t>8K*B)27t3~5;*BgW+oW^mxQ;OC`1gK1qDx>@srNW z=G0TMFdninIopG2$)e4Pf!w<-|84NarGk@o&Bl`+DN!|I`)T-+dvw|(ZC;$Y#KSnZm~1w8hvj1P;LFhqvsg|0qt@%g zGU)QT?2`ytqry_&u>`UeYbK3~Y+4iaxs^H!UGlr8Bx^k<7$A*t3qFGg